From 76ccaa54e9a1aa224ffac27787498f7fab451bb6 Mon Sep 17 00:00:00 2001
From: unknown <365893829@qq.com>
Date: Mon, 16 Jan 2023 16:56:38 +0800
Subject: [PATCH 1/2] Add mmaction2 test cases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 9 +-
.../.github/CODE_OF_CONDUCT.md | 76 +
.../mmaction2-0.24.1/.github/CONTRIBUTING.md | 1 +
.../.github/ISSUE_TEMPLATE/config.yml | 9 +
.../.github/ISSUE_TEMPLATE/error-report.md | 49 +
.../.github/ISSUE_TEMPLATE/feature_request.md | 27 +
.../ISSUE_TEMPLATE/general_questions.md | 14 +
.../reimplementation_questions.md | 69 +
.../.github/pull_request_template.md | 26 +
.../.github/workflows/build.yml | 248 +++
.../.github/workflows/deploy.yml | 26 +
.../.github/workflows/lint.yml | 27 +
.../.github/workflows/test_mim.yml | 47 +
openmmlab_test/mmaction2-0.24.1/.gitignore | 140 ++
.../mmaction2-0.24.1/.pre-commit-config.yaml | 52 +
openmmlab_test/mmaction2-0.24.1/.pylintrc | 624 ++++++
.../mmaction2-0.24.1/.readthedocs.yml | 7 +
openmmlab_test/mmaction2-0.24.1/CITATION.cff | 8 +
openmmlab_test/mmaction2-0.24.1/LICENSE | 203 ++
openmmlab_test/mmaction2-0.24.1/MANIFEST.in | 3 +
openmmlab_test/mmaction2-0.24.1/README.md | 320 +++
.../mmaction2-0.24.1/README_zh-CN.md | 331 +++
.../configs/_base_/default_runtime.py | 18 +
.../configs/_base_/models/audioonly_r50.py | 18 +
.../configs/_base_/models/bmn_400x100.py | 12 +
.../configs/_base_/models/bsn_pem.py | 13 +
.../configs/_base_/models/bsn_tem.py | 8 +
.../_base_/models/c3d_sports1m_pretrained.py | 23 +
.../configs/_base_/models/i3d_r50.py | 27 +
.../configs/_base_/models/ircsn_r152.py | 22 +
.../configs/_base_/models/r2plus1d_r34.py | 28 +
.../configs/_base_/models/slowfast_r50.py | 39 +
.../configs/_base_/models/slowonly_r50.py | 22 +
.../configs/_base_/models/tanet_r50.py | 20 +
.../configs/_base_/models/tin_r50.py | 21 +
.../configs/_base_/models/tpn_slowonly_r50.py | 40 +
.../configs/_base_/models/tpn_tsm_r50.py | 36 +
.../configs/_base_/models/trn_r50.py | 22 +
.../configs/_base_/models/tsm_mobilenet_v2.py | 22 +
.../configs/_base_/models/tsm_r50.py | 21 +
.../configs/_base_/models/tsn_r50.py | 19 +
.../configs/_base_/models/tsn_r50_audio.py | 13 +
.../configs/_base_/models/x3d.py | 14 +
.../configs/_base_/schedules/adam_20e.py | 7 +
.../configs/_base_/schedules/sgd_100e.py | 10 +
.../_base_/schedules/sgd_150e_warmup.py | 13 +
.../configs/_base_/schedules/sgd_50e.py | 10 +
.../configs/_base_/schedules/sgd_tsm_100e.py | 12 +
.../configs/_base_/schedules/sgd_tsm_50e.py | 12 +
.../schedules/sgd_tsm_mobilenet_v2_100e.py | 12 +
.../schedules/sgd_tsm_mobilenet_v2_50e.py | 12 +
.../detection/_base_/models/slowonly_r50.py | 43 +
.../_base_/models/slowonly_r50_nl.py | 50 +
.../configs/detection/acrn/README.md | 97 +
.../configs/detection/acrn/README_zh-CN.md | 81 +
.../configs/detection/acrn/metafile.yml | 49 +
...etrained_r50_8x8x1_cosine_10e_ava22_rgb.py | 170 ++
...pretrained_r50_8x8x1_cosine_10e_ava_rgb.py | 170 ++
.../configs/detection/ava/README.md | 146 ++
.../configs/detection/ava/README_zh-CN.md | 129 ++
.../configs/detection/ava/metafile.yml | 259 +++
...etics_pretrained_r50_4x16x1_20e_ava_rgb.py | 175 ++
...etics_pretrained_r50_4x16x1_20e_ava_rgb.py | 174 ++
...d_r50_4x16x1_20e_ava_rgb_custom_classes.py | 184 ++
...netics_pretrained_r50_8x8x1_20e_ava_rgb.py | 175 ++
...etrained_r50_8x8x1_cosine_10e_ava22_rgb.py | 168 ++
...etrained_r50_8x8x1_cosine_10e_ava22_rgb.py | 171 ++
...etrained_r50_8x8x1_cosine_10e_ava22_rgb.py | 169 ++
...etics_pretrained_r101_8x8x1_20e_ava_rgb.py | 158 ++
...etics_pretrained_r50_4x16x1_20e_ava_rgb.py | 158 ++
...d_r50_4x16x1_20e_ava_rgb_custom_classes.py | 169 ++
...etics_pretrained_r50_4x16x1_10e_ava_rgb.py | 120 ++
...netics_pretrained_r50_8x8x1_10e_ava_rgb.py | 119 +
...ource_pretrained_r101_8x8x1_20e_ava_rgb.py | 158 ++
...ource_pretrained_r50_4x16x1_20e_ava_rgb.py | 159 ++
.../configs/detection/lfb/README.md | 132 ++
.../configs/detection/lfb/README_zh-CN.md | 103 +
...trained_slowonly_r50_4x16x1_20e_ava_rgb.py | 137 ++
...trained_slowonly_r50_4x16x1_20e_ava_rgb.py | 137 ++
...trained_slowonly_r50_4x16x1_20e_ava_rgb.py | 147 ++
.../lfb/lfb_slowonly_r50_ava_infer.py | 65 +
.../configs/detection/lfb/metafile.yml | 70 +
.../configs/localization/bmn/README.md | 115 +
.../configs/localization/bmn/README_zh-CN.md | 98 +
.../bmn_400x100_2x8_9e_activitynet_feature.py | 88 +
.../configs/localization/bmn/metafile.yml | 73 +
.../configs/localization/bsn/README.md | 173 ++
.../configs/localization/bsn/README_zh-CN.md | 156 ++
...em_400x100_1x16_20e_activitynet_feature.py | 95 +
.../bsn_pgm_400x100_activitynet_feature.py | 32 +
...em_400x100_1x16_20e_activitynet_feature.py | 79 +
.../configs/localization/bsn/metafile.yml | 85 +
.../configs/localization/ssn/README.md | 79 +
.../configs/localization/ssn/README_zh-CN.md | 63 +
.../configs/localization/ssn/metafile.yml | 30 +
.../ssn/ssn_r50_450e_thumos14_rgb_test.py | 109 +
.../ssn/ssn_r50_450e_thumos14_rgb_train.py | 154 ++
.../configs/recognition/c3d/README.md | 87 +
.../configs/recognition/c3d/README_zh-CN.md | 69 +
.../c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py | 95 +
.../configs/recognition/c3d/metafile.yml | 30 +
.../configs/recognition/csn/README.md | 108 +
.../configs/recognition/csn/README_zh-CN.md | 92 +
...frozen_r152_32x2x1_180e_kinetics400_rgb.py | 95 +
...nfrozen_r152_32x2x1_58e_kinetics400_rgb.py | 15 +
...nfrozen_r152_32x2x1_58e_kinetics400_rgb.py | 88 +
...frozen_r152_32x2x1_180e_kinetics400_rgb.py | 95 +
...nfrozen_r50_32x2x1_180e_kinetics400_rgb.py | 97 +
...nfrozen_r152_32x2x1_58e_kinetics400_rgb.py | 102 +
...bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py | 103 +
...trained_r152_32x2x1_58e_kinetics400_rgb.py | 100 +
...nfrozen_r152_32x2x1_58e_kinetics400_rgb.py | 88 +
.../configs/recognition/csn/metafile.yml | 204 ++
.../configs/recognition/i3d/README.md | 108 +
.../configs/recognition/i3d/README_zh-CN.md | 91 +
...product_r50_32x2x1_100e_kinetics400_rgb.py | 96 +
...aussian_r50_32x2x1_100e_kinetics400_rgb.py | 13 +
...aussian_r50_32x2x1_100e_kinetics400_rgb.py | 13 +
.../i3d_r50_32x2x1_100e_kinetics400_rgb.py | 86 +
...d_r50_dense_32x2x1_100e_kinetics400_rgb.py | 80 +
...3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py | 88 +
...3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py | 84 +
...d_r50_video_32x2x1_100e_kinetics400_rgb.py | 83 +
..._video_heavy_8x8x1_100e_kinetics400_rgb.py | 83 +
...ideo_imgaug_32x2x1_100e_kinetics400_rgb.py | 111 +
...o_inference_32x2x1_100e_kinetics400_rgb.py | 30 +
.../configs/recognition/i3d/metafile.yml | 237 ++
.../configs/recognition/omnisource/README.md | 80 +
.../recognition/omnisource/README_zh-CN.md | 72 +
.../recognition/omnisource/metafile.yml | 388 ++++
.../recognition/omnisource/pipeline.png | Bin 0 -> 245041 bytes
...8x8x1_256e_minikinetics_googleimage_rgb.py | 130 ++
...50_8x8x1_256e_minikinetics_insvideo_rgb.py | 134 ++
...8x8x1_256e_minikinetics_kineticsraw_rgb.py | 133 ++
..._8x8x1_256e_minikinetics_omnisource_rgb.py | 181 ++
...lowonly_r50_8x8x1_256e_minikinetics_rgb.py | 108 +
...50_8x8x1_256e_minikinetics_webimage_rgb.py | 132 ++
...1x1x8_100e_minikinetics_googleimage_rgb.py | 126 ++
...50_1x1x8_100e_minikinetics_insvideo_rgb.py | 130 ++
...1x1x8_100e_minikinetics_kineticsraw_rgb.py | 129 ++
..._1x1x8_100e_minikinetics_omnisource_rgb.py | 177 ++
.../tsn_r50_1x1x8_100e_minikinetics_rgb.py | 100 +
...50_1x1x8_100e_minikinetics_webimage_rgb.py | 129 ++
.../configs/recognition/r2plus1d/README.md | 88 +
.../recognition/r2plus1d/README_zh-CN.md | 73 +
.../configs/recognition/r2plus1d/metafile.yml | 99 +
...2plus1d_r34_32x2x1_180e_kinetics400_rgb.py | 81 +
...r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py | 92 +
...1d_r34_video_8x8x1_180e_kinetics400_rgb.py | 87 +
...eo_inference_8x8x1_180e_kinetics400_rgb.py | 33 +
.../configs/recognition/slowfast/README.md | 101 +
.../recognition/slowfast/README_zh-CN.md | 86 +
.../configs/recognition/slowfast/metafile.yml | 260 +++
...ultigrid_r50_8x8x1_358e_kinetics400_rgb.py | 153 ++
...t_prebn_r50_4x16x1_256e_kinetics400_rgb.py | 96 +
...n_r50_8x8x1_256e_kinetics400_rgb_steplr.py | 15 +
...lowfast_r101_8x8x1_256e_kinetics400_rgb.py | 137 ++
...st_r101_r50_4x16x1_256e_kinetics400_rgb.py | 136 ++
...st_r152_r50_4x16x1_256e_kinetics400_rgb.py | 136 ++
.../slowfast_r50_16x8x1_22e_sthv1_rgb.py | 111 +
...lowfast_r50_4x16x1_256e_kinetics400_rgb.py | 94 +
...slowfast_r50_8x8x1_256e_kinetics400_rgb.py | 10 +
...t_r50_8x8x1_256e_kinetics400_rgb_steplr.py | 13 +
...t_r50_video_4x16x1_256e_kinetics400_rgb.py | 85 +
...o_inference_4x16x1_256e_kinetics400_rgb.py | 32 +
.../configs/recognition/slowonly/README.md | 160 ++
.../recognition/slowonly/README_zh-CN.md | 145 ++
...edcrop_256p_4x16x1_256e_kinetics400_rgb.py | 115 +
...edcrop_320p_4x16x1_256e_kinetics400_rgb.py | 114 +
...rop_340x256_4x16x1_256e_kinetics400_rgb.py | 114 +
.../configs/recognition/slowonly/metafile.yml | 550 +++++
...et_pretrained_r50_4x16x1_120e_gym99_rgb.py | 89 +
...trained_r50_4x16x1_150e_kinetics400_rgb.py | 96 +
...net_pretrained_r50_8x4x1_64e_hmdb51_rgb.py | 93 +
...enet_pretrained_r50_8x4x1_64e_sthv1_rgb.py | 100 +
...enet_pretrained_r50_8x4x1_64e_sthv2_rgb.py | 97 +
...net_pretrained_r50_8x4x1_64e_ucf101_rgb.py | 93 +
...etrained_r50_8x8x1_150e_kinetics400_rgb.py | 96 +
...net_pretrained_r50_8x8x1_64e_jester_rgb.py | 97 +
...0_pretrained_r50_4x16x1_120e_gym99_flow.py | 101 +
...400_pretrained_r50_8x4x1_40e_hmdb51_rgb.py | 81 +
...400_pretrained_r50_8x4x1_40e_ucf101_rgb.py | 97 +
...aussian_r50_4x16x1_150e_kinetics400_rgb.py | 93 +
...gaussian_r50_8x8x1_150e_kinetics400_rgb.py | 98 +
...lowonly_r101_8x8x1_196e_kinetics400_rgb.py | 21 +
...owonly_r50_4x16x1_256e_kinetics400_flow.py | 103 +
...lowonly_r50_4x16x1_256e_kinetics400_rgb.py | 93 +
...lowonly_r50_8x8x1_256e_kinetics400_flow.py | 103 +
...slowonly_r50_8x8x1_256e_kinetics400_rgb.py | 93 +
..._r50_clip_feature_extraction_4x16x1_rgb.py | 45 +
...y_r50_video_4x16x1_256e_kinetics400_rgb.py | 96 +
...ly_r50_video_8x8x1_256e_kinetics600_rgb.py | 93 +
...ly_r50_video_8x8x1_256e_kinetics700_rgb.py | 92 +
...o_inference_4x16x1_256e_kinetics400_rgb.py | 33 +
.../configs/recognition/tanet/README.md | 92 +
.../configs/recognition/tanet/README_zh-CN.md | 77 +
.../configs/recognition/tanet/metafile.yml | 80 +
.../tanet/tanet_r50_1x1x16_50e_sthv1_rgb.py | 102 +
.../tanet/tanet_r50_1x1x8_50e_sthv1_rgb.py | 100 +
...et_r50_dense_1x1x8_100e_kinetics400_rgb.py | 100 +
.../configs/recognition/timesformer/README.md | 88 +
.../recognition/timesformer/README_zh-CN.md | 72 +
.../recognition/timesformer/metafile.yml | 70 +
...former_divST_8x32x1_15e_kinetics400_rgb.py | 120 ++
...rmer_jointST_8x32x1_15e_kinetics400_rgb.py | 119 +
...er_spaceOnly_8x32x1_15e_kinetics400_rgb.py | 118 +
.../configs/recognition/tin/README.md | 102 +
.../configs/recognition/tin/README_zh-CN.md | 85 +
.../configs/recognition/tin/metafile.yml | 76 +
.../tin/tin_r50_1x1x8_40e_sthv1_rgb.py | 106 +
.../tin/tin_r50_1x1x8_40e_sthv2_rgb.py | 103 +
..._finetune_r50_1x1x8_50e_kinetics400_rgb.py | 93 +
.../configs/recognition/tpn/README.md | 92 +
.../configs/recognition/tpn/README_zh-CN.md | 74 +
.../configs/recognition/tpn/metafile.yml | 76 +
...ed_slowonly_r50_8x8x1_150e_kinetics_rgb.py | 89 +
...pn_slowonly_r50_8x8x1_150e_kinetics_rgb.py | 7 +
.../tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py | 89 +
.../configs/recognition/trn/README.md | 94 +
.../configs/recognition/trn/README_zh-CN.md | 78 +
.../configs/recognition/trn/metafile.yml | 55 +
.../trn/trn_r50_1x1x8_50e_sthv1_rgb.py | 102 +
.../trn/trn_r50_1x1x8_50e_sthv2_rgb.py | 99 +
.../configs/recognition/tsm/README.md | 193 ++
.../configs/recognition/tsm/README_zh-CN.md | 184 ++
.../configs/recognition/tsm/metafile.yml | 830 +++++++
...00_pretrained_r50_1x1x16_25e_hmdb51_rgb.py | 101 +
...00_pretrained_r50_1x1x16_25e_ucf101_rgb.py | 101 +
...400_pretrained_r50_1x1x8_25e_hmdb51_rgb.py | 101 +
...400_pretrained_r50_1x1x8_25e_ucf101_rgb.py | 101 +
...enetv2_dense_1x1x8_100e_kinetics400_rgb.py | 88 +
..._video_dense_1x1x8_100e_kinetics400_rgb.py | 96 +
...erence_dense_1x1x8_100e_kinetics400_rgb.py | 33 +
...t_product_r50_1x1x8_50e_kinetics400_rgb.py | 96 +
..._gaussian_r50_1x1x8_50e_kinetics400_rgb.py | 96 +
..._gaussian_r50_1x1x8_50e_kinetics400_rgb.py | 96 +
.../tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py | 7 +
.../tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py | 90 +
.../tsm_r50_1x1x16_100e_kinetics400_rgb.py | 7 +
.../tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py | 95 +
.../tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py | 99 +
.../tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py | 96 +
.../tsm/tsm_r50_1x1x8_100e_kinetics400_rgb.py | 6 +
.../tsm/tsm_r50_1x1x8_50e_jester_rgb.py | 91 +
.../tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py | 87 +
.../tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py | 95 +
.../tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py | 94 +
.../tsm/tsm_r50_cutmix_1x1x8_50e_sthv1_rgb.py | 115 +
...sm_r50_dense_1x1x8_100e_kinetics400_rgb.py | 87 +
...tsm_r50_dense_1x1x8_50e_kinetics400_rgb.py | 7 +
.../tsm/tsm_r50_flip_1x1x8_50e_sthv1_rgb.py | 99 +
...50_flip_randaugment_1x1x8_50e_sthv1_rgb.py | 100 +
...gpu_normalize_1x1x8_50e_kinetics400_rgb.py | 93 +
.../tsm/tsm_r50_mixup_1x1x8_50e_sthv1_rgb.py | 114 +
.../tsm_r50_ptv_augmix_1x1x8_50e_sthv1_rgb.py | 96 +
...r50_ptv_randaugment_1x1x8_50e_sthv1_rgb.py | 96 +
...tsm_r50_randaugment_1x1x8_50e_sthv1_rgb.py | 96 +
.../tsm_r50_video_1x1x16_50e_diving48_rgb.py | 102 +
.../tsm_r50_video_1x1x8_50e_diving48_rgb.py | 100 +
...tsm_r50_video_1x1x8_50e_kinetics400_rgb.py | 94 +
...eo_inference_1x1x8_100e_kinetics400_rgb.py | 31 +
...oral_pool_r50_1x1x8_50e_kinetics400_rgb.py | 8 +
.../configs/recognition/tsn/README.md | 248 +++
.../configs/recognition/tsn/README_zh-CN.md | 234 ++
...ense161_320p_1x1x3_100e_kinetics400_rgb.py | 99 +
...1_32x4d_320p_1x1x3_100e_kinetics400_rgb.py | 108 +
...r_video_320p_1x1x3_100e_kinetics400_rgb.py | 103 +
...alecrop_256p_1x1x3_100e_kinetics400_rgb.py | 89 +
...alecrop_320p_1x1x3_100e_kinetics400_rgb.py | 89 +
...crop_340x256_1x1x3_100e_kinetics400_rgb.py | 88 +
...zedcrop_256p_1x1x3_100e_kinetics400_rgb.py | 83 +
...zedcrop_320p_1x1x3_100e_kinetics400_rgb.py | 83 +
...crop_340x256_1x1x3_100e_kinetics400_rgb.py | 84 +
...256p_1x1x25_10crop_100e_kinetics400_rgb.py | 32 +
..._256p_1x1x25_3crop_100e_kinetics400_rgb.py | 32 +
...320p_1x1x25_10crop_100e_kinetics400_rgb.py | 32 +
..._320p_1x1x25_3crop_100e_kinetics400_rgb.py | 32 +
...x256_1x1x25_10crop_100e_kinetics400_rgb.py | 32 +
...0x256_1x1x25_3crop_100e_kinetics400_rgb.py | 32 +
.../hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py | 102 +
.../tsn_r18_1x1x8_100e_hvu_attribute_rgb.py | 102 +
.../hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py | 102 +
.../hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py | 102 +
.../hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py | 102 +
.../hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py | 102 +
.../configs/recognition/tsn/metafile.yml | 960 +++++++++
...tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py | 89 +
.../tsn/tsn_r101_1x1x5_50e_mmit_rgb.py | 116 +
.../tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py | 94 +
.../tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py | 92 +
.../tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py | 86 +
.../tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py | 91 +
.../tsn/tsn_r50_1x1x6_100e_mit_rgb.py | 95 +
.../tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py | 90 +
...sn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py | 91 +
.../tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py | 90 +
.../tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py | 101 +
.../tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py | 93 +
...tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py | 75 +
...sn_r50_320p_1x1x3_110e_kinetics400_flow.py | 96 +
...tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py | 85 +
...sn_r50_320p_1x1x8_110e_kinetics400_flow.py | 96 +
...0_320p_1x1x8_150e_activitynet_clip_flow.py | 107 +
..._320p_1x1x8_150e_activitynet_video_flow.py | 105 +
...r50_320p_1x1x8_50e_activitynet_clip_rgb.py | 98 +
...50_320p_1x1x8_50e_activitynet_video_rgb.py | 88 +
...n_r50_clip_feature_extraction_1x1x3_rgb.py | 42 +
...sn_r50_dense_1x1x5_100e_kinetics400_rgb.py | 96 +
...sn_r50_dense_1x1x8_100e_kinetics400_rgb.py | 91 +
...50_inference_1x1x3_100e_kinetics400_rgb.py | 29 +
.../tsn_r50_video_1x1x16_100e_diving48_rgb.py | 98 +
.../tsn_r50_video_1x1x8_100e_diving48_rgb.py | 98 +
...sn_r50_video_1x1x8_100e_kinetics400_rgb.py | 87 +
...sn_r50_video_1x1x8_100e_kinetics600_rgb.py | 91 +
...sn_r50_video_1x1x8_100e_kinetics700_rgb.py | 91 +
...0_video_320p_1x1x3_100e_kinetics400_rgb.py | 82 +
..._video_dense_1x1x8_100e_kinetics400_rgb.py | 88 +
...video_imgaug_1x1x8_100e_kinetics400_rgb.py | 126 ++
...eo_inference_1x1x3_100e_kinetics400_rgb.py | 30 +
..._video_mixup_1x1x8_100e_kinetics400_rgb.py | 107 +
.../configs/recognition/x3d/README.md | 68 +
.../configs/recognition/x3d/README_zh-CN.md | 52 +
.../configs/recognition/x3d/metafile.yml | 51 +
.../x3d_m_16x5x1_facebook_kinetics400_rgb.py | 33 +
.../x3d_s_13x6x1_facebook_kinetics400_rgb.py | 33 +
...0_64x1x1_100e_kinetics400_audio_feature.py | 80 +
.../recognition_audio/resnet/README.md | 97 +
.../recognition_audio/resnet/README_zh-CN.md | 81 +
.../recognition_audio/resnet/metafile.yml | 27 +
...8_64x1x1_100e_kinetics400_audio_feature.py | 89 +
.../tsn_r50_64x1x1_100e_kinetics400_audio.py | 84 +
.../2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py | 79 +
.../2sagcn_80e_ntu60_xsub_keypoint_3d.py | 76 +
.../configs/skeleton/2s-agcn/README.md | 90 +
.../configs/skeleton/2s-agcn/README_zh-CN.md | 76 +
.../configs/skeleton/2s-agcn/metafile.yml | 40 +
.../configs/skeleton/posec3d/README.md | 149 ++
.../configs/skeleton/posec3d/README_zh-CN.md | 133 ++
.../posec3d/custom_dataset_training.md | 41 +
.../configs/skeleton/posec3d/metafile.yml | 159 ++
...ned_r50_u48_120e_hmdb51_split1_keypoint.py | 131 ++
...ned_r50_u48_120e_ucf101_split1_keypoint.py | 131 ++
.../slowonly_r50_u48_240e_gym_keypoint.py | 128 ++
.../posec3d/slowonly_r50_u48_240e_gym_limb.py | 134 ++
...wonly_r50_u48_240e_ntu120_xsub_keypoint.py | 130 ++
.../slowonly_r50_u48_240e_ntu120_xsub_limb.py | 136 ++
...owonly_r50_u48_240e_ntu60_xsub_keypoint.py | 128 ++
.../slowonly_r50_u48_240e_ntu60_xsub_limb.py | 134 ++
.../configs/skeleton/stgcn/README.md | 84 +
.../configs/skeleton/stgcn/README_zh-CN.md | 70 +
.../configs/skeleton/stgcn/metafile.yml | 112 +
.../skeleton/stgcn/stgcn_80e_babel120.py | 78 +
.../skeleton/stgcn/stgcn_80e_babel120_wfl.py | 89 +
.../skeleton/stgcn/stgcn_80e_babel60.py | 78 +
.../skeleton/stgcn/stgcn_80e_babel60_wfl.py | 86 +
.../stgcn/stgcn_80e_ntu60_xsub_keypoint.py | 80 +
.../stgcn/stgcn_80e_ntu60_xsub_keypoint_3d.py | 77 +
.../mmaction2-0.24.1/demo/README.md | 674 ++++++
openmmlab_test/mmaction2-0.24.1/demo/demo.gif | Bin 0 -> 1642286 bytes
.../mmaction2-0.24.1/demo/demo.ipynb | 128 ++
openmmlab_test/mmaction2-0.24.1/demo/demo.mp4 | Bin 0 -> 635539 bytes
openmmlab_test/mmaction2-0.24.1/demo/demo.py | 207 ++
.../mmaction2-0.24.1/demo/demo_audio.py | 51 +
.../mmaction2-0.24.1/demo/demo_gradcam.gif | Bin 0 -> 424951 bytes
.../mmaction2-0.24.1/demo/demo_gradcam.py | 208 ++
.../mmaction2-0.24.1/demo/demo_out.mp4 | Bin 0 -> 423540 bytes
.../mmaction2-0.24.1/demo/demo_skeleton.py | 253 +++
.../demo/demo_spatiotemporal_det.py | 421 ++++
.../demo/demo_video_structuralize.py | 786 +++++++
.../demo/faster_rcnn_r50_fpn_2x_coco.py | 182 ++
.../mmaction2-0.24.1/demo/fuse/data_list.txt | 100 +
.../demo/hrnet_w32_coco_256x192.py | 174 ++
.../mmaction2-0.24.1/demo/long_video_demo.py | 265 +++
.../demo/mmaction2_tutorial.ipynb | 1461 +++++++++++++
.../demo/mmaction2_tutorial_zh-CN.ipynb | 1665 ++++++++++++++
.../mmaction2-0.24.1/demo/ntu_sample.avi | Bin 0 -> 1119546 bytes
.../demo/test_video_structuralize.mp4 | Bin 0 -> 579876 bytes
.../demo/visualize_heatmap_volume.ipynb | 403 ++++
.../mmaction2-0.24.1/demo/webcam_demo.py | 223 ++
.../demo/webcam_demo_spatiotemporal_det.py | 856 ++++++++
.../mmaction2-0.24.1/docker/Dockerfile | 25 +
.../mmaction2-0.24.1/docker/serve/Dockerfile | 51 +
.../docker/serve/config.properties | 5 +
.../docker/serve/entrypoint.sh | 12 +
openmmlab_test/mmaction2-0.24.1/docs/Makefile | 20 +
.../docs/_static/css/readthedocs.css | 6 +
.../docs/_static/images/mmaction2.png | Bin 0 -> 31100 bytes
openmmlab_test/mmaction2-0.24.1/docs/api.rst | 101 +
.../mmaction2-0.24.1/docs/benchmark.md | 160 ++
.../mmaction2-0.24.1/docs/changelog.md | 792 +++++++
openmmlab_test/mmaction2-0.24.1/docs/conf.py | 136 ++
.../mmaction2-0.24.1/docs/data_preparation.md | 154 ++
openmmlab_test/mmaction2-0.24.1/docs/faq.md | 132 ++
.../docs/feature_extraction.md | 70 +
.../mmaction2-0.24.1/docs/getting_started.md | 468 ++++
.../mmaction2-0.24.1/docs/index.rst | 75 +
.../mmaction2-0.24.1/docs/install.md | 255 +++
openmmlab_test/mmaction2-0.24.1/docs/make.bat | 35 +
.../mmaction2-0.24.1/docs/merge_docs.sh | 48 +
.../mmaction2-0.24.1/docs/projects.md | 23 +
openmmlab_test/mmaction2-0.24.1/docs/stat.py | 174 ++
.../docs/supported_datasets.md | 36 +
.../mmaction2-0.24.1/docs/switch_language.md | 3 +
.../docs/tutorials/1_config.md | 757 +++++++
.../docs/tutorials/2_finetune.md | 99 +
.../docs/tutorials/3_new_dataset.md | 252 +++
.../docs/tutorials/4_data_pipeline.md | 262 +++
.../docs/tutorials/5_new_modules.md | 291 +++
.../docs/tutorials/6_export_model.md | 74 +
.../docs/tutorials/7_customize_runtime.md | 350 +++
.../mmaction2-0.24.1/docs/useful_tools.md | 230 ++
.../mmaction2-0.24.1/docs_zh_CN/Makefile | 20 +
.../mmaction2-0.24.1/docs_zh_CN/README.md | 1 +
.../mmaction2-0.24.1/docs_zh_CN/api.rst | 101 +
.../mmaction2-0.24.1/docs_zh_CN/benchmark.md | 157 ++
.../mmaction2-0.24.1/docs_zh_CN/conf.py | 132 ++
.../docs_zh_CN/data_preparation.md | 155 ++
.../mmaction2-0.24.1/docs_zh_CN/demo.md | 630 ++++++
.../mmaction2-0.24.1/docs_zh_CN/faq.md | 112 +
.../docs_zh_CN/feature_extraction.md | 70 +
.../docs_zh_CN/getting_started.md | 457 ++++
.../mmaction2-0.24.1/docs_zh_CN/index.rst | 74 +
.../mmaction2-0.24.1/docs_zh_CN/install.md | 244 +++
.../mmaction2-0.24.1/docs_zh_CN/make.bat | 35 +
.../mmaction2-0.24.1/docs_zh_CN/merge_docs.sh | 41 +
.../mmaction2-0.24.1/docs_zh_CN/stat.py | 173 ++
.../docs_zh_CN/supported_datasets.md | 34 +
.../docs_zh_CN/switch_language.md | 3 +
.../docs_zh_CN/tutorials/1_config.md | 748 +++++++
.../docs_zh_CN/tutorials/2_finetune.md | 93 +
.../docs_zh_CN/tutorials/3_new_dataset.md | 245 +++
.../docs_zh_CN/tutorials/4_data_pipeline.md | 257 +++
.../docs_zh_CN/tutorials/5_new_modules.md | 279 +++
.../docs_zh_CN/tutorials/6_export_model.md | 75 +
.../tutorials/7_customize_runtime.md | 347 +++
.../docs_zh_CN/useful_tools.md | 161 ++
.../mmaction2-0.24.1/mmaction/__init__.py | 16 +
.../mmaction/apis/__init__.py | 9 +
.../mmaction/apis/inference.py | 192 ++
.../mmaction2-0.24.1/mmaction/apis/test.py | 206 ++
.../mmaction2-0.24.1/mmaction/apis/train.py | 304 +++
.../mmaction/core/__init__.py | 9 +
.../mmaction/core/bbox/__init__.py | 6 +
.../mmaction/core/bbox/assigners/__init__.py | 4 +
.../bbox/assigners/max_iou_assigner_ava.py | 142 ++
.../mmaction/core/bbox/bbox_target.py | 42 +
.../mmaction/core/bbox/transforms.py | 57 +
.../mmaction/core/dist_utils.py | 43 +
.../mmaction/core/evaluation/__init__.py | 18 +
.../mmaction/core/evaluation/accuracy.py | 568 +++++
.../core/evaluation/ava_evaluation/README.md | 2 +
.../evaluation/ava_evaluation/__init__.py | 1 +
.../core/evaluation/ava_evaluation/metrics.py | 142 ++
.../evaluation/ava_evaluation/np_box_list.py | 139 ++
.../evaluation/ava_evaluation/np_box_ops.py | 98 +
.../object_detection_evaluation.py | 574 +++++
.../ava_evaluation/per_image_evaluation.py | 358 ++++
.../ava_evaluation/standard_fields.py | 115 +
.../mmaction/core/evaluation/ava_utils.py | 240 +++
.../core/evaluation/eval_detection.py | 234 ++
.../mmaction/core/evaluation/eval_hooks.py | 391 ++++
.../mmaction/core/hooks/__init__.py | 4 +
.../mmaction/core/hooks/output.py | 68 +
.../mmaction/core/lr/__init__.py | 4 +
.../mmaction/core/lr/multigridlr.py | 41 +
.../mmaction/core/optimizer/__init__.py | 5 +
.../mmaction/core/optimizer/copy_of_sgd.py | 12 +
.../optimizer/tsm_optimizer_constructor.py | 110 +
.../mmaction/core/runner/__init__.py | 4 +
.../mmaction/core/runner/omnisource_runner.py | 162 ++
.../mmaction/core/scheduler/__init__.py | 4 +
.../mmaction/core/scheduler/lr_updater.py | 40 +
.../mmaction/datasets/__init__.py | 28 +
.../mmaction/datasets/activitynet_dataset.py | 270 +++
.../mmaction/datasets/audio_dataset.py | 70 +
.../datasets/audio_feature_dataset.py | 71 +
.../mmaction/datasets/audio_visual_dataset.py | 77 +
.../mmaction/datasets/ava_dataset.py | 393 ++++
.../mmaction/datasets/base.py | 289 +++
.../mmaction/datasets/blending_utils.py | 143 ++
.../mmaction/datasets/builder.py | 168 ++
.../mmaction/datasets/dataset_wrappers.py | 71 +
.../mmaction/datasets/hvu_dataset.py | 192 ++
.../mmaction/datasets/image_dataset.py | 46 +
.../mmaction/datasets/pipelines/__init__.py | 41 +
.../datasets/pipelines/augmentations.py | 1905 +++++++++++++++++
.../mmaction/datasets/pipelines/compose.py | 61 +
.../mmaction/datasets/pipelines/formatting.py | 490 +++++
.../mmaction/datasets/pipelines/loading.py | 1850 ++++++++++++++++
.../datasets/pipelines/pose_loading.py | 695 ++++++
.../mmaction/datasets/pose_dataset.py | 113 +
.../mmaction/datasets/rawframe_dataset.py | 212 ++
.../mmaction/datasets/rawvideo_dataset.py | 147 ++
.../mmaction/datasets/samplers/__init__.py | 5 +
.../datasets/samplers/distributed_sampler.py | 142 ++
.../mmaction/datasets/ssn_dataset.py | 882 ++++++++
.../mmaction/datasets/video_dataset.py | 61 +
.../mmaction/localization/__init__.py | 11 +
.../mmaction/localization/bsn_utils.py | 268 +++
.../mmaction/localization/proposal_utils.py | 95 +
.../mmaction/localization/ssn_utils.py | 169 ++
.../mmaction/models/__init__.py | 45 +
.../mmaction/models/backbones/__init__.py | 25 +
.../mmaction/models/backbones/agcn.py | 338 +++
.../mmaction/models/backbones/c3d.py | 143 ++
.../mmaction/models/backbones/mobilenet_v2.py | 301 +++
.../models/backbones/mobilenet_v2_tsm.py | 41 +
.../mmaction/models/backbones/resnet.py | 591 +++++
.../models/backbones/resnet2plus1d.py | 50 +
.../mmaction/models/backbones/resnet3d.py | 1034 +++++++++
.../mmaction/models/backbones/resnet3d_csn.py | 157 ++
.../models/backbones/resnet3d_slowfast.py | 531 +++++
.../models/backbones/resnet3d_slowonly.py | 53 +
.../mmaction/models/backbones/resnet_audio.py | 374 ++++
.../mmaction/models/backbones/resnet_tin.py | 377 ++++
.../mmaction/models/backbones/resnet_tsm.py | 295 +++
.../mmaction/models/backbones/stgcn.py | 281 +++
.../mmaction/models/backbones/tanet.py | 115 +
.../mmaction/models/backbones/timesformer.py | 285 +++
.../mmaction/models/backbones/x3d.py | 524 +++++
.../mmaction/models/builder.py | 92 +
.../mmaction/models/common/__init__.py | 14 +
.../mmaction/models/common/conv2plus1d.py | 105 +
.../mmaction/models/common/conv_audio.py | 105 +
.../mmaction/models/common/lfb.py | 189 ++
.../mmaction/models/common/sub_batchnorm3d.py | 75 +
.../mmaction/models/common/tam.py | 122 ++
.../mmaction/models/common/transformer.py | 216 ++
.../mmaction/models/heads/__init__.py | 25 +
.../mmaction/models/heads/audio_tsn_head.py | 74 +
.../mmaction/models/heads/base.py | 117 +
.../mmaction/models/heads/bbox_head.py | 306 +++
.../mmaction/models/heads/fbo_head.py | 401 ++++
.../mmaction/models/heads/i3d_head.py | 74 +
.../mmaction/models/heads/lfb_infer_head.py | 148 ++
.../mmaction/models/heads/misc_head.py | 134 ++
.../mmaction/models/heads/roi_head.py | 128 ++
.../mmaction/models/heads/slowfast_head.py | 80 +
.../mmaction/models/heads/ssn_head.py | 413 ++++
.../mmaction/models/heads/stgcn_head.py | 65 +
.../mmaction/models/heads/timesformer_head.py | 41 +
.../mmaction/models/heads/tpn_head.py | 91 +
.../mmaction/models/heads/trn_head.py | 211 ++
.../mmaction/models/heads/tsm_head.py | 112 +
.../mmaction/models/heads/tsn_head.py | 95 +
.../mmaction/models/heads/x3d_head.py | 90 +
.../mmaction/models/localizers/__init__.py | 7 +
.../mmaction/models/localizers/base.py | 262 +++
.../mmaction/models/localizers/bmn.py | 417 ++++
.../mmaction/models/localizers/bsn.py | 395 ++++
.../mmaction/models/localizers/ssn.py | 136 ++
.../models/localizers/utils/__init__.py | 4 +
.../localizers/utils/post_processing.py | 45 +
.../mmaction/models/losses/__init__.py | 16 +
.../mmaction/models/losses/base.py | 45 +
.../losses/binary_logistic_regression_loss.py | 62 +
.../mmaction/models/losses/bmn_loss.py | 181 ++
.../models/losses/cross_entropy_loss.py | 191 ++
.../mmaction/models/losses/hvu_loss.py | 142 ++
.../mmaction/models/losses/nll_loss.py | 27 +
.../mmaction/models/losses/ohem_hinge_loss.py | 65 +
.../mmaction/models/losses/ssn_loss.py | 180 ++
.../mmaction/models/necks/__init__.py | 4 +
.../mmaction/models/necks/tpn.py | 449 ++++
.../mmaction/models/recognizers/__init__.py | 7 +
.../models/recognizers/audio_recognizer.py | 102 +
.../mmaction/models/recognizers/base.py | 335 +++
.../models/recognizers/recognizer2d.py | 186 ++
.../models/recognizers/recognizer3d.py | 128 ++
.../models/roi_extractors/__init__.py | 4 +
.../roi_extractors/single_straight3d.py | 121 ++
.../mmaction/models/skeleton_gcn/__init__.py | 5 +
.../mmaction/models/skeleton_gcn/base.py | 176 ++
.../models/skeleton_gcn/skeletongcn.py | 30 +
.../models/skeleton_gcn/utils/__init__.py | 4 +
.../models/skeleton_gcn/utils/graph.py | 196 ++
.../mmaction/utils/__init__.py | 15 +
.../mmaction/utils/collect_env.py | 17 +
.../mmaction/utils/distribution_env.py | 94 +
.../mmaction/utils/gradcam_utils.py | 232 ++
.../mmaction2-0.24.1/mmaction/utils/logger.py | 25 +
.../mmaction2-0.24.1/mmaction/utils/misc.py | 27 +
.../mmaction/utils/module_hooks.py | 88 +
.../mmaction/utils/multigrid/__init__.py | 8 +
.../utils/multigrid/longshortcyclehook.py | 257 +++
.../mmaction/utils/multigrid/short_sampler.py | 61 +
.../utils/multigrid/subbn_aggregate.py | 22 +
.../mmaction/utils/precise_bn.py | 155 ++
.../mmaction/utils/setup_env.py | 47 +
.../mmaction2-0.24.1/mmaction/version.py | 18 +
.../mmaction2-0.24.1/model-index.yml | 24 +
.../mmaction2-0.24.1/requirements.txt | 3 +
.../mmaction2-0.24.1/requirements/build.txt | 8 +
.../mmaction2-0.24.1/requirements/docs.txt | 17 +
.../requirements/mminstall.txt | 1 +
.../requirements/optional.txt | 11 +
.../requirements/readthedocs.txt | 4 +
.../mmaction2-0.24.1/requirements/tests.txt | 9 +
.../mmaction2-0.24.1/resources/acc_curve.png | Bin 0 -> 39921 bytes
.../resources/data_pipeline.png | Bin 0 -> 117332 bytes
.../resources/mmaction2_logo.png | Bin 0 -> 31100 bytes
.../resources/mmaction2_overview.gif | Bin 0 -> 1701421 bytes
.../resources/qq_group_qrcode.png | Bin 0 -> 158874 bytes
.../resources/spatio-temporal-det.gif | Bin 0 -> 1302833 bytes
.../resources/zhihu_qrcode.jpg | Bin 0 -> 397245 bytes
openmmlab_test/mmaction2-0.24.1/setup.cfg | 24 +
openmmlab_test/mmaction2-0.24.1/setup.py | 196 ++
.../data/activitynet_features/v_test1.csv | 6 +
.../data/activitynet_features/v_test2.csv | 6 +
.../data/annotations/action_test_anno.json | 34 +
.../annotations/audio_feature_test_list.txt | 2 +
.../data/annotations/audio_test_list.txt | 2 +
.../data/annotations/hvu_frame_test_anno.json | 24 +
.../annotations/hvu_video_eval_test_anno.json | 18 +
.../data/annotations/hvu_video_test_anno.json | 22 +
.../annotations/proposal_normalized_list.txt | 18 +
.../data/annotations/proposal_test_list.txt | 18 +
.../data/annotations/rawframe_test_list.txt | 2 +
.../rawframe_test_list_multi_label.txt | 2 +
.../rawframe_test_list_with_offset.txt | 2 +
.../data/annotations/rawvideo_test_anno.json | 8 +
.../data/annotations/rawvideo_test_anno.txt | 1 +
.../tests/data/annotations/sample.pkl | Bin 0 -> 284438 bytes
.../data/annotations/video_test_list.txt | 2 +
.../video_test_list_multi_label.txt | 2 +
.../tests/data/ava_dataset/action_list.txt | 16 +
.../ava_excluded_timestamps_sample.csv | 2 +
.../data/ava_dataset/ava_proposals_sample.pkl | Bin 0 -> 476 bytes
.../tests/data/ava_dataset/ava_sample.csv | 8 +
.../tests/data/bsp_features/v_test1.npy | Bin 0 -> 170368 bytes
.../tests/data/eval_detection/action_list.txt | 12 +
.../tests/data/eval_detection/gt.csv | 12 +
.../tests/data/eval_detection/pred.csv | 30 +
.../tests/data/eval_detection/proposal.pkl | Bin 0 -> 2085 bytes
.../tests/data/eval_localization/gt.json | 46 +
.../tests/data/eval_localization/result.json | 120 ++
.../tests/data/imgs/img_00001.jpg | Bin 0 -> 19241 bytes
.../tests/data/imgs/img_00002.jpg | Bin 0 -> 20094 bytes
.../tests/data/imgs/img_00003.jpg | Bin 0 -> 20135 bytes
.../tests/data/imgs/img_00004.jpg | Bin 0 -> 20386 bytes
.../tests/data/imgs/img_00005.jpg | Bin 0 -> 20364 bytes
.../tests/data/imgs/img_00006.jpg | Bin 0 -> 20500 bytes
.../tests/data/imgs/img_00007.jpg | Bin 0 -> 20522 bytes
.../tests/data/imgs/img_00008.jpg | Bin 0 -> 20547 bytes
.../tests/data/imgs/img_00009.jpg | Bin 0 -> 20527 bytes
.../tests/data/imgs/img_00010.jpg | Bin 0 -> 20762 bytes
.../tests/data/imgs/x_00001.jpg | Bin 0 -> 3888 bytes
.../tests/data/imgs/x_00002.jpg | Bin 0 -> 5472 bytes
.../tests/data/imgs/x_00003.jpg | Bin 0 -> 5625 bytes
.../tests/data/imgs/x_00004.jpg | Bin 0 -> 5317 bytes
.../tests/data/imgs/x_00005.jpg | Bin 0 -> 4999 bytes
.../tests/data/imgs/y_00001.jpg | Bin 0 -> 3921 bytes
.../tests/data/imgs/y_00002.jpg | Bin 0 -> 5547 bytes
.../tests/data/imgs/y_00003.jpg | Bin 0 -> 5352 bytes
.../tests/data/imgs/y_00004.jpg | Bin 0 -> 5489 bytes
.../tests/data/imgs/y_00005.jpg | Bin 0 -> 4722 bytes
.../tests/data/lfb/lfb_unittest.pkl | Bin 0 -> 81082 bytes
.../tests/data/proposals/v_test1.csv | 10 +
.../tests/data/proposals/v_test2.csv | 7 +
.../tests/data/rawvideo_dataset/part_0.mp4 | Bin 0 -> 158581 bytes
.../tests/data/rawvideo_dataset/part_1.mp4 | Bin 0 -> 158581 bytes
.../tests/data/tem_results/v_test1.csv | 11 +
.../tests/data/tem_results/v_test2.csv | 11 +
.../mmaction2-0.24.1/tests/data/test.avi | Bin 0 -> 294566 bytes
.../mmaction2-0.24.1/tests/data/test.jpg | Bin 0 -> 18486 bytes
.../mmaction2-0.24.1/tests/data/test.mp4 | Bin 0 -> 1352828 bytes
.../mmaction2-0.24.1/tests/data/test.wav | Bin 0 -> 419710 bytes
.../tests/test_data/test_blending.py | 42 +
.../tests/test_data/test_compose.py | 72 +
.../tests/test_data/test_datasets/__init__.py | 4 +
.../tests/test_data/test_datasets/base.py | 150 ++
.../test_datasets/test_activitynet_dataset.py | 176 ++
.../test_datasets/test_audio_dataset.py | 78 +
.../test_audio_feature_dataset.py | 78 +
.../test_audio_visual_dataset.py | 29 +
.../test_datasets/test_ava_dataset.py | 221 ++
.../test_datasets/test_concat_dataset.py | 34 +
.../test_datasets/test_hvu_dataset.py | 82 +
.../test_datasets/test_pose_dataset.py | 62 +
.../test_datasets/test_rawframe_dataset.py | 165 ++
.../test_datasets/test_rawvideo_dataset.py | 30 +
.../test_datasets/test_repeat_dataset.py | 30 +
.../test_datasets/test_ssn_dataset.py | 176 ++
.../test_datasets/test_video_dataset.py | 100 +
.../tests/test_data/test_formating.py | 227 ++
.../test_augmentations/__init__.py | 4 +
.../test_pipelines/test_augmentations/base.py | 70 +
.../test_augmentations/test_audio.py | 54 +
.../test_augmentations/test_color.py | 35 +
.../test_augmentations/test_crop.py | 294 +++
.../test_augmentations/test_flip.py | 136 ++
.../test_augmentations/test_imgaug.py | 101 +
.../test_augmentations/test_lazy.py | 373 ++++
.../test_augmentations/test_misc.py | 19 +
.../test_augmentations/test_normalization.py | 71 +
.../test_augmentations/test_pytorchvideo.py | 71 +
.../test_augmentations/test_transform.py | 160 ++
.../test_pipelines/test_loadings/__init__.py | 4 +
.../test_pipelines/test_loadings/base.py | 93 +
.../test_loadings/test_decode.py | 498 +++++
.../test_pipelines/test_loadings/test_load.py | 152 ++
.../test_loadings/test_localization.py | 28 +
.../test_loadings/test_pose_loading.py | 391 ++++
.../test_loadings/test_sampling.py | 757 +++++++
.../tests/test_data/test_sampler.py | 96 +
.../tests/test_metrics/test_accuracy.py | 343 +++
.../tests/test_metrics/test_losses.py | 332 +++
.../tests/test_models/__init__.py | 13 +
.../tests/test_models/base.py | 167 ++
.../tests/test_models/test_backbones.py | 931 ++++++++
.../tests/test_models/test_common.py | 149 ++
.../test_common_modules/__init__.py | 1 +
.../test_common_modules/test_base_head.py | 73 +
.../test_base_recognizers.py | 66 +
.../test_common_modules/test_mobilenet_v2.py | 218 ++
.../test_common_modules/test_resnet.py | 128 ++
.../test_common_modules/test_resnet3d.py | 335 +++
.../test_models/test_detectors/__init__.py | 1 +
.../test_detectors/test_detectors.py | 42 +
.../tests/test_models/test_gradcam.py | 230 ++
.../tests/test_models/test_head.py | 608 ++++++
.../test_models/test_localizers/__init__.py | 1 +
.../test_models/test_localizers/test_bmn.py | 68 +
.../test_localizers/test_localizers.py | 34 +
.../test_models/test_localizers/test_pem.py | 49 +
.../test_models/test_localizers/test_ssn.py | 206 ++
.../test_models/test_localizers/test_tem.py | 28 +
.../tests/test_models/test_neck.py | 87 +
.../test_models/test_recognizers/__init__.py | 1 +
.../test_recognizers/test_audio_recognizer.py | 29 +
.../test_recognizers/test_recognizer2d.py | 282 +++
.../test_recognizers/test_recognizer3d.py | 314 +++
.../test_recognizers/test_skeletongcn.py | 51 +
.../tests/test_models/test_roi_extractor.py | 58 +
.../tests/test_runtime/test_apis_test.py | 119 +
.../tests/test_runtime/test_config.py | 74 +
.../tests/test_runtime/test_eval_hook.py | 347 +++
.../tests/test_runtime/test_inference.py | 149 ++
.../tests/test_runtime/test_lr.py | 121 ++
.../tests/test_runtime/test_optimizer.py | 214 ++
.../tests/test_runtime/test_precise_bn.py | 205 ++
.../tests/test_runtime/test_train.py | 125 ++
.../tests/test_utils/__init__.py | 1 +
.../tests/test_utils/test_bbox.py | 151 ++
.../test_utils/test_localization_utils.py | 204 ++
.../tests/test_utils/test_module_hooks.py | 144 ++
.../tests/test_utils/test_onnx.py | 33 +
.../tests/test_utils/test_setup_env.py | 68 +
.../mmaction2-0.24.1/tools/__init__.py | 5 +
.../tools/analysis/analyze_logs.py | 167 ++
.../tools/analysis/bench_processing.py | 65 +
.../tools/analysis/benchmark.py | 94 +
.../tools/analysis/check_videos.py | 158 ++
.../tools/analysis/eval_metric.py | 66 +
.../tools/analysis/get_flops.py | 73 +
.../tools/analysis/print_config.py | 27 +
.../tools/analysis/report_accuracy.py | 57 +
.../tools/analysis/report_map.py | 87 +
.../mmaction2-0.24.1/tools/argparse.bash | 103 +
.../tools/data/activitynet/README.md | 171 ++
.../tools/data/activitynet/README_zh-CN.md | 169 ++
.../tools/data/activitynet/action_name.csv | 201 ++
.../activitynet_feature_postprocessing.py | 99 +
.../activitynet/convert_proposal_format.py | 162 ++
.../tools/data/activitynet/download.py | 148 ++
.../data/activitynet/download_annotations.sh | 12 +
.../data/activitynet/download_bsn_videos.sh | 13 +
.../download_feature_annotations.sh | 16 +
.../data/activitynet/download_features.sh | 11 +
.../tools/data/activitynet/download_videos.sh | 13 +
.../tools/data/activitynet/environment.yml | 36 +
.../tools/data/activitynet/extract_frames.sh | 6 +
.../generate_rawframes_filelist.py | 113 +
.../tools/data/activitynet/label_map.txt | 200 ++
.../data/activitynet/process_annotations.py | 54 +
.../activitynet/tsn_feature_extraction.py | 149 ++
.../tools/data/anno_txt2json.py | 103 +
.../data/ava/AVA_annotation_explained.md | 34 +
.../mmaction2-0.24.1/tools/data/ava/README.md | 148 ++
.../tools/data/ava/README_zh-CN.md | 134 ++
.../tools/data/ava/cut_videos.sh | 34 +
.../tools/data/ava/download_annotations.sh | 15 +
.../tools/data/ava/download_videos.sh | 19 +
.../data/ava/download_videos_gnu_parallel.sh | 20 +
.../data/ava/download_videos_parallel.py | 66 +
.../data/ava/download_videos_parallel.sh | 15 +
.../tools/data/ava/extract_frames.sh | 6 +
.../tools/data/ava/extract_rgb_frames.sh | 7 +
.../data/ava/extract_rgb_frames_ffmpeg.sh | 44 +
.../tools/data/ava/fetch_ava_proposals.sh | 9 +
.../tools/data/ava/label_map.txt | 60 +
.../tools/data/build_audio_features.py | 316 +++
.../tools/data/build_file_list.py | 269 +++
.../tools/data/build_rawframes.py | 278 +++
.../tools/data/build_videos.py | 127 ++
.../tools/data/denormalize_proposal_file.py | 82 +
.../tools/data/diving48/README.md | 123 ++
.../tools/data/diving48/README_zh-CN.md | 123 ++
.../data/diving48/download_annotations.sh | 16 +
.../tools/data/diving48/download_videos.sh | 16 +
.../tools/data/diving48/extract_frames.sh | 6 +
.../tools/data/diving48/extract_rgb_frames.sh | 7 +
.../diving48/extract_rgb_frames_opencv.sh | 7 +
.../diving48/generate_rawframes_filelist.sh | 8 +
.../data/diving48/generate_videos_filelist.sh | 8 +
.../tools/data/diving48/label_map.txt | 48 +
.../tools/data/extract_audio.py | 61 +
.../mmaction2-0.24.1/tools/data/gym/README.md | 109 +
.../tools/data/gym/README_zh-CN.md | 109 +
.../tools/data/gym/download.py | 100 +
.../tools/data/gym/download_annotations.sh | 14 +
.../tools/data/gym/download_videos.sh | 14 +
.../tools/data/gym/environment.yml | 36 +
.../tools/data/gym/extract_frames.sh | 7 +
.../tools/data/gym/generate_file_list.py | 49 +
.../tools/data/gym/label_map.txt | 99 +
.../tools/data/gym/trim_event.py | 58 +
.../tools/data/gym/trim_subaction.py | 52 +
.../tools/data/hmdb51/README.md | 125 ++
.../tools/data/hmdb51/README_zh-CN.md | 121 ++
.../tools/data/hmdb51/download_annotations.sh | 22 +
.../tools/data/hmdb51/download_videos.sh | 27 +
.../tools/data/hmdb51/extract_frames.sh | 6 +
.../tools/data/hmdb51/extract_rgb_frames.sh | 7 +
.../data/hmdb51/extract_rgb_frames_opencv.sh | 7 +
.../hmdb51/generate_rawframes_filelist.sh | 8 +
.../data/hmdb51/generate_videos_filelist.sh | 8 +
.../tools/data/hmdb51/label_map.txt | 51 +
.../mmaction2-0.24.1/tools/data/hvu/README.md | 123 ++
.../tools/data/hvu/README_zh-CN.md | 110 +
.../tools/data/hvu/download.py | 203 ++
.../tools/data/hvu/download_annotations.sh | 22 +
.../tools/data/hvu/download_videos.sh | 15 +
.../tools/data/hvu/environment.yml | 36 +
.../tools/data/hvu/extract_frames.sh | 10 +
.../tools/data/hvu/generate_file_list.py | 152 ++
.../data/hvu/generate_rawframes_filelist.sh | 5 +
.../tools/data/hvu/generate_sub_file_list.py | 42 +
.../data/hvu/generate_videos_filelist.sh | 5 +
.../tools/data/hvu/label_map.json | 1 +
.../tools/data/hvu/parse_tag_list.py | 16 +
.../tools/data/jester/README.md | 143 ++
.../tools/data/jester/README_zh-CN.md | 143 ++
.../tools/data/jester/encode_videos.sh | 7 +
.../tools/data/jester/extract_flow.sh | 6 +
.../jester/generate_rawframes_filelist.sh | 8 +
.../data/jester/generate_videos_filelist.sh | 8 +
.../tools/data/jester/label_map.txt | 27 +
.../tools/data/jhmdb/README.md | 101 +
.../tools/data/jhmdb/README_zh-CN.md | 98 +
.../tools/data/kinetics/README.md | 150 ++
.../tools/data/kinetics/README_zh-CN.md | 142 ++
.../tools/data/kinetics/download.py | 230 ++
.../data/kinetics/download_annotations.sh | 26 +
.../kinetics/download_backup_annotations.sh | 25 +
.../tools/data/kinetics/download_videos.sh | 22 +
.../tools/data/kinetics/environment.yml | 36 +
.../tools/data/kinetics/extract_frames.sh | 18 +
.../tools/data/kinetics/extract_rgb_frames.sh | 18 +
.../kinetics/extract_rgb_frames_opencv.sh | 18 +
.../kinetics/generate_rawframes_filelist.sh | 17 +
.../data/kinetics/generate_videos_filelist.sh | 17 +
.../tools/data/kinetics/label_map_k400.txt | 400 ++++
.../tools/data/kinetics/label_map_k600.txt | 600 ++++++
.../tools/data/kinetics/label_map_k700.txt | 700 ++++++
.../tools/data/kinetics/rename_classnames.sh | 29 +
.../mmaction2-0.24.1/tools/data/mit/README.md | 128 ++
.../tools/data/mit/README_zh-CN.md | 130 ++
.../tools/data/mit/extract_frames.sh | 10 +
.../tools/data/mit/extract_rgb_frames.sh | 10 +
.../data/mit/extract_rgb_frames_opencv.sh | 10 +
.../data/mit/generate_rawframes_filelist.sh | 9 +
.../data/mit/generate_videos_filelist.sh | 9 +
.../tools/data/mit/label_map.txt | 339 +++
.../tools/data/mit/preprocess_data.sh | 27 +
.../tools/data/mmit/README.md | 113 +
.../tools/data/mmit/README_zh-CN.md | 115 +
.../tools/data/mmit/extract_frames.sh | 6 +
.../tools/data/mmit/extract_rgb_frames.sh | 8 +
.../data/mmit/extract_rgb_frames_opencv.sh | 8 +
.../data/mmit/generate_rawframes_filelist.sh | 9 +
.../data/mmit/generate_videos_filelist.sh | 9 +
.../tools/data/mmit/label_map.txt | 313 +++
.../tools/data/mmit/preprocess_data.sh | 20 +
.../tools/data/omnisource/README.md | 150 ++
.../tools/data/omnisource/README_zh-CN.md | 149 ++
.../tools/data/omnisource/trim_raw_video.py | 45 +
.../tools/data/parse_file_list.py | 535 +++++
.../tools/data/resize_videos.py | 126 ++
...RGBD120_samples_with_missing_skeletons.txt | 535 +++++
...TU_RGBD_samples_with_missing_skeletons.txt | 302 +++
.../tools/data/skeleton/README.md | 131 ++
.../tools/data/skeleton/README_zh-CN.md | 135 ++
.../skeleton/S001C001P001R001A001_rgb.avi | Bin 0 -> 987146 bytes
.../tools/data/skeleton/babel2mma2.py | 25 +
.../data/skeleton/download_annotations.sh | 22 +
.../tools/data/skeleton/gen_ntu_rgbd_raw.py | 355 +++
.../tools/data/skeleton/label_map_gym99.txt | 99 +
.../tools/data/skeleton/label_map_ntu120.txt | 120 ++
.../data/skeleton/ntu_pose_extraction.py | 347 +++
.../tools/data/sthv1/README.md | 144 ++
.../tools/data/sthv1/README_zh-CN.md | 142 ++
.../tools/data/sthv1/encode_videos.sh | 7 +
.../tools/data/sthv1/extract_flow.sh | 6 +
.../data/sthv1/generate_rawframes_filelist.sh | 8 +
.../data/sthv1/generate_videos_filelist.sh | 8 +
.../tools/data/sthv1/label_map.txt | 174 ++
.../tools/data/sthv2/README.md | 118 +
.../tools/data/sthv2/README_zh-CN.md | 118 +
.../tools/data/sthv2/extract_frames.sh | 6 +
.../tools/data/sthv2/extract_rgb_frames.sh | 7 +
.../data/sthv2/extract_rgb_frames_opencv.sh | 7 +
.../data/sthv2/generate_rawframes_filelist.sh | 8 +
.../data/sthv2/generate_videos_filelist.sh | 8 +
.../tools/data/sthv2/label_map.txt | 174 ++
.../tools/data/thumos14/README.md | 142 ++
.../tools/data/thumos14/README_zh-CN.md | 139 ++
.../thumos14/denormalize_proposal_file.sh | 10 +
.../data/thumos14/download_annotations.sh | 27 +
.../tools/data/thumos14/download_videos.sh | 25 +
.../tools/data/thumos14/extract_frames.sh | 10 +
.../tools/data/thumos14/extract_rgb_frames.sh | 10 +
.../thumos14/extract_rgb_frames_opencv.sh | 10 +
.../data/thumos14/fetch_tag_proposals.sh | 11 +
.../tools/data/ucf101/README.md | 127 ++
.../tools/data/ucf101/README_zh-CN.md | 125 ++
.../tools/data/ucf101/download_annotations.sh | 13 +
.../tools/data/ucf101/download_videos.sh | 16 +
.../tools/data/ucf101/extract_frames.sh | 6 +
.../tools/data/ucf101/extract_rgb_frames.sh | 7 +
.../data/ucf101/extract_rgb_frames_opencv.sh | 7 +
.../ucf101/generate_rawframes_filelist.sh | 8 +
.../data/ucf101/generate_videos_filelist.sh | 8 +
.../tools/data/ucf101/label_map.txt | 101 +
.../tools/data/ucf101_24/README.md | 89 +
.../tools/data/ucf101_24/README_zh-CN.md | 84 +
.../tools/deployment/mmaction2torchserve.py | 109 +
.../tools/deployment/mmaction_handler.py | 79 +
.../tools/deployment/publish_model.py | 47 +
.../tools/deployment/pytorch2onnx.py | 183 ++
.../mmaction2-0.24.1/tools/dist_test.sh | 14 +
.../mmaction2-0.24.1/tools/dist_train.sh | 13 +
.../tools/misc/bsn_proposal_generation.py | 198 ++
.../tools/misc/clip_feature_extraction.py | 229 ++
.../misc/dist_clip_feature_extraction.sh | 12 +
.../tools/misc/flow_extraction.py | 187 ++
.../mmaction2-0.24.1/tools/slurm_test.sh | 24 +
.../mmaction2-0.24.1/tools/slurm_train | 56 +
.../mmaction2-0.24.1/tools/slurm_train.sh | 24 +
openmmlab_test/mmaction2-0.24.1/tools/test.py | 371 ++++
.../mmaction2-0.24.1/tools/train.py | 222 ++
openmmlab_test/mmaction2-0.24.1/train.md | 65 +
952 files changed, 110142 insertions(+), 4 deletions(-)
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/CODE_OF_CONDUCT.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/CONTRIBUTING.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/config.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/error-report.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/feature_request.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/general_questions.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/reimplementation_questions.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/pull_request_template.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/workflows/build.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/workflows/deploy.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/workflows/lint.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/.github/workflows/test_mim.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/.gitignore
create mode 100644 openmmlab_test/mmaction2-0.24.1/.pre-commit-config.yaml
create mode 100644 openmmlab_test/mmaction2-0.24.1/.pylintrc
create mode 100644 openmmlab_test/mmaction2-0.24.1/.readthedocs.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/CITATION.cff
create mode 100644 openmmlab_test/mmaction2-0.24.1/LICENSE
create mode 100644 openmmlab_test/mmaction2-0.24.1/MANIFEST.in
create mode 100644 openmmlab_test/mmaction2-0.24.1/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/default_runtime.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/audioonly_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bmn_400x100.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_pem.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_tem.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/c3d_sports1m_pretrained.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/i3d_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/ircsn_r152.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/r2plus1d_r34.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowfast_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowonly_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tanet_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tin_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_slowonly_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_tsm_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/trn_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_mobilenet_v2.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/models/x3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/adam_20e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_100e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_150e_warmup.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_50e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_100e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_50e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_100e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_50e.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50_nl.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_imgaug_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/pipeline.png
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/r2plus1d/r2plus1d_r34_video_inference_8x8x1_180e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_multigrid_r50_8x8x1_358e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_prebn_r50_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_prebn_r50_8x8x1_256e_kinetics400_rgb_steplr.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r101_8x8x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r101_r50_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r152_r50_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r50_16x8x1_22e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb_steplr.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x4x1_64e_hmdb51_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x4x1_64e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x4x1_64e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x4x1_64e_ucf101_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_64e_jester_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_k400_pretrained_r50_4x16x1_120e_gym99_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_k400_pretrained_r50_8x4x1_40e_hmdb51_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_k400_pretrained_r50_8x4x1_40e_ucf101_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_nl_embedded_gaussian_r50_4x16x1_150e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_nl_embedded_gaussian_r50_8x8x1_150e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_clip_feature_extraction_4x16x1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/slowonly/slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tanet/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tanet/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tanet/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tanet/tanet_r50_1x1x16_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tanet/tanet_r50_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/timesformer/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/timesformer/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/timesformer/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/timesformer/timesformer_divST_8x32x1_15e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/timesformer/timesformer_jointST_8x32x1_15e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/timesformer/timesformer_spaceOnly_8x32x1_15e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tin/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tin/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tin/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tpn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tpn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tpn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/trn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/trn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/trn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/trn/trn_r50_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/trn/trn_r50_1x1x8_50e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_k400_pretrained_r50_1x1x16_25e_hmdb51_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_k400_pretrained_r50_1x1x16_25e_ucf101_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_k400_pretrained_r50_1x1x8_25e_hmdb51_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_k400_pretrained_r50_1x1x8_25e_ucf101_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_mobilenetv2_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_mobilenetv2_video_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_mobilenetv2_video_inference_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x16_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x8_50e_jester_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_cutmix_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_dense_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_flip_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_flip_randaugment_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_gpu_normalize_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_mixup_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_ptv_augmix_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_ptv_randaugment_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_randaugment_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_video_1x1x16_50e_diving48_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_diving48_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_r50_video_inference_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsm/tsm_temporal_pool_r50_1x1x8_50e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/custom_backbones/tsn_dense161_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/custom_backbones/tsn_rn101_32x4d_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/custom_backbones/tsn_swin_transformer_video_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_10crop_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_3crop_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_10crop_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_3crop_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_10crop_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_3crop_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_clip_feature_extraction_1x1x3_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_1x1x16_100e_diving48_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_diving48_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_imgaug_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/tsn/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/x3d/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/x3d/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/x3d/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition_audio/audioonly/audioonly_r50_64x1x1_100e_kinetics400_audio_feature.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition_audio/resnet/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition_audio/resnet/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition_audio/resnet/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/recognition_audio/resnet/tsn_r50_64x1x1_100e_kinetics400_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_keypoint_3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/2s-agcn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/2s-agcn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/2s-agcn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/custom_dataset_training.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_kinetics400_pretrained_r50_u48_120e_ucf101_split1_keypoint.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_r50_u48_240e_gym_keypoint.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_r50_u48_240e_gym_limb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_keypoint.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_limb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu60_xsub_keypoint.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu60_xsub_limb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/metafile.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/stgcn_80e_babel120.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/stgcn_80e_babel120_wfl.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/stgcn_80e_babel60.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/stgcn_80e_babel60_wfl.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint_3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo.gif
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo.ipynb
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo.mp4
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_gradcam.gif
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_gradcam.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_out.mp4
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_skeleton.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_spatiotemporal_det.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/demo_video_structuralize.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/faster_rcnn_r50_fpn_2x_coco.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/fuse/data_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/hrnet_w32_coco_256x192.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/long_video_demo.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/mmaction2_tutorial.ipynb
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/mmaction2_tutorial_zh-CN.ipynb
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/ntu_sample.avi
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/test_video_structuralize.mp4
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/visualize_heatmap_volume.ipynb
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/webcam_demo.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/demo/webcam_demo_spatiotemporal_det.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/docker/Dockerfile
create mode 100644 openmmlab_test/mmaction2-0.24.1/docker/serve/Dockerfile
create mode 100644 openmmlab_test/mmaction2-0.24.1/docker/serve/config.properties
create mode 100644 openmmlab_test/mmaction2-0.24.1/docker/serve/entrypoint.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/Makefile
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/_static/css/readthedocs.css
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/_static/images/mmaction2.png
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/api.rst
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/benchmark.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/changelog.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/conf.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/data_preparation.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/faq.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/feature_extraction.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/getting_started.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/index.rst
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/install.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/make.bat
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/merge_docs.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/projects.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/stat.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/supported_datasets.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/switch_language.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/1_config.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/2_finetune.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/3_new_dataset.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/4_data_pipeline.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/5_new_modules.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/6_export_model.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/tutorials/7_customize_runtime.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs/useful_tools.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/Makefile
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/api.rst
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/benchmark.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/conf.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/data_preparation.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/demo.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/faq.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/feature_extraction.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/getting_started.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/index.rst
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/install.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/make.bat
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/merge_docs.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/stat.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/supported_datasets.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/switch_language.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/1_config.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/2_finetune.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/3_new_dataset.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/4_data_pipeline.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/5_new_modules.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/6_export_model.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/tutorials/7_customize_runtime.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/docs_zh_CN/useful_tools.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/apis/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/apis/inference.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/apis/test.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/apis/train.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/bbox/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/bbox/assigners/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/bbox/assigners/max_iou_assigner_ava.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/bbox/bbox_target.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/bbox/transforms.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/dist_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/accuracy.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/metrics.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/np_box_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/np_box_ops.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/object_detection_evaluation.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/per_image_evaluation.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_evaluation/standard_fields.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/ava_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/eval_detection.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/evaluation/eval_hooks.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/hooks/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/hooks/output.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/lr/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/lr/multigridlr.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/optimizer/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/optimizer/copy_of_sgd.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/optimizer/tsm_optimizer_constructor.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/runner/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/runner/omnisource_runner.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/scheduler/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/core/scheduler/lr_updater.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/activitynet_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/audio_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/audio_feature_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/audio_visual_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/ava_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/blending_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/builder.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/dataset_wrappers.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/hvu_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/image_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pipelines/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pipelines/augmentations.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pipelines/compose.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pipelines/formatting.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pipelines/loading.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pipelines/pose_loading.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/pose_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/rawframe_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/rawvideo_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/samplers/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/samplers/distributed_sampler.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/ssn_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/datasets/video_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/localization/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/localization/bsn_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/localization/proposal_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/localization/ssn_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/agcn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/c3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/mobilenet_v2.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/mobilenet_v2_tsm.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet2plus1d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet3d_csn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet3d_slowfast.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet3d_slowonly.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet_tin.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/resnet_tsm.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/stgcn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/tanet.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/timesformer.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/backbones/x3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/builder.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/conv2plus1d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/conv_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/lfb.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/sub_batchnorm3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/tam.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/common/transformer.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/audio_tsn_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/bbox_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/fbo_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/i3d_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/lfb_infer_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/misc_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/roi_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/slowfast_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/ssn_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/stgcn_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/timesformer_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/tpn_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/trn_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/tsm_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/tsn_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/heads/x3d_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/bmn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/bsn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/ssn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/utils/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/localizers/utils/post_processing.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/binary_logistic_regression_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/bmn_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/cross_entropy_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/hvu_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/nll_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/ohem_hinge_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/losses/ssn_loss.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/necks/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/necks/tpn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/recognizers/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/recognizers/audio_recognizer.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/recognizers/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/recognizers/recognizer2d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/recognizers/recognizer3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/roi_extractors/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/roi_extractors/single_straight3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/skeleton_gcn/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/skeleton_gcn/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/skeleton_gcn/skeletongcn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/skeleton_gcn/utils/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/models/skeleton_gcn/utils/graph.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/collect_env.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/distribution_env.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/gradcam_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/logger.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/misc.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/module_hooks.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/multigrid/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/multigrid/longshortcyclehook.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/multigrid/short_sampler.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/multigrid/subbn_aggregate.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/precise_bn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/utils/setup_env.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/mmaction/version.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/model-index.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements/build.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements/docs.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements/mminstall.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements/optional.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements/readthedocs.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/requirements/tests.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/acc_curve.png
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/data_pipeline.png
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/mmaction2_logo.png
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/mmaction2_overview.gif
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/qq_group_qrcode.png
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/spatio-temporal-det.gif
create mode 100644 openmmlab_test/mmaction2-0.24.1/resources/zhihu_qrcode.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/setup.cfg
create mode 100644 openmmlab_test/mmaction2-0.24.1/setup.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/activitynet_features/v_test1.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/activitynet_features/v_test2.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/action_test_anno.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/audio_feature_test_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/audio_test_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/hvu_frame_test_anno.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/hvu_video_eval_test_anno.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/hvu_video_test_anno.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/proposal_normalized_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/proposal_test_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/rawframe_test_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/rawframe_test_list_multi_label.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/rawframe_test_list_with_offset.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/rawvideo_test_anno.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/rawvideo_test_anno.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/sample.pkl
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/video_test_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/annotations/video_test_list_multi_label.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/ava_dataset/action_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/ava_dataset/ava_excluded_timestamps_sample.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/ava_dataset/ava_proposals_sample.pkl
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/ava_dataset/ava_sample.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/bsp_features/v_test1.npy
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/eval_detection/action_list.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/eval_detection/gt.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/eval_detection/pred.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/eval_detection/proposal.pkl
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/eval_localization/gt.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/eval_localization/result.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00001.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00002.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00003.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00004.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00005.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00006.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00007.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00008.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00009.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/img_00010.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/x_00001.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/x_00002.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/x_00003.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/x_00004.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/x_00005.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/y_00001.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/y_00002.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/y_00003.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/y_00004.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/imgs/y_00005.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/lfb/lfb_unittest.pkl
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/proposals/v_test1.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/proposals/v_test2.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/rawvideo_dataset/part_0.mp4
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/rawvideo_dataset/part_1.mp4
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/tem_results/v_test1.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/tem_results/v_test2.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/test.avi
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/test.jpg
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/test.mp4
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/data/test.wav
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_blending.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_compose.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_activitynet_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_audio_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_audio_feature_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_audio_visual_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_ava_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_concat_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_hvu_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_pose_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_rawframe_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_rawvideo_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_repeat_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_ssn_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_datasets/test_video_dataset.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_formating.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_color.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_crop.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_flip.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_imgaug.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_lazy.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_misc.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_normalization.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_pytorchvideo.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_augmentations/test_transform.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/test_decode.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/test_load.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/test_localization.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/test_pose_loading.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_pipelines/test_loadings/test_sampling.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_data/test_sampler.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_metrics/test_accuracy.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_metrics/test_losses.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/base.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_backbones.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common_modules/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common_modules/test_base_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common_modules/test_base_recognizers.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common_modules/test_mobilenet_v2.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common_modules/test_resnet.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_common_modules/test_resnet3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_detectors/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_detectors/test_detectors.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_gradcam.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_head.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_localizers/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_localizers/test_bmn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_localizers/test_localizers.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_localizers/test_pem.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_localizers/test_ssn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_localizers/test_tem.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_neck.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_recognizers/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_recognizers/test_audio_recognizer.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_recognizers/test_recognizer2d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_recognizers/test_recognizer3d.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_recognizers/test_skeletongcn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_models/test_roi_extractor.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_apis_test.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_config.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_eval_hook.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_inference.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_lr.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_optimizer.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_precise_bn.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_runtime/test_train.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_utils/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_utils/test_bbox.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_utils/test_localization_utils.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_utils/test_module_hooks.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_utils/test_onnx.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tests/test_utils/test_setup_env.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/__init__.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/analyze_logs.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/bench_processing.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/benchmark.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/check_videos.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/eval_metric.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/get_flops.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/print_config.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/report_accuracy.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/analysis/report_map.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/argparse.bash
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/action_name.csv
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/activitynet_feature_postprocessing.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/convert_proposal_format.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/download.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/download_bsn_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/download_feature_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/download_features.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/environment.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/generate_rawframes_filelist.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/process_annotations.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/activitynet/tsn_feature_extraction.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/anno_txt2json.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/AVA_annotation_explained.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/cut_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/download_videos_gnu_parallel.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/download_videos_parallel.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/download_videos_parallel.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/extract_rgb_frames_ffmpeg.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/fetch_ava_proposals.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ava/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/build_audio_features.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/build_file_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/build_rawframes.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/build_videos.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/denormalize_proposal_file.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/diving48/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/extract_audio.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/download.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/environment.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/generate_file_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/trim_event.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/gym/trim_subaction.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hmdb51/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/download.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/environment.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/generate_file_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/generate_sub_file_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/label_map.json
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/hvu/parse_tag_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/encode_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/extract_flow.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jester/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jhmdb/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/jhmdb/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/download.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/download_backup_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/environment.yml
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/label_map_k400.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/label_map_k600.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/label_map_k700.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/kinetics/rename_classnames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mit/preprocess_data.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/mmit/preprocess_data.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/omnisource/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/omnisource/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/omnisource/trim_raw_video.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/parse_file_list.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/resize_videos.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/NTU_RGBD120_samples_with_missing_skeletons.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/NTU_RGBD_samples_with_missing_skeletons.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/S001C001P001R001A001_rgb.avi
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/babel2mma2.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/gen_ntu_rgbd_raw.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/label_map_gym99.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/label_map_ntu120.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/skeleton/ntu_pose_extraction.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/encode_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/extract_flow.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv1/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/sthv2/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/denormalize_proposal_file.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/thumos14/fetch_tag_proposals.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/download_annotations.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/download_videos.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/extract_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/extract_rgb_frames.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/extract_rgb_frames_opencv.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/generate_rawframes_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/generate_videos_filelist.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101/label_map.txt
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101_24/README.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/data/ucf101_24/README_zh-CN.md
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/deployment/mmaction2torchserve.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/deployment/mmaction_handler.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/deployment/publish_model.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/deployment/pytorch2onnx.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/dist_test.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/dist_train.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/misc/bsn_proposal_generation.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/misc/clip_feature_extraction.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/misc/dist_clip_feature_extraction.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/misc/flow_extraction.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/slurm_test.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/slurm_train
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/slurm_train.sh
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/test.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/tools/train.py
create mode 100644 openmmlab_test/mmaction2-0.24.1/train.md
diff --git a/README.md b/README.md
index 7a7ca50f..f93ae9e2 100644
--- a/README.md
+++ b/README.md
@@ -95,8 +95,9 @@ DCU深度学习样例
| 类别 | 版本 | DCU | 精度 | 多DCU | 支持网络 | 代码位置|
| :----------: | :----------: | :----------: | :----------: | :----------: | :----------: | :----------: |
-| mmclassification | v0.24.0 | Yes | FP32/FP16 | Yes | ResNet18/ResNet34/ResNet50/ResNet152/Vgg11/SeresNet50/ResNext50/MobileNet-v2/ShuffleNet-v1/ShuffleNet-v2 | [mmclassfication](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmclassification-speed-benchmark) |
-| mmdetection | v2.25.2 | Yes | FP32/FP16 | Yes | Faster-Rcnn/Mask-Rcnn/Double-Heads/Cascade-Mask-Rcnn/ResNest/Dcn/RetinaNet/VfNet/Ssd/Yolov3 | [mmdetection](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmdetection-speed_xinpian) |
-| mmpose | v0.28.1 | Yes | FP32/FP16 | Yes | ResNet50-Top-Down/ResNet50-Bottom-Up/HrNet-Top-Down | [mmpose](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmpose-speed_test) |
-| mmsegmentation | v0.29.1 | Yes | FP32/FP16 | Yes | PspNet-R50/DeepLab-V3-R50/Fcn-R50/UperNet-R50/DeepLab-V3plus-R50 | [mmsegmentation](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmsegmentation) |
+| mmclassification | v0.24.0 | Yes | FP32/FP16 | Yes | ResNet18/ResNet34/ResNet50/ResNet152/Vgg11/SeresNet50/ResNext50/MobileNet-v2/ShuffleNet-v1/ShuffleNet-v2 | [mmclassification](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmclassification-0.24.1) |
+| mmdetection | v2.25.2 | Yes | FP32/FP16 | Yes | Faster-Rcnn/Mask-Rcnn/Double-Heads/Cascade-Mask-Rcnn/ResNest/Dcn/RetinaNet/VfNet/Ssd/Yolov3 | [mmdetection](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmdetection-2.25.2) |
+| mmpose | v0.28.1 | Yes | FP32/FP16 | Yes | ResNet50-Top-Down/ResNet50-Bottom-Up/HrNet-Top-Down | [mmpose](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmpose-0.28.1) |
+| mmsegmentation | v0.29.1 | Yes | FP32/FP16 | Yes | PspNet-R50/DeepLab-V3-R50/Fcn-R50/UperNet-R50/DeepLab-V3plus-R50 | [mmsegmentation](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmsegmentation-0.29.1) |
+| mmaction2 | v0.24.1 | Yes | FP32/FP16 | Yes | ST-GCN/C3D/R(2+1)D | [mmaction2](http://10.0.50.24/dcutoolkit/deeplearing/dlexamples_new/-/tree/main/openmmlab_test/mmaction2-0.24.1) |
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/CODE_OF_CONDUCT.md b/openmmlab_test/mmaction2-0.24.1/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..92afad1c
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at chenkaidev@gmail.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
+
+[homepage]: https://www.contributor-covenant.org
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/CONTRIBUTING.md b/openmmlab_test/mmaction2-0.24.1/.github/CONTRIBUTING.md
new file mode 100644
index 00000000..fb894baf
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/CONTRIBUTING.md
@@ -0,0 +1 @@
+We appreciate all contributions to improve MMAction2. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline.
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/config.yml b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..a7722204
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,9 @@
+blank_issues_enabled: false
+
+contact_links:
+ - name: Common Issues
+ url: https://mmaction2.readthedocs.io/en/latest/faq.html
+ about: Check if your issue already has solutions
+ - name: MMAction2 Documentation
+ url: https://mmaction2.readthedocs.io/en/latest/
+ about: Check if your question is answered in docs
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/error-report.md b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/error-report.md
new file mode 100644
index 00000000..cab4b1b5
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/error-report.md
@@ -0,0 +1,49 @@
+---
+name: Error report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+---
+
+Thanks for your error report and we appreciate it a lot.
+If you feel we have helped you, give us a STAR! :satisfied:
+
+**Checklist**
+
+1. I have searched related issues but cannot get the expected help.
+2. The bug has not been fixed in the latest version.
+
+**Describe the bug**
+
+A clear and concise description of what the bug is.
+
+**Reproduction**
+
+1. What command or script did you run?
+
+```
+A placeholder for the command.
+```
+
+2. Did you make any modifications to the code or config? Did you understand what you have modified?
+3. What dataset did you use?
+
+**Environment**
+
+1. Please run `PYTHONPATH=${PWD}:$PYTHONPATH python mmaction/utils/collect_env.py` to collect necessary environment information and paste it here.
+2. You may add additional information that may be helpful for locating the problem, such as
+ - How you installed PyTorch \[e.g., pip, conda, source\]
+ - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+
+**Error traceback**
+
+If applicable, paste the error traceback here.
+
+```
+A placeholder for traceback.
+```
+
+**Bug fix**
+
+If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/feature_request.md b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..9b5bc408
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,27 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+---
+
+Thanks for your feature request and we will review and plan for it when necessary.
+If you feel we have helped you, give us a STAR! :satisfied:
+
+**Describe the feature**
+
+**Motivation**
+
+A clear and concise description of the motivation of the feature.
+Ex1. It is inconvenient when \[....\].
+Ex2. There is a recent paper \[....\], which is very helpful for \[....\].
+
+**Related resources**
+
+If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.
+
+**Additional context**
+
+Add any other context or screenshots about the feature request here.
+If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/general_questions.md b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/general_questions.md
new file mode 100644
index 00000000..5aa583cb
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/general_questions.md
@@ -0,0 +1,14 @@
+---
+name: General questions
+about: Ask general questions to get help
+title: ''
+labels: ''
+assignees: ''
+---
+
+Before raising a question, please check the following items.
+
+**Checklist**
+
+1. I have searched related issues but cannot get the expected help.
+2. I have read the [FAQ documentation](https://mmaction2.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/reimplementation_questions.md b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/reimplementation_questions.md
new file mode 100644
index 00000000..babbaeb8
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/ISSUE_TEMPLATE/reimplementation_questions.md
@@ -0,0 +1,69 @@
+---
+name: Reimplementation Questions
+about: Ask about questions during model reimplementation
+title: ''
+labels: reimplementation
+assignees: ''
+---
+
+If you feel we have helped you, give us a STAR! :satisfied:
+
+**Notice**
+
+There are several common situations in reimplementation issues, as listed below
+
+1. Reimplement a model in the model zoo using the provided configs
+2. Reimplement a model in the model zoo on other datasets (e.g., custom datasets)
+3. Reimplement a custom model but all the components are implemented in MMAction2
+4. Reimplement a custom model with new modules implemented by yourself
+
+There are several things to do for different cases as below.
+
+- For cases 1 & 3, please follow the steps in the following sections so that we can quickly identify the issue.
+- For cases 2 & 4, please understand that we are not able to help much here, because we usually do not know the full code and users should be responsible for the code they write.
+- One suggestion for cases 2 & 4 is to first check whether the bug lies in the self-implemented code or in the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtained in the issue, follow the steps in the following sections, and be as clear as possible so that we can better help you.
+
+**Checklist**
+
+1. I have searched related issues but cannot get the expected help.
+2. The issue has not been fixed in the latest version.
+
+**Describe the issue**
+
+A clear and concise description of the problem you met and what you have done.
+
+**Reproduction**
+
+1. What command or script did you run?
+
+```
+A placeholder for the command.
+```
+
+2. Which config did you run?
+
+```
+A placeholder for the config.
+```
+
+3. Did you make any modifications to the code or config? Did you understand what you have modified?
+4. What dataset did you use?
+
+**Environment**
+
+1. Please run `PYTHONPATH=${PWD}:$PYTHONPATH python mmaction/utils/collect_env.py` to collect necessary environment information and paste it here.
+2. You may add additional information that may be helpful for locating the problem, such as
+ 1. How you installed PyTorch \[e.g., pip, conda, source\]
+ 2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+
+**Results**
+
+If applicable, paste the related results here, e.g., what you expect and what you get.
+
+```
+A placeholder for results comparison
+```
+
+**Issue fix**
+
+If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/pull_request_template.md b/openmmlab_test/mmaction2-0.24.1/.github/pull_request_template.md
new file mode 100644
index 00000000..63052769
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/pull_request_template.md
@@ -0,0 +1,26 @@
+Thanks for your contribution and we appreciate it a lot. The following instructions will make your pull request healthier and more likely to get timely feedback.
+If you do not understand some items, don't worry; just make the pull request and seek help from the maintainers.
+
+## Motivation
+
+Please describe the motivation of this PR and the goal you want to achieve through this PR.
+
+## Modification
+
+Please briefly describe what modification is made in this PR.
+
+## BC-breaking (Optional)
+
+Does the modification introduce changes that break the backward compatibility of this repo?
+If so, please describe how it breaks the compatibility and how users should modify their code to keep compatibility with this PR.
+
+## Use cases (Optional)
+
+If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.
+
+## Checklist
+
+1. Pre-commit or other linting tools should be used to fix the potential lint issues.
+2. The modification should be covered by complete unit tests. If not, please add more unit tests to ensure the correctness.
+3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMCls.
+4. The documentation should be modified accordingly, like docstring or example tutorials.
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/workflows/build.yml b/openmmlab_test/mmaction2-0.24.1/.github/workflows/build.yml
new file mode 100644
index 00000000..30d72c90
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/workflows/build.yml
@@ -0,0 +1,248 @@
+name: build
+
+on:
+ push:
+ paths-ignore:
+ - ".github/**.md"
+ - "demo/**"
+ - "docker/**"
+ - "tools/**"
+ - "README.md"
+ - "README_zh-CN.md"
+
+ pull_request:
+ paths-ignore:
+ - ".github/**.md"
+ - "demo/**"
+ - "docker/**"
+ - "docs/**"
+ - "docs_zh-CN/**"
+ - "tools/**"
+ - "README.md"
+ - "README_zh-CN.md"
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build_cpu:
+ runs-on: ubuntu-18.04
+ strategy:
+ matrix:
+ python-version: [3.7]
+ torch: [1.5.0, 1.7.0, 1.9.0]
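+        # each torch version in the include list below is paired with its matching torchvision build; torch 1.9.0 is additionally tested on Python 3.8 and 3.9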
+ include:
+ - torch: 1.5.0
+ torchvision: 0.6.0
+ - torch: 1.7.0
+ torchvision: 0.8.1
+ - torch: 1.9.0
+ torchvision: 0.10.0
+ python-version: 3.7
+ - torch: 1.9.0
+ torchvision: 0.10.0
+ python-version: 3.8
+ - torch: 1.9.0
+ torchvision: 0.10.0
+ python-version: 3.9
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Upgrade pip
+ run: pip install pip --upgrade
+ - name: Install soundfile lib
+ run: sudo apt-get install -y libsndfile1
+ - name: Install onnx
+ run: pip install onnx
+ - name: Install librosa and soundfile
+ run: pip install librosa soundfile
+ - name: Install lmdb
+ run: pip install lmdb
+ - name: Install TurboJpeg lib
+ run: sudo apt-get install -y libturbojpeg
+ - name: Install PyTorch
+ run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
+ - name: Install MMCV
+ run: pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${{matrix.torch}}/index.html
+ - name: Install MMDet
+ run: pip install git+https://github.com/open-mmlab/mmdetection/
+ - name: Install MMCls
+ run: pip install git+https://github.com/open-mmlab/mmclassification/
+ - name: Install unittest dependencies
+ run: pip install -r requirements/tests.txt -r requirements/optional.txt
+ - name: Install PytorchVideo
+ run: pip install pytorchvideo
+ if: ${{matrix.torchvision == '0.10.0'}}
+ - name: Build and install
+ run: rm -rf .eggs && pip install -e .
+ - name: Run unittests and generate coverage report
+ run: |
+ coverage run --branch --source mmaction -m pytest tests/
+ coverage xml
+ coverage report -m
+ build_cu101:
+ runs-on: ubuntu-18.04
+ container:
+ image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
+
+ strategy:
+ matrix:
+ python-version: [3.7]
+ torch: [1.5.0+cu101, 1.6.0+cu101, 1.7.0+cu101]
+ include:
+ - torch: 1.5.0+cu101
+ torch_version: torch1.5
+ torchvision: 0.6.0+cu101
+ - torch: 1.6.0+cu101
+ torch_version: torch1.6
+ torchvision: 0.7.0+cu101
+ - torch: 1.7.0+cu101
+ torch_version: torch1.7
+ torchvision: 0.8.1+cu101
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Upgrade pip
+ run: pip install pip --upgrade
+ - name: Fetch GPG keys
+ run: |
+ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
+ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
+ - name: Install CUDA
+ run: |
+ apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libturbojpeg libsndfile1 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
+ apt-get clean
+ rm -rf /var/lib/apt/lists/*
+ - name: Install librosa and soundfile
+ run: python -m pip install librosa soundfile
+ - name: Install lmdb
+ run: python -m pip install lmdb
+ - name: Install PyTorch
+ run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
+ - name: Install mmaction dependencies
+ run: |
+ python -V
+ python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html
+ python -m pip install -q git+https://github.com/open-mmlab/mmdetection/
+ python -m pip install -q git+https://github.com/open-mmlab/mmclassification/
+ python -m pip install -r requirements.txt
+ python -c 'import mmcv; print(mmcv.__version__)'
+ - name: Build and install
+ run: rm -rf .eggs && pip install -e .
+ - name: Run unittests and generate coverage report
+ run: |
+ coverage run --branch --source mmaction -m pytest tests/
+ coverage xml
+ coverage report -m
+ # Only upload coverage report for python3.7 && pytorch1.5
+ - name: Upload coverage to Codecov
+ if: ${{matrix.torch == '1.5.0+cu101' && matrix.python-version == '3.7'}}
+ uses: codecov/codecov-action@v1.0.14
+ with:
+ file: ./coverage.xml
+ flags: unittests
+ env_vars: OS,PYTHON
+ name: codecov-umbrella
+ fail_ci_if_error: false
+
+ build_cu102:
+ runs-on: ubuntu-18.04
+ container:
+ image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
+
+ strategy:
+ matrix:
+ python-version: [3.7]
+ torch: [1.9.0+cu102]
+ include:
+ - torch: 1.9.0+cu102
+ torch_version: torch1.9
+ torchvision: 0.10.0+cu102
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Upgrade pip
+ run: pip install pip --upgrade
+ - name: Fetch GPG keys
+ run: |
+ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
+ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
+ - name: Install CUDA
+ run: |
+ apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libturbojpeg libsndfile1 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
+ apt-get clean
+ rm -rf /var/lib/apt/lists/*
+ - name: Install librosa and soundfile
+ run: python -m pip install librosa soundfile
+ - name: Install lmdb
+ run: python -m pip install lmdb
+ - name: Install PyTorch
+ run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
+ - name: Install mmaction dependencies
+ run: |
+ python -V
+ python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/${{matrix.torch_version}}/index.html
+ python -m pip install -q git+https://github.com/open-mmlab/mmdetection/
+ python -m pip install -q git+https://github.com/open-mmlab/mmclassification/
+ python -m pip install -r requirements.txt
+ python -c 'import mmcv; print(mmcv.__version__)'
+ - name: Install PytorchVideo
+ run: python -m pip install pytorchvideo
+ if: ${{matrix.torchvision == '0.10.0+cu102'}}
+ - name: Build and install
+ run: rm -rf .eggs && pip install -e .
+ - name: Run unittests and generate coverage report
+ run: |
+ coverage run --branch --source mmaction -m pytest tests/
+ coverage xml
+ coverage report -m
+
+ test_windows:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [windows-2022]
+ python: [3.8]
+ platform: [cpu]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Upgrade pip
+ run: python -m pip install pip --upgrade --user
+ - name: Install librosa and soundfile
+ run: python -m pip install librosa soundfile
+ - name: Install lmdb
+ run: python -m pip install lmdb
+ - name: Install PyTorch
+ # As a complement to Linux CI, we test on PyTorch LTS version
+ run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
+ - name: Install MMCV
+ run: pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
+ - name: Install mmaction dependencies
+ run: |
+ python -V
+ python -m pip install -q git+https://github.com/open-mmlab/mmdetection/
+ python -m pip install -q git+https://github.com/open-mmlab/mmclassification/
+ python -m pip install -r requirements.txt
+ python -c 'import mmcv; print(mmcv.__version__)'
+ - name: Install PytorchVideo
+ run: python -m pip install pytorchvideo
+ - name: Show pip list
+ run: pip list
+ - name: Build and install
+ run: pip install -e .
+ - name: Run unittests
+        run: coverage run --branch --source mmaction -m pytest tests -sv
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/workflows/deploy.yml b/openmmlab_test/mmaction2-0.24.1/.github/workflows/deploy.yml
new file mode 100644
index 00000000..a136e0cc
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/workflows/deploy.yml
@@ -0,0 +1,26 @@
+name: deploy
+
+on: push
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build-n-publish:
+ runs-on: ubuntu-latest
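+    # publish to PyPI only when a version tag is pushed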
+ if: startsWith(github.event.ref, 'refs/tags')
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.7
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.7
+ - name: Build MMAction2
+ run: |
+ pip install wheel
+ python setup.py sdist bdist_wheel
+ - name: Publish distribution to PyPI
+ run: |
+ pip install twine
+ twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/workflows/lint.yml b/openmmlab_test/mmaction2-0.24.1/.github/workflows/lint.yml
new file mode 100644
index 00000000..68b58a2b
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/workflows/lint.yml
@@ -0,0 +1,27 @@
+name: lint
+
+on: [push, pull_request]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.7
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.7
+ - name: Install pre-commit hook
+ run: |
+ pip install pre-commit
+ pre-commit install
+ - name: Linting
+ run: pre-commit run --all-files
+ - name: Check docstring coverage
+ run: |
+ pip install interrogate
+ interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmaction
diff --git a/openmmlab_test/mmaction2-0.24.1/.github/workflows/test_mim.yml b/openmmlab_test/mmaction2-0.24.1/.github/workflows/test_mim.yml
new file mode 100644
index 00000000..88594d0e
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.github/workflows/test_mim.yml
@@ -0,0 +1,47 @@
+name: test-mim
+
+on:
+ push:
+ paths:
+ - 'model-index.yml'
+ - 'configs/**'
+
+ pull_request:
+ paths:
+ - 'model-index.yml'
+ - 'configs/**'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build_cpu:
+ runs-on: ubuntu-18.04
+ strategy:
+ matrix:
+ python-version: [3.7]
+ torch: [1.8.0]
+ include:
+ - torch: 1.8.0
+ torch_version: torch1.8
+ torchvision: 0.9.0
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Upgrade pip
+ run: pip install pip --upgrade
+ - name: Install Pillow
+ run: pip install Pillow==6.2.2
+ if: ${{matrix.torchvision == '0.4.2'}}
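+        # note: this Pillow pin never triggers with the current matrix (torchvision is 0.9.0)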
+ - name: Install PyTorch
+ run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
+ - name: Install openmim
+ run: pip install openmim
+ - name: Build and install
+ run: rm -rf .eggs && mim install -e .
+ - name: test commands of mim
+ run: mim search mmaction2
diff --git a/openmmlab_test/mmaction2-0.24.1/.gitignore b/openmmlab_test/mmaction2-0.24.1/.gitignore
new file mode 100644
index 00000000..587b2964
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.gitignore
@@ -0,0 +1,140 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+**/*.pyc
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# custom
+/data
+.vscode
+.idea
+*.pkl
+*.pkl.json
+*.log.json
+benchlist.txt
+work_dirs/
+
+# Pytorch
+*.pth
+
+# Profile
+*.prof
+
+# lmdb
+*.mdb
+
+# unignore some data file in tests/data
+!tests/data/**/*.pkl
+!tests/data/**/*.pkl.json
+!tests/data/**/*.log.json
+!tests/data/**/*.pth
+
+# avoid soft links created by MIM
+mmaction/configs/*
+mmaction/tools/*
+
+*.ipynb
+
+# unignore ipython notebook files in demo
+!demo/*.ipynb
+mmaction/.mim
diff --git a/openmmlab_test/mmaction2-0.24.1/.pre-commit-config.yaml b/openmmlab_test/mmaction2-0.24.1/.pre-commit-config.yaml
new file mode 100644
index 00000000..5b8740eb
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.pre-commit-config.yaml
@@ -0,0 +1,52 @@
+exclude: ^tests/data/
+repos:
+ - repo: https://github.com/PyCQA/flake8
+ rev: 3.8.3
+ hooks:
+ - id: flake8
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.10.1
+ hooks:
+ - id: isort
+ - repo: https://github.com/pre-commit/mirrors-yapf
+ rev: v0.30.0
+ hooks:
+ - id: yapf
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.1.0
+ hooks:
+ - id: trailing-whitespace
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: requirements-txt-fixer
+ - id: double-quote-string-fixer
+ - id: check-merge-conflict
+ - id: fix-encoding-pragma
+ args: ["--remove"]
+ - id: mixed-line-ending
+ args: ["--fix=lf"]
+ - repo: https://github.com/executablebooks/mdformat
+ rev: 0.7.9
+ hooks:
+ - id: mdformat
+ args: ["--number"]
+ additional_dependencies:
+ - mdformat-openmmlab
+ - mdformat_frontmatter
+ - linkify-it-py
+ - repo: https://github.com/myint/docformatter
+ rev: v1.3.1
+ hooks:
+ - id: docformatter
+ args: ["--in-place", "--wrap-descriptions", "79"]
+ - repo: https://github.com/codespell-project/codespell
+ rev: v2.1.0
+ hooks:
+ - id: codespell
+ args: ["--skip", "*.ipynb,tools/data/hvu/label_map.json,docs_zh_CN/*", "-L", "te,nd,thre,Gool,gool"]
+ - repo: https://github.com/open-mmlab/pre-commit-hooks
+ rev: v0.2.0 # Use the ref you want to point at
+ hooks:
+ - id: check-algo-readme
+ - id: check-copyright
+ args: ["mmaction", "tests", "demo", "tools"] # these directories will be checked
diff --git a/openmmlab_test/mmaction2-0.24.1/.pylintrc b/openmmlab_test/mmaction2-0.24.1/.pylintrc
new file mode 100644
index 00000000..b1add44f
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.pylintrc
@@ -0,0 +1,624 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=
+
+# Specify a score threshold to be exceeded before program exits with error.
+fail-under=10
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS,configs
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
+# number of processors available to use.
+jobs=1
+
+# Control the amount of potential inferred values when inferring a single
+# object. This can help the performance when dealing with large functions or
+# complex, nested conditions.
+limit-inference-results=100
+
+# List of plugins (as comma separated values of python module names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages.
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable=import-outside-toplevel,
+ redefined-outer-name,
+ print-statement,
+ parameter-unpacking,
+ unpacking-in-except,
+ old-raise-syntax,
+ backtick,
+ long-suffix,
+ old-ne-operator,
+ old-octal-literal,
+ import-star-module-level,
+ non-ascii-bytes-literal,
+ raw-checker-failed,
+ bad-inline-option,
+ locally-disabled,
+ file-ignored,
+ suppressed-message,
+ useless-suppression,
+ deprecated-pragma,
+ use-symbolic-message-instead,
+ apply-builtin,
+ basestring-builtin,
+ buffer-builtin,
+ cmp-builtin,
+ coerce-builtin,
+ execfile-builtin,
+ file-builtin,
+ long-builtin,
+ raw_input-builtin,
+ reduce-builtin,
+ standarderror-builtin,
+ unicode-builtin,
+ xrange-builtin,
+ coerce-method,
+ delslice-method,
+ getslice-method,
+ setslice-method,
+ no-absolute-import,
+ old-division,
+ dict-iter-method,
+ dict-view-method,
+ next-method-called,
+ metaclass-assignment,
+ indexing-exception,
+ raising-string,
+ reload-builtin,
+ oct-method,
+ hex-method,
+ nonzero-method,
+ cmp-method,
+ input-builtin,
+ round-builtin,
+ intern-builtin,
+ unichr-builtin,
+ map-builtin-not-iterating,
+ zip-builtin-not-iterating,
+ range-builtin-not-iterating,
+ filter-builtin-not-iterating,
+ using-cmp-argument,
+ eq-without-hash,
+ div-method,
+ idiv-method,
+ rdiv-method,
+ exception-message-attribute,
+ invalid-str-codec,
+ sys-max-int,
+ bad-python3-import,
+ deprecated-string-function,
+ deprecated-str-translate-call,
+ deprecated-itertools-function,
+ deprecated-types-field,
+ next-method-defined,
+ dict-items-not-iterating,
+ dict-keys-not-iterating,
+ dict-values-not-iterating,
+ deprecated-operator-function,
+ deprecated-urllib-function,
+ xreadlines-attribute,
+ deprecated-sys-function,
+ exception-escape,
+ comprehension-escape,
+ no-member,
+ invalid-name,
+ too-many-branches,
+ wrong-import-order,
+ too-many-arguments,
+ missing-function-docstring,
+ missing-module-docstring,
+ too-many-locals,
+ too-few-public-methods,
+ abstract-method,
+ broad-except,
+ too-many-nested-blocks,
+ too-many-instance-attributes,
+ missing-class-docstring,
+ duplicate-code,
+ not-callable,
+ protected-access,
+ dangerous-default-value,
+ no-name-in-module,
+ logging-fstring-interpolation,
+ super-init-not-called,
+ redefined-builtin,
+ attribute-defined-outside-init,
+ arguments-differ,
+ cyclic-import,
+ bad-super-call,
+ too-many-statements,
+ line-too-long
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a score less than or equal to 10. You
+# have access to the variables 'error', 'warning', 'refactor', and 'convention'
+# which contain the number of messages in each category, as well as 'statement'
+# which is the total number of statements analyzed. This score is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details.
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages.
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=sys.exit
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# Tells whether to warn about missing members when the owner of the attribute
+# is inferred to be None.
+ignore-none=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+# List of decorators that change the signature of a decorated function.
+signature-mutators=
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes.
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains the private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the private dictionary (see the
+# --spelling-private-dict-file option) instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[LOGGING]
+
+# The type of string formatting that logging methods do. `old` means using %
+# formatting, `new` is for `{}` formatting.
+logging-format-style=old
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format.
+logging-modules=logging
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expected to
+# not be used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[STRING]
+
+# This flag controls whether inconsistent-quotes generates a warning when the
+# character used as a quote delimiter is used inconsistently within a module.
+check-quote-consistency=no
+
+# This flag controls whether the implicit-str-concat should generate a warning
+# on implicit string concatenation in sequences defined over several lines.
+check-str-concat-over-line-jumps=no
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+# Regular expression of note tags to take in consideration.
+#notes-rgx=
+
+
+[BASIC]
+
+# Naming style matching correct argument names.
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style.
+#argument-rgx=
+
+# Naming style matching correct attribute names.
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style.
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma.
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata
+
+# Bad variable names regexes, separated by a comma. If names match any regex,
+# they will always be refused
+bad-names-rgxs=
+
+# Naming style matching correct class attribute names.
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style.
+#class-attribute-rgx=
+
+# Naming style matching correct class names.
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-
+# style.
+#class-rgx=
+
+# Naming style matching correct constant names.
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style.
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names.
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style.
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+ _,
+ x,
+ y,
+ w,
+ h,
+ a,
+ b
+
+# Good variable names regexes, separated by a comma. If names match any regex,
+# they will always be accepted
+good-names-rgxs=
+
+# Include a hint for the correct naming format with invalid-name.
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names.
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style.
+#inlinevar-rgx=
+
+# Naming style matching correct method names.
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style.
+#method-rgx=
+
+# Naming style matching correct module names.
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style.
+#module-rgx=
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+# These decorators are taken in consideration only for invalid-name.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names.
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style.
+#variable-rgx=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method.
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[IMPORTS]
+
+# List of modules that can be imported at any level, not just the top level
+# one.
+allow-any-import-level=
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma.
+deprecated-modules=optparse,tkinter.tix
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled).
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled).
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled).
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+# Couples of modules and preferred modules, separated by a comma.
+preferred-modules=
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp,
+ __post_init__
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=cls
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "BaseException, Exception".
+overgeneral-exceptions=BaseException,
+ Exception
diff --git a/openmmlab_test/mmaction2-0.24.1/.readthedocs.yml b/openmmlab_test/mmaction2-0.24.1/.readthedocs.yml
new file mode 100644
index 00000000..73ea4cb7
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/.readthedocs.yml
@@ -0,0 +1,7 @@
+version: 2
+
+python:
+ version: 3.7
+ install:
+ - requirements: requirements/docs.txt
+ - requirements: requirements/readthedocs.txt
diff --git a/openmmlab_test/mmaction2-0.24.1/CITATION.cff b/openmmlab_test/mmaction2-0.24.1/CITATION.cff
new file mode 100644
index 00000000..93a03304
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/CITATION.cff
@@ -0,0 +1,8 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+ - name: "MMAction2 Contributors"
+title: "OpenMMLab's Next Generation Video Understanding Toolbox and Benchmark"
+date-released: 2020-07-21
+url: "https://github.com/open-mmlab/mmaction2"
+license: Apache-2.0
diff --git a/openmmlab_test/mmaction2-0.24.1/LICENSE b/openmmlab_test/mmaction2-0.24.1/LICENSE
new file mode 100644
index 00000000..04adf5cb
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/LICENSE
@@ -0,0 +1,203 @@
+Copyright 2018-2019 Open-MMLab. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2019 Open-MMLab.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/openmmlab_test/mmaction2-0.24.1/MANIFEST.in b/openmmlab_test/mmaction2-0.24.1/MANIFEST.in
new file mode 100644
index 00000000..258c4e01
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/MANIFEST.in
@@ -0,0 +1,3 @@
+include mmaction/.mim/model-index.yml
+recursive-include mmaction/.mim/configs *.py *.yml
+recursive-include mmaction/.mim/tools *.sh *.py
diff --git a/openmmlab_test/mmaction2-0.24.1/README.md b/openmmlab_test/mmaction2-0.24.1/README.md
new file mode 100644
index 00000000..95617f05
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/README.md
@@ -0,0 +1,320 @@
+
+[](https://mmaction2.readthedocs.io/en/latest/)
+[](https://github.com/open-mmlab/mmaction2/actions)
+[](https://codecov.io/gh/open-mmlab/mmaction2)
+[](https://pypi.org/project/mmaction2/)
+[](https://github.com/open-mmlab/mmaction2/blob/master/LICENSE)
+[](https://github.com/open-mmlab/mmaction2/issues)
+[](https://github.com/open-mmlab/mmaction2/issues)
+
+[📘Documentation](https://mmaction2.readthedocs.io/en/latest/) |
+[🛠️Installation](https://mmaction2.readthedocs.io/en/latest/install.html) |
+[👀Model Zoo](https://mmaction2.readthedocs.io/en/latest/modelzoo.html) |
+[🆕Update News](https://mmaction2.readthedocs.io/en/latest/changelog.html) |
+[🚀Ongoing Projects](https://github.com/open-mmlab/mmaction2/projects) |
+[🤔Reporting Issues](https://github.com/open-mmlab/mmaction2/issues/new/choose)
+
+
+
+English | [简体中文](/README_zh-CN.md)
+
+## Introduction
+
+MMAction2 is an open-source toolbox for video understanding based on PyTorch.
+It is a part of the [OpenMMLab](http://openmmlab.org/) project.
+
+The master branch works with **PyTorch 1.5+**.
+
+Demos:
+
+- Action Recognition Results on Kinetics-400
+- Skeleton-based Action Recognition Results on NTU-RGB+D-120
+- Skeleton-based Spatio-Temporal Action Detection and Action Recognition Results on Kinetics-400
+- Spatio-Temporal Action Detection Results on AVA-2.1
+
+## Major Features
+
+- **Modular design**: We decompose a video understanding framework into different components. One can easily construct a customized video understanding framework by combining different modules (see the config sketch after this list).
+
+- **Support four major video understanding tasks**: MMAction2 implements various algorithms for multiple video understanding tasks, including action recognition, action localization, spatio-temporal action detection, and skeleton-based action recognition. We support **27** different algorithms and **20** different datasets for the four major tasks.
+
+- **Well tested and documented**: We provide detailed documentation and API reference, as well as unit tests.
+
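+To make the modular design concrete, the snippet below is a minimal sketch of how a recognizer is declared (the component names mirror the base configs under `configs/_base_/models/`): a model is assembled from a backbone and a classification head, so either part can be swapped without touching the rest of the pipeline.
+
+```python
+# Sketch of a 2D recognizer assembled from interchangeable modules.
+model = dict(
+    type='Recognizer2D',
+    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50),
+    cls_head=dict(
+        type='TSNHead',
+        num_classes=400,
+        in_channels=2048,
+        spatial_type='avg',
+        consensus=dict(type='AvgConsensus', dim=1),
+        dropout_ratio=0.4))
+```
+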
+## What's New
+
+- (2022-03-04) We support **Multigrid** on Kinetics-400, achieving 76.07% Top-1 accuracy and a faster training speed.
+- (2021-11-24) We support **2s-AGCN** on NTU60 XSub, achieving 86.06% Top-1 accuracy on the joint stream and 86.89% Top-1 accuracy on the bone stream.
+- (2021-10-29) We provide a demo for skeleton-based and RGB-based spatio-temporal action detection and action recognition (demo/demo_video_structuralize.py).
+- (2021-10-26) We train and test **ST-GCN** on NTU60 with 3D keypoint annotations, achieving 84.61% Top-1 accuracy (higher than the 81.5% reported in the [paper](https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/17135)).
+- (2021-10-25) We provide a script (tools/data/skeleton/gen_ntu_rgbd_raw.py) to convert the NTU60 and NTU120 3D raw skeleton data to our format.
+- (2021-10-25) We provide a [guide](https://github.com/open-mmlab/mmaction2/blob/master/configs/skeleton/posec3d/custom_dataset_training.md) on how to train PoseC3D with custom datasets; [bit-scientist](https://github.com/bit-scientist) authored this PR!
+- (2021-10-16) We support **PoseC3D** on UCF101 and HMDB51, achieving 87.0% and 69.3% Top-1 accuracy with 2D skeletons only. Pre-extracted 2D skeletons are also available.
+
+**Release**: v0.24.0 was released on 05/05/2022. Please refer to [changelog.md](docs/changelog.md) for details and release history.
+
+## Installation
+
+MMAction2 depends on [PyTorch](https://pytorch.org/), [MMCV](https://github.com/open-mmlab/mmcv), [MMDetection](https://github.com/open-mmlab/mmdetection) (optional), and [MMPose](https://github.com/open-mmlab/mmpose) (optional).
+Below are quick steps for installation.
+Please refer to [install.md](docs/install.md) for more detailed instructions.
+
+```shell
+conda create -n open-mmlab python=3.8 pytorch=1.10 cudatoolkit=11.3 torchvision -c pytorch -y
+conda activate open-mmlab
+pip3 install openmim
+mim install mmcv-full
+mim install mmdet # optional
+mim install mmpose # optional
+git clone https://github.com/open-mmlab/mmaction2.git
+cd mmaction2
+pip3 install -e .
+```
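+
+To quickly verify the installation, the following is a minimal sketch (it assumes the demo video under `demo/` and the optional `decord` video backend are available; with `checkpoint=None` the model uses random weights, so the output is only meant to confirm that the pipeline runs):
+
+```python
+from mmaction.apis import inference_recognizer, init_recognizer
+
+# Build a recognizer from one of the bundled inference configs (random weights).
+config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py'
+model = init_recognizer(config_file, checkpoint=None, device='cpu')
+
+# Run the full decoding + inference pipeline on the demo video.
+results = inference_recognizer(model, 'demo/demo.mp4')
+print(results)  # list of (label index, score) pairs
+```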
+
+## Get Started
+
+Please see [getting_started.md](docs/getting_started.md) for the basic usage of MMAction2.
+There are also tutorials:
+
+- [learn about configs](docs/tutorials/1_config.md)
+- [finetuning models](docs/tutorials/2_finetune.md)
+- [adding new dataset](docs/tutorials/3_new_dataset.md)
+- [designing data pipeline](docs/tutorials/4_data_pipeline.md)
+- [adding new modules](docs/tutorials/5_new_modules.md)
+- [exporting model to onnx](docs/tutorials/6_export_model.md)
+- [customizing runtime settings](docs/tutorials/7_customize_runtime.md)
+
+A Colab tutorial is also provided. You may preview the notebook [here](demo/mmaction2_tutorial.ipynb) or directly [run](https://colab.research.google.com/github/open-mmlab/mmaction2/blob/master/demo/mmaction2_tutorial.ipynb) on Colab.
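+
+As a small illustration of the config system covered in the tutorials above (a sketch; the path below points at one of the base configs in this repository), a config can be loaded and overridden programmatically through MMCV before training or finetuning:
+
+```python
+from mmcv import Config
+
+# Load a base model config and adapt the classification head, e.g. to a
+# 101-class dataset such as UCF101, before launching finetuning.
+cfg = Config.fromfile('configs/_base_/models/tsn_r50.py')
+cfg.model.cls_head.num_classes = 101
+print(cfg.pretty_text)
+```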
+
+## Supported Methods
+
+
+
+Results and models are available in the *README.md* of each method's config directory.
+A summary can be found on the [**model zoo**](https://mmaction2.readthedocs.io/en/latest/recognition_models.html) page.
+
+We will keep up with the latest progress of the community and support more popular algorithms and frameworks.
+If you have any feature requests, please feel free to leave a comment in [Issues](https://github.com/open-mmlab/mmaction2/issues/19).
+
+## Supported Datasets
+
+
+
+Datasets marked with * are not fully supported yet, but related dataset preparation steps are provided. A summary can be found on the [**Supported Datasets**](https://mmaction2.readthedocs.io/en/latest/supported_datasets.html) page.
+
+## Benchmark
+
+To demonstrate the efficacy and efficiency of our framework, we compare MMAction2 with some other popular frameworks and official releases in terms of speed. Details can be found in [benchmark](docs/benchmark.md).
+
+## Data Preparation
+
+Please refer to [data_preparation.md](docs/data_preparation.md) for an overview of data preparation.
+The supported datasets are listed in [supported_datasets.md](docs/supported_datasets.md).
+
+## FAQ
+
+Please refer to [FAQ](docs/faq.md) for frequently asked questions.
+
+## Projects built on MMAction2
+
+Currently, there are many research works and projects built on MMAction2 by users from the community, such as:
+
+- Video Swin Transformer. [\[paper\]](https://arxiv.org/abs/2106.13230)[\[github\]](https://github.com/SwinTransformer/Video-Swin-Transformer)
+- Evidential Deep Learning for Open Set Action Recognition, ICCV 2021 **Oral**. [\[paper\]](https://arxiv.org/abs/2107.10161)[\[github\]](https://github.com/Cogito2012/DEAR)
+- Rethinking Self-supervised Correspondence Learning: A Video Frame-level Similarity Perspective, ICCV 2021 **Oral**. [\[paper\]](https://arxiv.org/abs/2103.17263)[\[github\]](https://github.com/xvjiarui/VFS)
+
+and more. Check [projects.md](docs/projects.md) to see all related projects.
+
+## Contributing
+
+We appreciate all contributions to improve MMAction2. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline.
+
+## Acknowledgement
+
+MMAction2 is an open-source project that is contributed by researchers and engineers from various colleges and companies.
+We appreciate all the contributors who implement their methods or add new features and users who give valuable feedback.
+We hope that the toolbox and benchmark can serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop new models.
+
+## Citation
+
+If you find this project useful in your research, please consider citing:
+
+```BibTeX
+@misc{2020mmaction2,
+ title={OpenMMLab's Next Generation Video Understanding Toolbox and Benchmark},
+ author={MMAction2 Contributors},
+ howpublished = {\url{https://github.com/open-mmlab/mmaction2}},
+ year={2020}
+}
+```
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Projects in OpenMMLab
+
+- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
+- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
+- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
+- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
+- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
+- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
+- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
+- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
diff --git a/openmmlab_test/mmaction2-0.24.1/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/README_zh-CN.md
new file mode 100644
index 00000000..d6a1e2af
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/README_zh-CN.md
@@ -0,0 +1,331 @@
+
+[](https://mmaction2.readthedocs.io/zh_CN/latest/)
+[](https://github.com/open-mmlab/mmaction2/actions)
+[](https://codecov.io/gh/open-mmlab/mmaction2)
+[](https://pypi.org/project/mmaction2/)
+[](https://github.com/open-mmlab/mmaction2/blob/master/LICENSE)
+[](https://github.com/open-mmlab/mmaction2/issues)
+[](https://github.com/open-mmlab/mmaction2/issues)
+
+[📘文档](https://mmaction2.readthedocs.io/en/latest/) |
+[🛠️安装指南](https://mmaction2.readthedocs.io/en/latest/install.html) |
+[👀模型库](https://mmaction2.readthedocs.io/en/latest/modelzoo.html) |
+[🆕更新](https://mmaction2.readthedocs.io/en/latest/changelog.html) |
+[🚀进行中项目](https://github.com/open-mmlab/mmaction2/projects) |
+[🤔问题反馈](https://github.com/open-mmlab/mmaction2/issues/new/choose)
+
+
+
+[English](/README.md) | 简体中文
+
+## 简介
+
+MMAction2 是一款基于 PyTorch 的视频理解开源工具箱,是 [OpenMMLab](http://openmmlab.org/) 项目的成员之一
+
+主分支代码目前支持 **PyTorch 1.5 以上**的版本
+
+示例:
+
+- Kinetics-400 上的动作识别
+- NTURGB+D-120 上的基于人体姿态的动作识别
+- Kinetics-400 上的基于 skeleton 的时空动作检测和动作识别
+- AVA-2.1 上的时空动作检测
+
+## 主要特性
+
+- **模块化设计**:MMAction2 将统一的视频理解框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的视频理解模型
+
+- **支持多种任务和数据集**:MMAction2 支持多种视频理解任务,包括动作识别,时序动作检测,时空动作检测以及基于人体姿态的动作识别,总共支持 **27** 种算法和 **20** 种数据集
+
+- **详尽的单元测试和文档**:MMAction2 提供了详尽的说明文档,API 接口说明,全面的单元测试,以供社区参考
+
+## 更新记录
+
+- (2021-11-24) 在 NTU60 XSub 上支持 **2s-AGCN**, 在 joint stream 和 bone stream 上分别达到 86.06% 和 86.89% 的识别准确率。
+- (2021-10-29) 支持基于 skeleton 模态和 rgb 模态的时空动作检测和行为识别 demo (demo/demo_video_structuralize.py)。
+- (2021-10-26) 在 NTU60 3d 关键点标注数据集上训练测试 **STGCN**, 可达到 84.61% (高于 [paper](https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/17135) 中的 81.5%) 的识别准确率。
+- (2021-10-25) 提供将 NTU60 和 NTU120 的 3d 骨骼点数据转换成我们项目的格式的脚本(tools/data/skeleton/gen_ntu_rgbd_raw.py)。
+- (2021-10-25) 提供使用自定义数据集训练 PoseC3D 的 [教程](https://github.com/open-mmlab/mmaction2/blob/master/configs/skeleton/posec3d/custom_dataset_training.md),此 PR 由用户 [bit-scientist](https://github.com/bit-scientist) 完成!
+- (2021-10-16) 在 UCF101, HMDB51 上支持 **PoseC3D**,仅用 2D 关键点就可分别达到 87.0% 和 69.3% 的识别准确率。两数据集的预提取骨架特征可以公开下载。
+
+v0.24.0 版本已于 2022 年 5 月 5 日发布,可通过查阅 [更新日志](/docs/changelog.md) 了解更多细节以及发布历史
+
+## 安装
+
+MMAction2 依赖 [PyTorch](https://pytorch.org/), [MMCV](https://github.com/open-mmlab/mmcv), [MMDetection](https://github.com/open-mmlab/mmdetection)(可选), [MMPose](https://github.com/open-mmlab/mmpose)(可选),以下是安装的简要步骤。
+更详细的安装指南请参考 [install.md](docs_zh_CN/install.md)。
+
+```shell
+conda create -n open-mmlab python=3.8 pytorch=1.10 cudatoolkit=11.3 torchvision -c pytorch -y
+conda activate open-mmlab
+pip3 install openmim
+mim install mmcv-full
+mim install mmdet # 可选
+mim install mmpose # 可选
+git clone https://github.com/open-mmlab/mmaction2.git
+cd mmaction2
+pip3 install -e .
+```
+
+## 教程
+
+请参考 [基础教程](/docs_zh_CN/getting_started.md) 了解 MMAction2 的基本使用。MMAction2也提供了其他更详细的教程:
+
+- [如何编写配置文件](/docs_zh_CN/tutorials/1_config.md)
+- [如何微调模型](/docs_zh_CN/tutorials/2_finetune.md)
+- [如何增加新数据集](/docs_zh_CN/tutorials/3_new_dataset.md)
+- [如何设计数据处理流程](/docs_zh_CN/tutorials/4_data_pipeline.md)
+- [如何增加新模块](/docs_zh_CN/tutorials/5_new_modules.md)
+- [如何导出模型为 onnx 格式](/docs_zh_CN/tutorials/6_export_model.md)
+- [如何自定义模型运行参数](/docs_zh_CN/tutorials/7_customize_runtime.md)
+
+MMAction2 也提供了相应的中文 Colab 教程,可以点击 [这里](https://colab.research.google.com/github/open-mmlab/mmaction2/blob/master/demo/mmaction2_tutorial_zh-CN.ipynb) 进行体验!
+
+## 模型库
+
+
+
+各个模型的结果和设置都可以在对应的 config 目录下的 *README_zh-CN.md* 中查看。整体的概况也可以在 [**模型库**](https://mmaction2.readthedocs.io/zh_CN/latest/recognition_models.html) 页面中查看
+
+MMAction2 将跟进学界的最新进展,并支持更多算法和框架。如果您对 MMAction2 有任何功能需求,请随时在 [问题](https://github.com/open-mmlab/mmaction2/issues/19) 中留言。
+
+## 数据集
+
+
+
+标记 * 代表对应数据集并未被完全支持,但提供相应的数据准备步骤。整体的概况也可以在 [**数据集**](https://mmaction2.readthedocs.io/en/latest/supported_datasets.html) 页面中查看
+
+## 基准测试
+
+为了验证 MMAction2 框架的高精度和高效率,开发成员将其与当前其他主流框架进行速度对比。更多详情可见 [基准测试](/docs_zh_CN/benchmark.md)
+
+## 数据集准备
+
+请参考 [数据准备](/docs_zh_CN/data_preparation.md) 了解数据集准备概况。所有支持的数据集都列于 [数据集清单](/docs_zh_CN/supported_datasets.md) 中
+
+## 常见问题
+
+请参考 [FAQ](/docs_zh_CN/faq.md) 了解其他用户的常见问题
+
+## 相关工作
+
+目前有许多研究工作或工程项目基于 MMAction2 搭建,例如:
+
+- Evidential Deep Learning for Open Set Action Recognition, ICCV 2021 **Oral**. [\[论文\]](https://arxiv.org/abs/2107.10161)[\[代码\]](https://github.com/Cogito2012/DEAR)
+- Rethinking Self-supervised Correspondence Learning: A Video Frame-level Similarity Perspective, ICCV 2021 **Oral**. [\[论文\]](https://arxiv.org/abs/2103.17263)[\[代码\]](https://github.com/xvjiarui/VFS)
+- Video Swin Transformer. [\[论文\]](https://arxiv.org/abs/2106.13230)[\[代码\]](https://github.com/SwinTransformer/Video-Swin-Transformer)
+
+更多详情可见 [相关工作](docs/projects.md)
+
+## 参与贡献
+
+我们非常欢迎用户对于 MMAction2 做出的任何贡献,可以参考 [贡献指南](/.github/CONTRIBUTING.md) 文件了解更多细节
+
+## 致谢
+
+MMAction2 是一款由不同学校和公司共同贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。
+我们希望该工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现现有算法并开发自己的新模型,从而不断为开源社区提供贡献。
+
+## 引用
+
+如果你觉得 MMAction2 对你的研究有所帮助,可以考虑引用它:
+
+```BibTeX
+@misc{2020mmaction2,
+ title={OpenMMLab's Next Generation Video Understanding Toolbox and Benchmark},
+ author={MMAction2 Contributors},
+ howpublished = {\url{https://github.com/open-mmlab/mmaction2}},
+ year={2020}
+}
+```
+
+## 许可
+
+该项目开源自 [Apache 2.0 license](/LICENSE)
+
+## OpenMMLab 的其他项目
+
+- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口
+- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具箱
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准
+- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准
+- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准
+- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准
+- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱
+- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱
+- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架
+
+## 欢迎加入 OpenMMLab 社区
+
+扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3)
+
+
+我们会在 OpenMMLab 社区为大家
+
+- 📢 分享 AI 框架的前沿核心技术
+- 💻 解读 PyTorch 常用模块源码
+- 📰 发布 OpenMMLab 的相关新闻
+- 🚀 介绍 OpenMMLab 开发的前沿算法
+- 🏃 获取更高效的问题答疑和意见反馈
+- 🔥 提供与各行各业开发者充分交流的平台
+
+干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/default_runtime.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/default_runtime.py
new file mode 100644
index 00000000..3bfa9752
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/default_runtime.py
@@ -0,0 +1,18 @@
+checkpoint_config = dict(interval=1)
+log_config = dict(
+ interval=20,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook'),
+ ])
+# runtime settings
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+
+# disable opencv multithreading to avoid system being overloaded
+opencv_num_threads = 0
+# set multi-process start method as `fork` to speed up the training
+mp_start_method = 'fork'
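+
+# Note: downstream configs usually do not modify this file; they inherit it via
+# MMCV's `_base_` mechanism and override individual keys, for example:
+#   _base_ = ['../../_base_/default_runtime.py']
+#   checkpoint_config = dict(interval=5)  # keep a checkpoint every 5 epochs instead of 1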
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/audioonly_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/audioonly_r50.py
new file mode 100644
index 00000000..d4a190c8
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/audioonly_r50.py
@@ -0,0 +1,18 @@
+# model settings
+model = dict(
+ type='AudioRecognizer',
+ backbone=dict(
+ type='ResNetAudio',
+ depth=50,
+ pretrained=None,
+ in_channels=1,
+ norm_eval=False),
+ cls_head=dict(
+ type='AudioTSNHead',
+ num_classes=400,
+ in_channels=1024,
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bmn_400x100.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bmn_400x100.py
new file mode 100644
index 00000000..edaccb98
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bmn_400x100.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='BMN',
+ temporal_dim=100,
+ boundary_ratio=0.5,
+ num_samples=32,
+ num_samples_per_bin=3,
+ feat_dim=400,
+ soft_nms_alpha=0.4,
+ soft_nms_low_threshold=0.5,
+ soft_nms_high_threshold=0.9,
+ post_process_top_k=100)
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_pem.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_pem.py
new file mode 100644
index 00000000..7acb7d31
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_pem.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='PEM',
+ pem_feat_dim=32,
+ pem_hidden_dim=256,
+ pem_u_ratio_m=1,
+ pem_u_ratio_l=2,
+ pem_high_temporal_iou_threshold=0.6,
+ pem_low_temporal_iou_threshold=0.2,
+ soft_nms_alpha=0.75,
+ soft_nms_low_threshold=0.65,
+ soft_nms_high_threshold=0.9,
+ post_process_top_k=100)
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_tem.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_tem.py
new file mode 100644
index 00000000..84a2b699
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/bsn_tem.py
@@ -0,0 +1,8 @@
+# model settings
+model = dict(
+ type='TEM',
+ temporal_dim=100,
+ boundary_ratio=0.1,
+ tem_feat_dim=400,
+ tem_hidden_dim=512,
+ tem_match_threshold=0.5)
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/c3d_sports1m_pretrained.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/c3d_sports1m_pretrained.py
new file mode 100644
index 00000000..1cdc3d49
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/c3d_sports1m_pretrained.py
@@ -0,0 +1,23 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='C3D',
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth', # noqa: E501
+ style='pytorch',
+ conv_cfg=dict(type='Conv3d'),
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU'),
+ dropout_ratio=0.5,
+ init_std=0.005),
+ cls_head=dict(
+ type='I3DHead',
+ num_classes=101,
+ in_channels=4096,
+ spatial_type=None,
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='score'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/i3d_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/i3d_r50.py
new file mode 100644
index 00000000..fee08bc2
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/i3d_r50.py
@@ -0,0 +1,27 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='ResNet3d',
+ pretrained2d=True,
+ pretrained='torchvision://resnet50',
+ depth=50,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=2,
+ pool1_stride_t=2,
+ conv_cfg=dict(type='Conv3d'),
+ norm_eval=False,
+ inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
+ zero_init_residual=False),
+ cls_head=dict(
+ type='I3DHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
+
+# This setting refers to https://github.com/open-mmlab/mmaction/blob/master/mmaction/models/tenons/backbones/resnet_i3d.py#L329-L332 # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/ircsn_r152.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/ircsn_r152.py
new file mode 100644
index 00000000..36e700c3
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/ircsn_r152.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='ResNet3dCSN',
+ pretrained2d=False,
+ pretrained=None,
+ depth=152,
+ with_pool2=False,
+ bottleneck_mode='ir',
+ norm_eval=False,
+ zero_init_residual=False),
+ cls_head=dict(
+ type='I3DHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob', max_testing_views=10))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/r2plus1d_r34.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/r2plus1d_r34.py
new file mode 100644
index 00000000..b5bcdac0
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/r2plus1d_r34.py
@@ -0,0 +1,28 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='ResNet2Plus1d',
+ depth=34,
+ pretrained=None,
+ pretrained2d=False,
+ norm_eval=False,
+ conv_cfg=dict(type='Conv2plus1d'),
+ norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
+ conv1_kernel=(3, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(1, 1, 1, 1),
+ spatial_strides=(1, 2, 2, 2),
+ temporal_strides=(1, 2, 2, 2),
+ zero_init_residual=False),
+ cls_head=dict(
+ type='I3DHead',
+ num_classes=400,
+ in_channels=512,
+ spatial_type='avg',
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowfast_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowfast_r50.py
new file mode 100644
index 00000000..afa8aab0
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowfast_r50.py
@@ -0,0 +1,39 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=8, # tau
+ speed_ratio=8, # alpha
+ channel_ratio=8, # beta_inv
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ norm_eval=False),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ norm_eval=False)),
+ cls_head=dict(
+ type='SlowFastHead',
+ in_channels=2304, # 2048+256
+ num_classes=400,
+ spatial_type='avg',
+ dropout_ratio=0.5),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowonly_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowonly_r50.py
new file mode 100644
index 00000000..13081786
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/slowonly_r50.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained='torchvision://resnet50',
+ lateral=False,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ norm_eval=False),
+ cls_head=dict(
+ type='I3DHead',
+ in_channels=2048,
+ num_classes=400,
+ spatial_type='avg',
+ dropout_ratio=0.5),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tanet_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tanet_r50.py
new file mode 100644
index 00000000..b20ea822
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tanet_r50.py
@@ -0,0 +1,20 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='TANet',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ num_segments=8,
+ tam_cfg=dict()),
+ cls_head=dict(
+ type='TSMHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.5,
+ init_std=0.001),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tin_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tin_r50.py
new file mode 100644
index 00000000..af9ac373
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tin_r50.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='ResNetTIN',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ norm_eval=False,
+ shift_div=4),
+ cls_head=dict(
+ type='TSMHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.5,
+ init_std=0.001,
+ is_shift=False),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips=None))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_slowonly_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_slowonly_r50.py
new file mode 100644
index 00000000..072e5e88
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_slowonly_r50.py
@@ -0,0 +1,40 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained='torchvision://resnet50',
+ lateral=False,
+ out_indices=(2, 3),
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ norm_eval=False),
+ neck=dict(
+ type='TPN',
+ in_channels=(1024, 2048),
+ out_channels=1024,
+ spatial_modulation_cfg=dict(
+ in_channels=(1024, 2048), out_channels=2048),
+ temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
+ upsample_cfg=dict(scale_factor=(1, 1, 1)),
+ downsample_cfg=dict(downsample_scale=(1, 1, 1)),
+ level_fusion_cfg=dict(
+ in_channels=(1024, 1024),
+ mid_channels=(1024, 1024),
+ out_channels=2048,
+ downsample_scales=((1, 1, 1), (1, 1, 1))),
+ aux_head_cfg=dict(out_channels=400, loss_weight=0.5)),
+ cls_head=dict(
+ type='TPNHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_tsm_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_tsm_r50.py
new file mode 100644
index 00000000..4a038669
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tpn_tsm_r50.py
@@ -0,0 +1,36 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='ResNetTSM',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ out_indices=(2, 3),
+ norm_eval=False,
+ shift_div=8),
+ neck=dict(
+ type='TPN',
+ in_channels=(1024, 2048),
+ out_channels=1024,
+ spatial_modulation_cfg=dict(
+ in_channels=(1024, 2048), out_channels=2048),
+ temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
+ upsample_cfg=dict(scale_factor=(1, 1, 1)),
+ downsample_cfg=dict(downsample_scale=(1, 1, 1)),
+ level_fusion_cfg=dict(
+ in_channels=(1024, 1024),
+ mid_channels=(1024, 1024),
+ out_channels=2048,
+ downsample_scales=((1, 1, 1), (1, 1, 1))),
+ aux_head_cfg=dict(out_channels=174, loss_weight=0.5)),
+ cls_head=dict(
+ type='TPNHead',
+ num_classes=174,
+ in_channels=2048,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob', fcn_test=True))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/trn_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/trn_r50.py
new file mode 100644
index 00000000..ff84e78c
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/trn_r50.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='ResNet',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ norm_eval=False,
+ partial_bn=True),
+ cls_head=dict(
+ type='TRNHead',
+ num_classes=400,
+ in_channels=2048,
+ num_segments=8,
+ spatial_type='avg',
+ relation_type='TRNMultiScale',
+ hidden_dim=256,
+ dropout_ratio=0.8,
+ init_std=0.001),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_mobilenet_v2.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_mobilenet_v2.py
new file mode 100644
index 00000000..bce81074
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_mobilenet_v2.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='MobileNetV2TSM',
+ shift_div=8,
+ num_segments=8,
+ is_shift=True,
+ pretrained='mmcls://mobilenet_v2'),
+ cls_head=dict(
+ type='TSMHead',
+ num_segments=8,
+ num_classes=400,
+ in_channels=1280,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.5,
+ init_std=0.001,
+ is_shift=True),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_r50.py
new file mode 100644
index 00000000..477497b6
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsm_r50.py
@@ -0,0 +1,21 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='ResNetTSM',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ norm_eval=False,
+ shift_div=8),
+ cls_head=dict(
+ type='TSMHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.5,
+ init_std=0.001,
+ is_shift=True),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50.py
new file mode 100644
index 00000000..d879ea69
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50.py
@@ -0,0 +1,19 @@
+# model settings
+model = dict(
+ type='Recognizer2D',
+ backbone=dict(
+ type='ResNet',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ norm_eval=False),
+ cls_head=dict(
+ type='TSNHead',
+ num_classes=400,
+ in_channels=2048,
+ spatial_type='avg',
+ consensus=dict(type='AvgConsensus', dim=1),
+ dropout_ratio=0.4,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips=None))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50_audio.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50_audio.py
new file mode 100644
index 00000000..2c3ab0df
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/tsn_r50_audio.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='AudioRecognizer',
+ backbone=dict(type='ResNet', depth=50, in_channels=1, norm_eval=False),
+ cls_head=dict(
+ type='AudioTSNHead',
+ num_classes=400,
+ in_channels=2048,
+ dropout_ratio=0.5,
+ init_std=0.01),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/x3d.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/x3d.py
new file mode 100644
index 00000000..10e30205
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/models/x3d.py
@@ -0,0 +1,14 @@
+# model settings
+model = dict(
+ type='Recognizer3D',
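+    # X3D expansion factors: gamma_w (width), gamma_b (bottleneck width) and
+    # gamma_d (depth), as defined in the X3D paper.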
+ backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2),
+ cls_head=dict(
+ type='X3DHead',
+ in_channels=432,
+ num_classes=400,
+ spatial_type='avg',
+ dropout_ratio=0.5,
+ fc1_bias=False),
+ # model training and testing settings
+ train_cfg=None,
+ test_cfg=dict(average_clips='prob'))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/adam_20e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/adam_20e.py
new file mode 100644
index 00000000..baa535f7
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/adam_20e.py
@@ -0,0 +1,7 @@
+# optimizer
+optimizer = dict(
+    type='Adam', lr=0.01, weight_decay=0.00001)  # this lr is used for 1 gpu
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=10)
+total_epochs = 20
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_100e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_100e.py
new file mode 100644
index 00000000..de37742b
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_100e.py
@@ -0,0 +1,10 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.01, # this lr is used for 8 gpus
+ momentum=0.9,
+ weight_decay=0.0001)
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[40, 80])
+total_epochs = 100
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_150e_warmup.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_150e_warmup.py
new file mode 100644
index 00000000..af33a7c4
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_150e_warmup.py
@@ -0,0 +1,13 @@
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.01, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[90, 130],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=10)
+total_epochs = 150
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_50e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_50e.py
new file mode 100644
index 00000000..9345715d
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_50e.py
@@ -0,0 +1,10 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.01, # this lr is used for 8 gpus
+ momentum=0.9,
+ weight_decay=0.0001)
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[20, 40])
+total_epochs = 50
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_100e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_100e.py
new file mode 100644
index 00000000..dbdc4739
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_100e.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ constructor='TSMOptimizerConstructor',
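+    # fc_lr5=True gives the final classification layer a larger (5x) learning
+    # rate, a common TSM fine-tuning trick.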
+ paramwise_cfg=dict(fc_lr5=True),
+ lr=0.01, # this lr is used for 8 gpus
+ momentum=0.9,
+ weight_decay=0.0001)
+optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[40, 80])
+total_epochs = 100
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_50e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_50e.py
new file mode 100644
index 00000000..24f4f344
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_50e.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ constructor='TSMOptimizerConstructor',
+ paramwise_cfg=dict(fc_lr5=True),
+ lr=0.01, # this lr is used for 8 gpus
+ momentum=0.9,
+ weight_decay=0.0001)
+optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[20, 40])
+total_epochs = 50
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_100e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_100e.py
new file mode 100644
index 00000000..63ed3f27
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_100e.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ constructor='TSMOptimizerConstructor',
+ paramwise_cfg=dict(fc_lr5=True),
+ lr=0.01, # this lr is used for 8 gpus
+ momentum=0.9,
+ weight_decay=0.00002)
+optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[40, 80])
+total_epochs = 100
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_50e.py b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_50e.py
new file mode 100644
index 00000000..78612def
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/_base_/schedules/sgd_tsm_mobilenet_v2_50e.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ constructor='TSMOptimizerConstructor',
+ paramwise_cfg=dict(fc_lr5=True),
+ lr=0.01, # this lr is used for 8 gpus
+ momentum=0.9,
+ weight_decay=0.00002)
+optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[20, 40])
+total_epochs = 50
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50.py
new file mode 100644
index 00000000..965338ea
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50.py
@@ -0,0 +1,43 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1)),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50_nl.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50_nl.py
new file mode 100644
index 00000000..fd2f739d
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/_base_/models/slowonly_r50_nl.py
@@ -0,0 +1,50 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1),
+ norm_cfg=dict(type='BN3d', requires_grad=True),
+ non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)),
+ non_local_cfg=dict(
+ sub_sample=True,
+ use_scale=True,
+ norm_cfg=dict(type='BN3d', requires_grad=True),
+ mode='embedded_gaussian')),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README.md b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README.md
new file mode 100644
index 00000000..18574fcb
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README.md
@@ -0,0 +1,97 @@
+# ACRN
+
+[Actor-centric relation network](https://openaccess.thecvf.com/content_ECCV_2018/html/Chen_Sun_Actor-centric_Relation_Network_ECCV_2018_paper.html)
+
+
+
+## Abstract
+
+
+
+Current state-of-the-art approaches for spatio-temporal action localization rely on detections at the frame level and model temporal context with 3D ConvNets. Here, we go one step further and model spatio-temporal relations to capture the interactions between human actors, relevant objects and scene elements essential to differentiate similar human actions. Our approach is weakly supervised and mines the relevant elements automatically with an actor-centric relational network (ACRN). ACRN computes and accumulates pair-wise relation information from actor and global scene features, and generates relation features for action classification. It is implemented as neural networks and can be trained jointly with an existing action detection system. We show that ACRN outperforms alternative approaches which capture relation information, and that the proposed framework improves upon the state-of-the-art performance on JHMDB and AVA. A visualization of the learned relation features confirms that our approach is able to attend to the relevant relations for each action.
+
+
+
+
+
+
+
+## Results and Models
+
+### AVA2.1
+
+| Model | Modality | Pretrained | Backbone | Input | gpus | mAP | log | json | ckpt |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :----------: | :------: | :---: | :--: | :--: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb](/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 27.1 | [log](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb-49b07bf2.pth) |
+
+### AVA2.2
+
+| Model | Modality | Pretrained | Backbone | Input | gpus | mAP | log | json | ckpt |
+| :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :----------: | :------: | :---: | :--: | :--: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 27.8 | [log](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-2be32625.pth) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs used to obtain the checkpoint.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the total batch size if you use a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 videos/gpu and lr=0.08 for 16 GPUs x 4 videos/gpu. A minimal sketch of this scaling is given right after the note.
+
+:::
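+
+As a rough illustration of the linear scaling rule above, the learning rate can be derived from the effective batch size. This is only a sketch; the baseline (lr=0.01 for a total batch size of 8, i.e. 4 GPUs x 2 videos/gpu) is taken from the example in the note.
+
+```python
+def scaled_lr(num_gpus, videos_per_gpu, base_lr=0.01, base_batch=8):
+    """Scale the learning rate linearly with the total batch size."""
+    return base_lr * (num_gpus * videos_per_gpu) / base_batch
+
+
+print(scaled_lr(4, 2))   # 0.01 for 4 GPUs x 2 videos/gpu
+print(scaled_lr(16, 4))  # 0.08 for 16 GPUs x 4 videos/gpu
+```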
+
+For more details on data preparation, you can refer to the AVA section in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train ACRN with SlowFast backbone on AVA with periodic validation.
+
+```shell
+python tools/train.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py --validate
+```
+
+For more details and a list of optional arguments, refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test ACRN with SlowFast backbone on AVA and dump the result to a csv file.
+
+```shell
+python tools/test.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
+```
+
+For more details and a list of optional arguments, refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+
+
+```BibTeX
+@inproceedings{gu2018ava,
+ title={Ava: A video dataset of spatio-temporally localized atomic visual actions},
+ author={Gu, Chunhui and Sun, Chen and Ross, David A and Vondrick, Carl and Pantofaru, Caroline and Li, Yeqing and Vijayanarasimhan, Sudheendra and Toderici, George and Ricco, Susanna and Sukthankar, Rahul and others},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={6047--6056},
+ year={2018}
+}
+```
+
+```BibTeX
+@inproceedings{sun2018actor,
+ title={Actor-centric relation network},
+ author={Sun, Chen and Shrivastava, Abhinav and Vondrick, Carl and Murphy, Kevin and Sukthankar, Rahul and Schmid, Cordelia},
+ booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
+ pages={318--334},
+ year={2018}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README_zh-CN.md
new file mode 100644
index 00000000..23ceb9fc
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/README_zh-CN.md
@@ -0,0 +1,81 @@
+# ACRN
+
+## 简介
+
+
+
+```BibTeX
+@inproceedings{gu2018ava,
+ title={Ava: A video dataset of spatio-temporally localized atomic visual actions},
+ author={Gu, Chunhui and Sun, Chen and Ross, David A and Vondrick, Carl and Pantofaru, Caroline and Li, Yeqing and Vijayanarasimhan, Sudheendra and Toderici, George and Ricco, Susanna and Sukthankar, Rahul and others},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={6047--6056},
+ year={2018}
+}
+```
+
+
+
+```BibTeX
+@inproceedings{sun2018actor,
+ title={Actor-centric relation network},
+ author={Sun, Chen and Shrivastava, Abhinav and Vondrick, Carl and Murphy, Kevin and Sukthankar, Rahul and Schmid, Cordelia},
+ booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
+ pages={318--334},
+ year={2018}
+}
+```
+
+## 模型库
+
+### AVA2.1
+
+| 配置文件 | 模态 | 预训练 | 主干网络 | 输入 | GPU 数量 | mAP | log | json | ckpt |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :----------: | :------: | :--: | :------: | :--: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb](/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 27.1 | [log](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb-49b07bf2.pth) |
+
+### AVA2.2
+
+| 配置文件 | 模态 | 预训练 | 主干网络 | 输入 | GPU 数量 | mAP | log | json | ckpt |
+| :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :----------: | :------: | :--: | :------: | :--: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 27.8 | [log](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-2be32625.pth) |
+
+- 注:
+
+1. 这里的 **GPU 数量** 指的是得到模型权重文件对应的 GPU 个数。默认地,MMAction2 所提供的配置文件对应使用 8 块 GPU 进行训练的情况。
+ 依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
+ 如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
+
+对于数据集准备的细节,用户可参考 [数据准备](/docs_zh_CN/data_preparation.md)。
+
+## 如何训练
+
+用户可以使用以下指令进行模型训练。
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+例如:在 AVA 数据集上训练 ACRN 辅以 SlowFast 主干网络,并定期验证。
+
+```shell
+python tools/train.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py --validate
+```
+
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE) 中的 **训练配置** 部分。
+
+## 如何测试
+
+用户可以使用以下指令进行模型测试。
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+例如:在 AVA 上测试 ACRN 辅以 SlowFast 主干网络,并将结果存为 csv 文件。
+
+```shell
+python tools/test.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
+```
+
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86) 中的 **测试某个数据集** 部分。
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/metafile.yml
new file mode 100644
index 00000000..50cacc7f
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/metafile.yml
@@ -0,0 +1,49 @@
+Collections:
+- Name: ACRN
+ README: configs/detection/acrn/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1807.10982
+ Title: Actor-Centric Relation Network
+Models:
+- Config: configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py
+ In Collection: ACRN
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 6
+ Epochs: 10
+ Input: 32x2
+ Modality: RGB
+ Parameters: 92232057
+ Pretrained: Kinetics-400
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Name: slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 27.1
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.log
+ Weights: https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb-49b07bf2.pth
+- Config: configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
+ In Collection: ACRN
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 6
+ Epochs: 10
+ Input: 32x2
+ Modality: RGB
+ Parameters: 92232057
+ Pretrained: Kinetics-400
+ Training Data: AVA v2.2
+ Training Resources: 8 GPUs
+ Name: slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb
+ Results:
+ - Dataset: AVA v2.2
+ Metrics:
+ mAP: 27.8
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log
+ Weights: https://download.openmmlab.com/mmaction/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-2be32625.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
new file mode 100644
index 00000000..d42ef11e
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
@@ -0,0 +1,170 @@
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=4,
+ speed_ratio=4,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ fusion_kernel=7,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True,
+ temporal_pool_mode='max'),
+ shared_head=dict(type='ACRNHead', in_channels=4608, out_channels=2304),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ dropout_ratio=0.5,
+ in_channels=2304,
+ num_classes=81,
+ multilabel=True)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.2.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.2.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+# optimizer
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=2,
+ warmup_ratio=0.1)
+total_epochs = 10
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1)
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb' # noqa: E501
+load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' # noqa: E501
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py
new file mode 100644
index 00000000..4d069cbb
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb.py
@@ -0,0 +1,170 @@
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=4,
+ speed_ratio=4,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ fusion_kernel=7,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True,
+ temporal_pool_mode='max'),
+ shared_head=dict(type='ACRNHead', in_channels=4608, out_channels=2304),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ dropout_ratio=0.5,
+ in_channels=2304,
+ num_classes=81,
+ multilabel=True)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+# optimizer
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=2,
+ warmup_ratio=0.1)
+total_epochs = 10
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1)
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava_rgb'  # noqa: E501
+load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' # noqa: E501
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README.md b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README.md
new file mode 100644
index 00000000..f46a3961
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README.md
@@ -0,0 +1,146 @@
+# AVA
+
+[Ava: A video dataset of spatio-temporally localized atomic visual actions](https://openaccess.thecvf.com/content_cvpr_2018/html/Gu_AVA_A_Video_CVPR_2018_paper.html)
+
+
+
+
+
+
+
+## Abstract
+
+
+
+This paper introduces a video dataset of spatio-temporally localized Atomic Visual Actions (AVA). The AVA dataset densely annotates 80 atomic visual actions in 430 15-minute video clips, where actions are localized in space and time, resulting in 1.58M action labels with multiple labels per person occurring frequently. The key characteristics of our dataset are: (1) the definition of atomic visual actions, rather than composite actions; (2) precise spatio-temporal annotations with possibly multiple annotations for each person; (3) exhaustive annotation of these atomic actions over 15-minute video clips; (4) people temporally linked across consecutive segments; and (5) using movies to gather a varied set of action representations. This departs from existing datasets for spatio-temporal action recognition, which typically provide sparse annotations for composite actions in short video clips. We will release the dataset publicly.
+AVA, with its realistic scene and action complexity, exposes the intrinsic difficulty of action recognition. To benchmark this, we present a novel approach for action localization that builds upon the current state-of-the-art methods, and demonstrates better performance on JHMDB and UCF101-24 categories. While setting a new state of the art on existing datasets, the overall results on AVA are low at 15.6% mAP, underscoring the need for developing new approaches for video understanding.
+
+
+
+
+
+
+
+
+
+```BibTeX
+@inproceedings{feichtenhofer2019slowfast,
+ title={Slowfast networks for video recognition},
+ author={Feichtenhofer, Christoph and Fan, Haoqi and Malik, Jitendra and He, Kaiming},
+ booktitle={Proceedings of the IEEE international conference on computer vision},
+ pages={6202--6211},
+ year={2019}
+}
+```
+
+## Results and Models
+
+### AVA2.1
+
+| Model | Modality | Pretrained | Backbone | Input | gpus | Resolution | mAP | log | json | ckpt |
+| :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :----------: | :-------: | :---: | :--: | :------------: | :---: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 4x16 | 8 | short-side 256 | 20.1 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth) |
+| [slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | OmniSource | ResNet50 | 4x16 | 8 | short-side 256 | 21.8 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201217-0c6d2e98.pth) |
+| [slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb](/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 4x16 | 8 | short-side 256 | 21.75 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/20210316_122517.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/20210316_122517.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb_20210316-959829ec.pth) |
+| [slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb](/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 8x8 | 8x2 | short-side 256 | 23.79 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/20210316_122517.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/20210316_122517.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb_20210316-5742e4dd.pth) |
+| [slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet101 | 8x8 | 8x2 | short-side 256 | 24.6 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201217-1c9b4117.pth) |
+| [slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb](/configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py) | RGB | OmniSource | ResNet101 | 8x8 | 8x2 | short-side 256 | 25.9 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth) |
+| [slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8x2 | short-side 256 | 24.4 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-6e7c704d.pth) |
+| [slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8x2 | short-side 256 | 25.4 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222-f4d209c9.pth) |
+| [slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8x2 | short-side 256 | 25.5 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217-ae225e97.pth) |
+
+### AVA2.2
+
+| Model | Modality | Pretrained | Backbone | Input | gpus | mAP | log | json | ckpt |
+| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :----------: | :------: | :---: | :--: | :--: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 26.1 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-b987b516.pth) |
+| [slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 26.4 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-874e0845.pth) |
+| [slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 26.8 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-345618cd.pth) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs used to obtain the checkpoint.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you should scale the learning rate proportionally to the total batch size when using a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 videos/gpu and lr=0.08 for 16 GPUs x 4 videos/gpu (see the short sketch below).
+2. **Context** indicates that both the RoI feature and the global pooled feature are used for classification, which generally brings around 1% mAP improvement.
+
+:::
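+
+The scaling rule above is simple enough to write down directly. Below is a minimal sketch (a hypothetical helper, not part of MMAction2) that scales the learning rate with the total batch size:
+
+```python
+def scale_lr(base_lr=0.01, base_gpus=4, base_videos_per_gpu=2,
+             num_gpus=16, videos_per_gpu=4):
+    """Scale the learning rate proportionally to the total batch size."""
+    base_batch = base_gpus * base_videos_per_gpu  # 4 GPUs x 2 videos/gpu = 8
+    new_batch = num_gpus * videos_per_gpu         # 16 GPUs x 4 videos/gpu = 64
+    return base_lr * new_batch / base_batch       # 0.01 * 64 / 8 = 0.08
+
+
+print(scale_lr())  # 0.08, matching the example in the note above
+```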
+
+For more details on data preparation, please refer to the AVA section in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train SlowOnly model on AVA with periodic validation.
+
+```shell
+python tools/train.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py --validate
+```
+
+For more details and optional arguments, please refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+### Train Custom Classes From AVA Dataset
+
+You can train on a custom subset of AVA classes. AVA suffers from severe class imbalance: there are more than 100,000 samples for classes like `stand`/`listen to (a person)`/`talk to (e.g., self, a person, a group)`/`watch (a person)`, whereas half of all classes have fewer than 500 samples each. In most cases, training only on the custom classes with fewer samples leads to better results on those classes.
+
+Training custom classes takes three steps (a partial config sketch follows this list):
+
+- Step 1: Select custom classes from the original classes and list them as `custom_classes`. Class `0` should not be selected, since it is reserved for further usage (to identify whether a proposal is positive or negative, not implemented yet) and will be added automatically.
+- Step 2: Set `num_classes`. To be compatible with the current code, please make sure that `num_classes == len(custom_classes) + 1`.
+  - The new class `0` corresponds to the original class `0`. The new class `i` (i > 0) corresponds to the original class `custom_classes[i-1]`.
+  - There are three `num_classes` fields in an AVA config: `model -> roi_head -> bbox_head -> num_classes`, `data -> train -> num_classes` and `data -> val -> num_classes`.
+  - If `num_classes <= 5`, the input argument `topk` of `BBoxHeadAVA` should be modified. The default value of `topk` is `(3, 5)`, and all elements of `topk` must be smaller than `num_classes`.
+- Step 3: Make sure all custom classes are contained in `label_file`. Note that there are two label files: `ava_action_list_v2.1_for_activitynet_2018.pbtxt` (contains 60 classes; 20 classes are missing) and `ava_action_list_v2.1.pbtxt` (contains all 80 classes).
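+
+The following partial sketch shows only the fields touched by these three steps; the values mirror the provided `*_custom_classes` configs, and it is not a complete, standalone config:
+
+```python
+custom_classes = [3, 6, 10, 27, 29, 38, 41, 48, 51, 53, 54, 59, 61, 64, 70, 72]
+num_classes = len(custom_classes) + 1  # +1 for the reserved class 0
+
+model = dict(
+    roi_head=dict(
+        bbox_head=dict(
+            type='BBoxHeadAVA',
+            num_classes=num_classes,
+            # only needed when num_classes <= 5: every element of topk
+            # (default (3, 5)) must be smaller than num_classes
+            # topk=(1, 2),
+        )))
+
+# use the label file that contains all 80 classes
+label_file = 'data/ava/annotations/ava_action_list_v2.1.pbtxt'
+
+data = dict(
+    train=dict(num_classes=num_classes, custom_classes=custom_classes),
+    val=dict(num_classes=num_classes, custom_classes=custom_classes))
+```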
+
+Take `slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb` as an example and train on the custom classes whose AP falls in the range `(0.1, 0.3)`, i.e. `[3, 6, 10, 27, 29, 38, 41, 48, 51, 53, 54, 59, 61, 64, 70, 72]`. Note that these AP values are computed with the original checkpoint, which was trained on all 80 classes. The results are listed as follows.
+
+| training classes | mAP(custom classes) | config | log | json | ckpt |
+| :--------------: | :-----------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| All 80 classes | 0.1948 | [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth) |
+| custom classes | 0.3311 | [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes-4ab80419.pth) |
+| All 80 classes | 0.1864 | [slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py](/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-6e7c704d.pth) |
+| custom classes | 0.3785 | [slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes](/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes_20210305.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes_20210305.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes_20210305-c6225546.pth) |
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test SlowOnly model on AVA and dump the result to a csv file.
+
+```shell
+python tools/test.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
+```
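+
+Assuming the dumped `results.csv` follows the common AVA-style result format (one prediction per row: video_id, timestamp, x1, y1, x2, y2, action_id, score), a small hypothetical sketch like the one below can be used to take a quick look at it:
+
+```python
+import csv
+from collections import Counter
+
+with open('results.csv') as f:
+    rows = list(csv.reader(f))
+
+print(f'{len(rows)} predicted boxes')
+# count the most frequently predicted action ids (column 7 in the assumed format)
+print(Counter(row[6] for row in rows).most_common(5))
+```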
+
+For more details and optional arguments, please refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+
+
+```BibTeX
+@inproceedings{gu2018ava,
+ title={Ava: A video dataset of spatio-temporally localized atomic visual actions},
+ author={Gu, Chunhui and Sun, Chen and Ross, David A and Vondrick, Carl and Pantofaru, Caroline and Li, Yeqing and Vijayanarasimhan, Sudheendra and Toderici, George and Ricco, Susanna and Sukthankar, Rahul and others},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={6047--6056},
+ year={2018}
+}
+```
+
+```BibTeX
+@article{duan2020omni,
+ title={Omni-sourced Webly-supervised Learning for Video Recognition},
+ author={Duan, Haodong and Zhao, Yue and Xiong, Yuanjun and Liu, Wentao and Lin, Dahua},
+ journal={arXiv preprint arXiv:2003.13042},
+ year={2020}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README_zh-CN.md
new file mode 100644
index 00000000..1b4b2b08
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/README_zh-CN.md
@@ -0,0 +1,129 @@
+# AVA
+
+
+

+
+
+## 简介
+
+
+
+```BibTeX
+@inproceedings{gu2018ava,
+ title={Ava: A video dataset of spatio-temporally localized atomic visual actions},
+ author={Gu, Chunhui and Sun, Chen and Ross, David A and Vondrick, Carl and Pantofaru, Caroline and Li, Yeqing and Vijayanarasimhan, Sudheendra and Toderici, George and Ricco, Susanna and Sukthankar, Rahul and others},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={6047--6056},
+ year={2018}
+}
+```
+
+
+
+```BibTeX
+@article{duan2020omni,
+ title={Omni-sourced Webly-supervised Learning for Video Recognition},
+ author={Duan, Haodong and Zhao, Yue and Xiong, Yuanjun and Liu, Wentao and Lin, Dahua},
+ journal={arXiv preprint arXiv:2003.13042},
+ year={2020}
+}
+```
+
+
+
+```BibTeX
+@inproceedings{feichtenhofer2019slowfast,
+ title={Slowfast networks for video recognition},
+ author={Feichtenhofer, Christoph and Fan, Haoqi and Malik, Jitendra and He, Kaiming},
+ booktitle={Proceedings of the IEEE international conference on computer vision},
+ pages={6202--6211},
+ year={2019}
+}
+```
+
+## 模型库
+
+### AVA2.1
+
+| 配置文件 | 模态 | 预训练 | 主干网络 | 输入 | GPU 数量 | 分辨率 | mAP | log | json | ckpt |
+| :--------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :----------: | :-------: | :--: | :------: | :------: | :---: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 4x16 | 8 | 短边 256 | 20.1 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth) |
+| [slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | OmniSource | ResNet50 | 4x16 | 8 | 短边 256 | 21.8 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201217-0c6d2e98.pth) |
+| [slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb](/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 4x16 | 8 | 短边 256 | 21.75 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/20210316_122517.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/20210316_122517.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb_20210316-959829ec.pth) |
+| [slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb](/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 8x8 | 8x2 | 短边 256 | 23.79 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/20210316_122517.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/20210316_122517.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb_20210316-5742e4dd.pth) |
+| [slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet101 | 8x8 | 8x2 | 短边 256 | 24.6 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201217-1c9b4117.pth) |
+| [slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb](/configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py) | RGB | OmniSource | ResNet101 | 8x8 | 8x2 | 短边 256 | 25.9 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth) |
+| [slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8x2 | 短边 256 | 24.4 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-6e7c704d.pth) |
+| [slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8x2 | 短边 256 | 25.4 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222-f4d209c9.pth) |
+| [slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8x2 | 短边 256 | 25.5 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217-ae225e97.pth) |
+
+### AVA2.2
+
+| 配置文件 | 模态 | 预训练 | 主干网络 | 输入 | GPU 数量 | mAP | log | json | ckpt |
+| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :----------: | :------: | :--: | :------: | :--: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 26.1 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-b987b516.pth) |
+| [slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 26.4 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-874e0845.pth) |
+| [slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb](/configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py) | RGB | Kinetics-400 | ResNet50 | 32x2 | 8 | 26.8 | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-345618cd.pth) |
+
+注:
+
+1. 这里的 **GPU 数量** 指的是得到模型权重文件对应的 GPU 个数。默认地,MMAction2 所提供的配置文件对应使用 8 块 GPU 进行训练的情况。
+ 依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
+ 如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
+2. **Context** 表示同时使用 RoI 特征与全局特征进行分类,可带来约 1% mAP 的提升。
+
+对于数据集准备的细节,用户可参考 [数据准备](/docs_zh_CN/data_preparation.md)。
+
+## 如何训练
+
+用户可以使用以下指令进行模型训练。
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+例如:在 AVA 数据集上训练 SlowOnly,并定期验证。
+
+```shell
+python tools/train.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py --validate
+```
+
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE) 中的 **训练配置** 部分。
+
+### 训练 AVA 数据集中的自定义类别
+
+用户可以训练 AVA 数据集中的自定义类别。AVA 中不同类别的样本量很不平衡:其中有超过 100000 样本的类别: `stand`/`listen to (a person)`/`talk to (e.g., self, a person, a group)`/`watch (a person)`,也有样本较少的类别(半数类别不足 500 样本)。大多数情况下,仅使用样本较少的类别进行训练将在这些类别上得到更好精度。
+
+训练 AVA 数据集中的自定义类别包含 3 个步骤:
+
+1. 从原先的类别中选择希望训练的类别,将其填写至配置文件的 `custom_classes` 域中。其中 `0` 不表示具体的动作类别,不应被选择。
+2. 将 `num_classes` 设置为 `num_classes = len(custom_classes) + 1`。
+ - 在新的类别到编号的对应中,编号 `0` 仍对应原类别 `0`,编号 `i` (i > 0) 对应原类别 `custom_classes[i-1]`。
+ - 配置文件中 3 处涉及 `num_classes` 需要修改:`model -> roi_head -> bbox_head -> num_classes`, `data -> train -> num_classes`, `data -> val -> num_classes`.
+ - 若 `num_classes <= 5`, 配置文件 `BBoxHeadAVA` 中的 `topk` 参数应被修改。`topk` 的默认值为 `(3, 5)`,`topk` 中的所有元素应小于 `num_classes`。
+3. 确认所有自定义类别在 `label_file` 中。
+
+以 `slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb` 为例,这一配置文件训练所有 AP 在 `(0.1, 0.3)` 间的类别(这里的 AP 为 AVA 80 类训出模型的表现),即 `[3, 6, 10, 27, 29, 38, 41, 48, 51, 53, 54, 59, 61, 64, 70, 72]`。下表列出了自定义类别训练的模型精度:
+
+| 训练类别 | mAP (自定义类别) | 配置文件 | log | json | ckpt |
+| :--------: | :----------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| 全部 80 类 | 0.1948 | [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth) |
+| 自定义类别 | 0.3311 | [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes-4ab80419.pth) |
+| 全部 80 类 | 0.1864 | [slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-6e7c704d.pth) |
+| 自定义类别 | 0.3785 | [slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes](/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py) | [log](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes_20210305.log) | [json](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes_20210305.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes_20210305-c6225546.pth) |
+
+## 如何测试
+
+用户可以使用以下指令进行模型测试。
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+例如:在 AVA 上测试 SlowOnly 模型,并将结果存为 csv 文件。
+
+```shell
+python tools/test.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
+```
+
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86) 中的 **测试某个数据集** 部分。
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/metafile.yml
new file mode 100644
index 00000000..971abd7b
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/metafile.yml
@@ -0,0 +1,259 @@
+Collections:
+- Name: AVA
+ README: configs/detection/ava/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1705.08421
+ Title: "AVA: A Video Dataset of Spatio-temporally Localized Atomic Visual Actions"
+Models:
+- Config: configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 16
+ Epochs: 20
+ Input: 4x16
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 20.1
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth
+- Config: configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 16
+ Epochs: 20
+ Input: 4x16
+ Pretrained: OmniSource
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 21.8
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201127.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201127.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb_20201217-0c6d2e98.pth
+- Config: configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 10
+ Input: 4x16
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 21.75
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/20210316_122517.log.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/20210316_122517.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb_20210316-959829ec.pth
+- Config: configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 6
+ Epochs: 10
+ Input: 8x8
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 23.79
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/20210316_122517.log.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/20210316_122517.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb_20210316-5742e4dd.pth
+- Config: configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet101
+ Batch Size: 6
+ Epochs: 20
+ Input: 8x8
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 24.6
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201127.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201127.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb_20201217-1c9b4117.pth
+- Config: configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet101
+ Batch Size: 6
+ Epochs: 20
+ Input: 8x8
+ Pretrained: OmniSource
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 25.9
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201127.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201127.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth
+- Config: configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 9
+ Epochs: 20
+ Input: 32x2
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 24.4
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-6e7c704d.pth
+- Config: configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 9
+ Epochs: 20
+ Input: 32x2
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 25.4
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201222-f4d209c9.pth
+- Config: configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 5
+ Epochs: 20
+ Input: 32x2
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 25.5
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb_20201217-ae225e97.pth
+- Config: configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 6
+ Epochs: 10
+ Input: 32x2
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.2
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb
+ Results:
+ - Dataset: AVA v2.2
+ Metrics:
+ mAP: 26.1
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-b987b516.pth
+- Config: configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 6
+ Epochs: 10
+ Input: 32x2
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.2
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb
+ Results:
+ - Dataset: AVA v2.2
+ Metrics:
+ mAP: 26.8
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-345618cd.pth
+- Config: configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
+ In Collection: AVA
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 6
+ Epochs: 10
+ Input: 32x2
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.2
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb
+ Results:
+ - Dataset: AVA v2.2
+ Metrics:
+ mAP: 26.4
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.log
+ Weights: https://download.openmmlab.com/mmaction/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb-874e0845.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..a180bb91
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,175 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=8,
+ speed_ratio=8,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True,
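+            # 'Context' in the README: concatenate a globally pooled feature
+            # with the RoI feature, hence in_channels=4608 (2 x 2304) below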
+ with_global=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=4608,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# The test pipeline does not apply any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=9,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.1125, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowfast_context_kinetics_pretrained_r50_4x16x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowfast/'
+ 'slowfast_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowfast_r50_4x16x1_256e_kinetics400_rgb_20200704-bcde7ed7.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..f649374a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,174 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=8,
+ speed_ratio=8,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2304,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# The test pipeline does not apply any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=9,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.1125, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowfast/'
+ 'slowfast_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowfast_r50_4x16x1_256e_kinetics400_rgb_20200704-bcde7ed7.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py
new file mode 100644
index 00000000..413065cb
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py
@@ -0,0 +1,184 @@
+# custom classes of ava dataset
+# Here we choose classes with AP in range [0.1, 0.3)
+# AP is calculated by **slowonly** ckpt, which is trained by all 80 classes
+custom_classes = [3, 6, 10, 27, 29, 38, 41, 48, 51, 53, 54, 59, 61, 64, 70, 72]
+num_classes = len(custom_classes) + 1
+
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=8,
+ speed_ratio=8,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2304,
+ num_classes=num_classes,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# The test pipeline does not apply any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=9,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ num_classes=num_classes,
+ custom_classes=custom_classes,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ num_classes=num_classes,
+ custom_classes=custom_classes,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.1125, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
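+# If you train with a different setup, scale it linearly, e.g.
+# lr = 0.1125 * (num_gpus * videos_per_gpu) / (8 * 9); treat the result as a
+# starting point rather than a guarantee (linear scaling rule).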
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.05)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowfast/'
+ 'slowfast_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowfast_r50_4x16x1_256e_kinetics400_rgb_20200704-bcde7ed7.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py
new file mode 100644
index 00000000..7c3826d8
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py
@@ -0,0 +1,175 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=4,
+ speed_ratio=4,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ fusion_kernel=7,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2304,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=5,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowfast/'
+ 'slowfast_r50_8x8x1_256e_kinetics400_rgb/'
+ 'slowfast_r50_8x8x1_256e_kinetics400_rgb_20200704-73547d2b.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
new file mode 100644
index 00000000..9fa024f2
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
@@ -0,0 +1,168 @@
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=4,
+ speed_ratio=4,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ fusion_kernel=7,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ dropout_ratio=0.5,
+ in_channels=2304,
+ num_classes=81,
+ multilabel=True)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.2.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.2.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+# optimizer
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=2,
+ warmup_ratio=0.1)
+total_epochs = 10
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1)
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/slowfast_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb' # noqa: E501
+load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' # noqa: E501
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
new file mode 100644
index 00000000..71af48e1
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
@@ -0,0 +1,171 @@
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=4,
+ speed_ratio=4,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ fusion_kernel=7,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True,
+ temporal_pool_mode='max'),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ dropout_ratio=0.5,
+ in_channels=2304,
+ focal_alpha=3.0,
+ focal_gamma=1.0,
+ num_classes=81,
+ multilabel=True)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.2.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.2.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+# optimizer
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=2,
+ warmup_ratio=0.1)
+total_epochs = 10
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1)
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/slowfast_temporal_max_focal_alpha3_gamma1_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb' # noqa: E501
+load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' # noqa: E501
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
new file mode 100644
index 00000000..a4979d9b
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py
@@ -0,0 +1,169 @@
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowFast',
+ pretrained=None,
+ resample_rate=4,
+ speed_ratio=4,
+ channel_ratio=8,
+ slow_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=True,
+ fusion_kernel=7,
+ conv1_kernel=(1, 7, 7),
+ dilations=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ inflate=(0, 0, 1, 1),
+ spatial_strides=(1, 2, 2, 1)),
+ fast_pathway=dict(
+ type='resnet3d',
+ depth=50,
+ pretrained=None,
+ lateral=False,
+ base_channels=8,
+ conv1_kernel=(5, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1))),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True,
+ temporal_pool_mode='max'),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ dropout_ratio=0.5,
+ in_channels=2304,
+ num_classes=81,
+ multilabel=True)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.2.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.2.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+# optimizer
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=2,
+ warmup_ratio=0.1)
+total_epochs = 10
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1)
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/slowfast_temporal_max_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb' # noqa: E501
+load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' # noqa: E501
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py
new file mode 100644
index 00000000..ecc89f7a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb.py
@@ -0,0 +1,158 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=101,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1)),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=8, frame_interval=8),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=8, frame_interval=8, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ # During testing, each video may have a different shape
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_kinetics_pretrained_r101_8x8x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'omni/slowonly_r101_without_omni_8x8x1_'
+ 'kinetics400_rgb_20200926-0c730aef.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..54df99e5
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,158 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1)),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=16,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py
new file mode 100644
index 00000000..30d9ba82
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom_classes.py
@@ -0,0 +1,169 @@
+# Custom classes of the AVA dataset
+# Here we choose classes with AP in the range [0.1, 0.3)
+# AP is calculated with the original ckpt, which is trained on all 80 classes
+custom_classes = [3, 6, 10, 27, 29, 38, 41, 48, 51, 53, 54, 59, 61, 64, 70, 72]
+num_classes = len(custom_classes) + 1
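+# The "+ 1" keeps index 0 unused: with the 16 ids above the head outputs 17
+# scores and AVADataset remaps the original ids to contiguous labels 1..16
+# (an illustrative description; see the AVADataset implementation for details).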
+
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1)),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=num_classes,
+ multilabel=True,
+ topk=(3, 5),
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=16,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ num_classes=num_classes,
+ custom_classes=custom_classes,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ num_classes=num_classes,
+ custom_classes=custom_classes,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_custom')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py
new file mode 100644
index 00000000..e0a05510
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb.py
@@ -0,0 +1,120 @@
+_base_ = ['../_base_/models/slowonly_r50_nl.py']
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=12,
+ workers_per_gpu=2,
+ # During testing, each video may have a different shape
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(
+ type='SGD', lr=0.3, momentum=0.9, weight_decay=1e-06, nesterov=True)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[4, 6, 8],
+ warmup='linear',
+ warmup_iters=800,
+ warmup_ratio=0.01)
+total_epochs = 10
+
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_nl_kinetics_pretrained_r50_4x16x1_10e_ava_rgb')
+load_from = (
+ 'https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_nl_embedded_gaussian_r50_4x16x1_150e_kinetics400_rgb/'
+ 'slowonly_nl_embedded_gaussian_r50_4x16x1_150e_kinetics400_rgb_20210308-0d6e5a69.pth' # noqa: E501
+)
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py
new file mode 100644
index 00000000..105b8320
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb.py
@@ -0,0 +1,119 @@
+_base_ = ['../_base_/models/slowonly_r50_nl.py']
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=8, frame_interval=8),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=8, frame_interval=8, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ # During testing, each video may have a different shape
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(
+ type='SGD', lr=0.15, momentum=0.9, weight_decay=1e-06, nesterov=True)
+# this lr is used for 8x2 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[4, 6, 8],
+ warmup='linear',
+ warmup_iters=1600,
+ warmup_ratio=0.01)
+total_epochs = 10
+
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_nl_kinetics_pretrained_r50_8x8x1_10e_ava_rgb')
+load_from = (
+ 'https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_nl_embedded_gaussian_r50_8x8x1_150e_kinetics400_rgb/'
+ 'slowonly_nl_embedded_gaussian_r50_8x8x1_150e_kinetics400_rgb_20210308-e8dd9e82.pth' # noqa: E501
+)
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py
new file mode 100644
index 00000000..23f3aaf5
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py
@@ -0,0 +1,158 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=101,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1)),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=8, frame_interval=8),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=8, frame_interval=8, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+data = dict(
+ videos_per_gpu=6,
+ workers_per_gpu=2,
+ # During testing, each video may have a different shape
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'omni/'
+ 'slowonly_r101_omni_8x8x1_kinetics400_rgb_20200926-b5dbb701.pth')
+
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..067e1745
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/ava/slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,159 @@
+# model setting
+model = dict(
+ type='FastRCNN',
+ backbone=dict(
+ type='ResNet3dSlowOnly',
+ depth=50,
+ pretrained=None,
+ pretrained2d=False,
+ lateral=False,
+ num_stages=4,
+ conv1_kernel=(1, 7, 7),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ spatial_strides=(1, 2, 2, 1)),
+ roi_head=dict(
+ type='AVARoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor3D',
+ roi_layer_type='RoIAlign',
+ output_size=8,
+ with_temporal_pool=True),
+ bbox_head=dict(
+ type='BBoxHeadAVA',
+ in_channels=2048,
+ num_classes=81,
+ multilabel=True,
+ dropout_ratio=0.5)),
+ train_cfg=dict(
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssignerAVA',
+ pos_iou_thr=0.9,
+ neg_iou_thr=0.9,
+ min_pos_iou=0.9),
+ sampler=dict(
+ type='RandomSampler',
+ num=32,
+ pos_fraction=1,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=1.0,
+ debug=False)),
+ test_cfg=dict(rcnn=dict(action_thr=0.002)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids'])
+]
+# Testing is done without any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=16,
+ workers_per_gpu=2,
+ # During testing, each video may have a different shape
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+
+optimizer = dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.00001)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = ('./work_dirs/ava/'
+ 'slowonly_omnisource_pretrained_r50_4x16x1_20e_ava_rgb')
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'omni/'
+ 'slowonly_r50_omni_4x16x1_kinetics400_rgb_20200926-51b1f7ea.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README.md b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README.md
new file mode 100644
index 00000000..0658acc9
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README.md
@@ -0,0 +1,132 @@
+# LFB
+
+[Long-term feature banks for detailed video understanding](https://openaccess.thecvf.com/content_CVPR_2019/html/Wu_Long-Term_Feature_Banks_for_Detailed_Video_Understanding_CVPR_2019_paper.html)
+
+
+
+## Abstract
+
+
+
+To understand the world, we humans constantly need to relate the present to the past, and put events in context. In this paper, we enable existing video models to do the same. We propose a long-term feature bank---supportive information extracted over the entire span of a video---to augment state-of-the-art video models that otherwise would only view short clips of 2-5 seconds. Our experiments demonstrate that augmenting 3D convolutional networks with a long-term feature bank yields state-of-the-art results on three challenging video datasets: AVA, EPIC-Kitchens, and Charades.
+
+
+
+
+

+
+
+## Results and Models
+
+### AVA2.1
+
+| Model | Modality | Pretrained | Backbone | Input | gpus | Resolution | mAP | log | json | ckpt |
+| :-----------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :----------: | :--------------------------------------------------------------------------------------------------: | :---: | :--: | :------------: | :---: | :------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py](/configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | [slowonly_r50_4x16x1](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | 4x16 | 8 | short-side 256 | 24.11 | [log](https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210224_125052.log) | [json](https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210224_125052.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210224-2ae136d9.pth) |
+| [lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py](/configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | [slowonly_r50_4x16x1](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | 4x16 | 8 | short-side 256 | 20.17 | [log](https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log) | [json](https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210301-19c330b7.pth) |
+| [lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py](/configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | [slowonly_r50_4x16x1](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | 4x16 | 8 | short-side 256 | 22.15 | [log](https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log) | [json](https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210301-37efcd15.pth) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs we used to get the checkpoint.
+ According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU,
+ e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
+2. We use `slowonly_r50_4x16x1` instead of `I3D-R50-NL` in the original paper as the backbone of LFB, but we have achieved a similar improvement: (ours: 20.1 -> 24.11 vs. author: 22.1 -> 25.8).
+3. Because the long-term features are randomly sampled in testing, the test accuracy may vary slightly between runs.
+4. Before training or testing LFB, you need to infer the long-term feature bank with [lfb_slowonly_r50_ava_infer.py](/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py). For more details on inferring the feature bank, you can refer to the [Train](#Train) part.
+5. You can also download the long-term feature bank from [AVA_train_val_float32_lfb](https://download.openmmlab.com/mmaction/detection/lfb/AVA_train_val_float32_lfb.rar) or [AVA_train_val_float16_lfb](https://download.openmmlab.com/mmaction/detection/lfb/AVA_train_val_float16_lfb.rar), and then put it under `lfb_prefix_path`.
+6. The ROIHead now supports single-label classification (i.e. the network outputs at most
+   one label per actor). This can be done by setting `multilabel=False` during training and
+   adjusting `test_cfg.rcnn.action_thr` for testing (see the sketch after this note).
+
+:::
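+
+A minimal sketch of the single-label setup described in note 6 above. This is an illustrative override, not a config shipped with the repo, and the `action_thr` value below is only a placeholder to tune:
+
+```python
+# hypothetical override on top of an existing LFB config
+_base_ = ['./lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py']
+
+model = dict(
+    roi_head=dict(
+        # output at most one action label per actor
+        bbox_head=dict(multilabel=False)),
+    # score threshold applied to predicted actions at test time
+    test_cfg=dict(rcnn=dict(action_thr=0.5)))
+```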
+
+## Train
+
+### a. Infer long-term feature bank for training
+
+Before training or testing LFB, you need to infer the long-term feature bank first.
+
+Specifically, run the test on the training, validation, and testing datasets with the config file [lfb_slowonly_r50_ava_infer](/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py) (by default, the config only infers the feature bank of the training dataset; set `dataset_mode = 'val'` in the config to infer the feature bank of the validation dataset), and the shared head [LFBInferHead](/mmaction/models/heads/lfb_infer_head.py) will generate the feature bank.
+
+A long-term feature bank file of the AVA training and validation datasets with float32 precision occupies 3.3 GB. If the features are stored with float16 precision, the feature bank occupies 1.65 GB.
+
+You can use the following commands to infer the feature bank of the AVA training and validation datasets; the feature bank will be stored in `lfb_prefix_path/lfb_train.pkl` and `lfb_prefix_path/lfb_val.pkl`.
+
+```shell
+# set `dataset_mode = 'train'` in lfb_slowonly_r50_ava_infer.py
+python tools/test.py configs/detection/lfb/lfb_slowonly_r50_ava_infer.py \
+ checkpoints/YOUR_BASELINE_CHECKPOINT.pth --eval mAP
+
+# set `dataset_mode = 'val'` in lfb_slowonly_r50_ava_infer.py
+python tools/test.py configs/detection/lfb/lfb_slowonly_r50_ava_infer.py \
+ checkpoints/YOUR_BASELINE_CHECKPOINT.pth --eval mAP
+```
+
+We use the [slowonly_r50_4x16x1 checkpoint](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth) from [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) to infer the feature bank.
+
+### b. Train LFB
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the LFB model on AVA with the half-precision long-term feature bank.
+
+```shell
+python tools/train.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py \
+ --validate --seed 0 --deterministic
+```
+
+For more details and information on optional arguments, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+### a. Infer long-term feature bank for testing
+
+Before training or testing LFB, you also need to infer the long-term feature bank first. If you have already generated the feature bank file, you can skip this step.
+
+The step is the same as the **Infer long-term feature bank for training** part in [Train](#Train).
+
+### b. Test LFB
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the LFB model on AVA with the half-precision long-term feature bank and dump the results to a csv file.
+
+```shell
+python tools/test.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py \
+ checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
+```
+
+For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+
+
+```BibTeX
+@inproceedings{gu2018ava,
+ title={Ava: A video dataset of spatio-temporally localized atomic visual actions},
+ author={Gu, Chunhui and Sun, Chen and Ross, David A and Vondrick, Carl and Pantofaru, Caroline and Li, Yeqing and Vijayanarasimhan, Sudheendra and Toderici, George and Ricco, Susanna and Sukthankar, Rahul and others},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={6047--6056},
+ year={2018}
+}
+```
+
+```BibTeX
+@inproceedings{wu2019long,
+ title={Long-term feature banks for detailed video understanding},
+ author={Wu, Chao-Yuan and Feichtenhofer, Christoph and Fan, Haoqi and He, Kaiming and Krahenbuhl, Philipp and Girshick, Ross},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ pages={284--293},
+ year={2019}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README_zh-CN.md
new file mode 100644
index 00000000..2f42c393
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/README_zh-CN.md
@@ -0,0 +1,103 @@
+# LFB
+
+## 简介
+
+
+
+```BibTeX
+@inproceedings{wu2019long,
+ title={Long-term feature banks for detailed video understanding},
+ author={Wu, Chao-Yuan and Feichtenhofer, Christoph and Fan, Haoqi and He, Kaiming and Krahenbuhl, Philipp and Girshick, Ross},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ pages={284--293},
+ year={2019}
+}
+```
+
+## 模型库
+
+### AVA2.1
+
+| 配置文件 | 模态 | 预训练 | 主干网络 | 输入 | GPU 数量 | 分辨率 | 平均精度 | log | json | ckpt |
+| :-----------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :----------: | :--------------------------------------------------------------------------------------------------: | :--: | :------: | :------: | :------: | :------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py](/configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | [slowonly_r50_4x16x1](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | 4x16 | 8 | 短边 256 | 24.11 | [log](https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210224_125052.log) | [json](https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210224_125052.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210224-2ae136d9.pth) |
+| [lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py](/configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | [slowonly_r50_4x16x1](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | 4x16 | 8 | 短边 256 | 20.17 | [log](https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log) | [json](https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210301-19c330b7.pth) |
+| [lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py](/configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py) | RGB | Kinetics-400 | [slowonly_r50_4x16x1](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) | 4x16 | 8 | 短边 256 | 22.15 | [log](https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log) | [json](https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log.json) | [ckpt](https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210301-37efcd15.pth) |
+
+- 注:
+
+1. 这里的 **GPU 数量** 指的是得到模型权重文件对应的 GPU 个数。默认地,MMAction2 所提供的配置文件对应使用 8 块 GPU 进行训练的情况。
+ 依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
+ 如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
+2. 本 LFB 模型暂没有使用原论文中的 `I3D-R50-NL` 作为主干网络,而是用 `slowonly_r50_4x16x1` 替代,但取得了同样的提升效果:(本模型:20.1 -> 24.11 而原论文模型:22.1 -> 25.8)。
+3. 因为测试时,长时特征是被随机采样的,所以测试精度可能有一些偏差。
+4. 在训练或测试 LFB 之前,用户需要使用配置文件 [lfb_slowonly_r50_ava_infer.py](/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py) 来推导长时特征库。有关推导长时特征库的更多细节,请参照[训练部分](#%E8%AE%AD%E7%BB%83)。
+5. 用户也可以直接从 [AVA_train_val_float32_lfb](https://download.openmmlab.com/mmaction/detection/lfb/AVA_train_val_float32_lfb.rar) 或者 [AVA_train_val_float16_lfb](https://download.openmmlab.com/mmaction/detection/lfb/AVA_train_val_float16_lfb.rar) 下载 float32 或 float16 的长时特征库,并把它们放在 `lfb_prefix_path` 上。
+
+## 训练
+
+### a. 为训练 LFB 推导长时特征库
+
+在训练或测试 LFB 之前,用户首先需要推导长时特征库。
+
+具体来说,使用配置文件 [lfb_slowonly_r50_ava_infer](/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py),在训练集、验证集、测试集上都运行一次模型测试。
+
+配置文件的默认设置是推导训练集的长时特征库,用户需要将 `dataset_mode` 设置成 `'val'` 来推导验证集的长时特征库。在推导过程中,共享头 [LFBInferHead](/mmaction/models/heads/lfb_infer_head.py) 会生成长时特征库。
+
+AVA 训练集和验证集的 float32 精度的长时特征库文件大约占 3.3 GB。如果以半精度来存储长时特征,文件大约占 1.65 GB。
+
+用户可以使用以下命令来推导 AVA 训练集和验证集的长时特征库,而特征库会被存储为 `lfb_prefix_path/lfb_train.pkl` 和 `lfb_prefix_path/lfb_val.pkl`。
+
+```shell
+# 在 lfb_slowonly_r50_ava_infer.py 中 设置 `dataset_mode = 'train'`
+python tools/test.py configs/detection/lfb/lfb_slowonly_r50_ava_infer.py \
+ checkpoints/YOUR_BASELINE_CHECKPOINT.pth --eval mAP
+
+# 在 lfb_slowonly_r50_ava_infer.py 中 设置 `dataset_mode = 'val'`
+python tools/test.py configs/detection/lfb/lfb_slowonly_r50_ava_infer.py \
+ checkpoints/YOUR_BASELINE_CHECKPOINT.pth --eval mAP
+```
+
+MMAction2 使用来自配置文件 [slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb](/configs/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py) 的模型权重文件 [slowonly_r50_4x16x1 checkpoint](https://download.openmmlab.com/mmaction/detection/ava/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb_20201217-40061d5f.pth)作为推导长时特征库的 LFB 模型的主干网络的预训练模型。
+
+### b. 训练 LFB
+
+用户可以使用以下指令进行模型训练。
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+例如:使用半精度的长时特征库在 AVA 数据集上训练 LFB 模型。
+
+```shell
+python tools/train.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py \
+ --validate --seed 0 --deterministic
+```
+
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE) 中的 **训练配置** 部分。
+
+## 测试
+
+### a. 为测试 LFB 推导长时特征库
+
+在训练或测试 LFB 之前,用户首先需要推导长时特征库。如果用户之前已经生成了特征库文件,可以跳过这一步。
+
+这一步做法与[训练部分](#Train)中的 **为训练 LFB 推导长时特征库** 相同。
+
+### b. 测试 LFB
+
+用户可以使用以下指令进行模型测试。
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+例如:使用半精度的长时特征库在 AVA 数据集上测试 LFB 模型,并将结果导出为一个 csv 文件。
+
+```shell
+python tools/test.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py \
+ checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
+```
+
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86) 中的 **测试某个数据集** 部分。
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..6ba6a8fc
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,137 @@
+_base_ = ['../_base_/models/slowonly_r50.py']
+
+# model settings
+lfb_prefix_path = 'data/ava/lfb_half'
+max_num_sampled_feat = 5
+window_size = 60
+lfb_channels = 2048
+dataset_modes = ('train', 'val')
+
+model = dict(
+ roi_head=dict(
+ shared_head=dict(
+ type='FBOHead',
+ lfb_cfg=dict(
+ lfb_prefix_path=lfb_prefix_path,
+ max_num_sampled_feat=max_num_sampled_feat,
+ window_size=window_size,
+ lfb_channels=lfb_channels,
+ dataset_modes=dataset_modes,
+ device='gpu'),
+ fbo_cfg=dict(type='avg')),
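+        # in_channels = 4096: 2048 RoI feature channels + 2048 pooled long-term
+        # feature channels concatenated by the FBO head (inferred from
+        # lfb_channels above)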
+ bbox_head=dict(in_channels=4096)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids', 'img_key'])
+]
+# The testing is w/o. any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape', 'img_key'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=12,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+
+optimizer = dict(type='SGD', lr=0.15, momentum=0.9, weight_decay=1e-05)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb' # noqa E501
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..6c4dc19d
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,137 @@
+_base_ = ['../_base_/models/slowonly_r50.py']
+
+# model settings
+lfb_prefix_path = 'data/ava/lfb_half'
+max_num_sampled_feat = 5
+window_size = 60
+lfb_channels = 2048
+dataset_modes = ('train', 'val')
+
+model = dict(
+ roi_head=dict(
+ shared_head=dict(
+ type='FBOHead',
+ lfb_cfg=dict(
+ lfb_prefix_path=lfb_prefix_path,
+ max_num_sampled_feat=max_num_sampled_feat,
+ window_size=window_size,
+ lfb_channels=lfb_channels,
+ dataset_modes=dataset_modes,
+ device='gpu'),
+ fbo_cfg=dict(type='max')),
+ bbox_head=dict(in_channels=4096)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids', 'img_key'])
+]
+# The testing is w/o. any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape', 'img_key'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=12,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+
+optimizer = dict(type='SGD', lr=0.15, momentum=0.9, weight_decay=1e-05)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb' # noqa E501
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
new file mode 100644
index 00000000..bdd90ce6
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
@@ -0,0 +1,147 @@
+_base_ = ['../_base_/models/slowonly_r50.py']
+
+# model settings
+lfb_prefix_path = 'data/ava/lfb_half'
+max_num_sampled_feat = 5
+window_size = 60
+lfb_channels = 2048
+dataset_modes = ('train', 'val')
+
+model = dict(
+ roi_head=dict(
+ shared_head=dict(
+ type='FBOHead',
+ lfb_cfg=dict(
+ lfb_prefix_path=lfb_prefix_path,
+ max_num_sampled_feat=max_num_sampled_feat,
+ window_size=window_size,
+ lfb_channels=lfb_channels,
+ dataset_modes=dataset_modes,
+ device='gpu'),
+ fbo_cfg=dict(
+ type='non_local',
+ st_feat_channels=2048,
+ lt_feat_channels=lfb_channels,
+ latent_channels=512,
+ num_st_feat=1,
+ num_lt_feat=window_size * max_num_sampled_feat,
+ num_non_local_layers=2,
+ st_feat_dropout_ratio=0.2,
+ lt_feat_dropout_ratio=0.2,
+ pre_activate=True)),
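+        # in_channels = 2560: 2048 RoI feature channels + 512 latent channels
+        # from the non-local FBO (inferred from latent_channels above)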
+ bbox_head=dict(in_channels=2560)))
+
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
+ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
+
+exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
+exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
+ 'recall_93.9.pkl')
+proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+train_pipeline = [
+ dict(type='SampleAVAFrames', clip_len=4, frame_interval=16),
+ dict(type='RawFrameDecode'),
+ dict(type='RandomRescale', scale_range=(256, 320)),
+ dict(type='RandomCrop', size=256),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
+ dict(
+ type='ToDataContainer',
+ fields=[
+ dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
+ ]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
+ meta_keys=['scores', 'entity_ids', 'img_key'])
+]
+# The testing is w/o. any cropping / flipping
+val_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape', 'img_key'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=12,
+ workers_per_gpu=2,
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ exclude_file=exclude_file_train,
+ pipeline=train_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_train,
+ person_det_score_thr=0.9,
+ data_prefix=data_root),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ exclude_file=exclude_file_val,
+ pipeline=val_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_val,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+data['test'] = data['val']
+evaluation = dict(interval=1, save_best='mAP@0.5IOU')
+
+optimizer = dict(type='SGD', lr=0.15, momentum=0.9, weight_decay=1e-05)
+# this lr is used for 8 gpus
+
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+
+lr_config = dict(
+ policy='step',
+ step=[10, 15],
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=5,
+ warmup_ratio=0.1)
+total_epochs = 20
+
+checkpoint_config = dict(interval=1)
+workflow = [('train', 1)]
+log_config = dict(
+ interval=20, hooks=[
+ dict(type='TextLoggerHook'),
+ ])
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb' # noqa E501
+load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb/'
+ 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth')
+resume_from = None
+find_unused_parameters = False
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py
new file mode 100644
index 00000000..568f0765
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/lfb_slowonly_r50_ava_infer.py
@@ -0,0 +1,65 @@
+# This config is used to generate long-term feature bank.
+_base_ = ['../_base_/models/slowonly_r50.py']
+
+# model settings
+lfb_prefix_path = 'data/ava/lfb_half'
+dataset_mode = 'train' # ['train', 'val', 'test']
+
+model = dict(
+ roi_head=dict(
+ shared_head=dict(
+ type='LFBInferHead',
+ lfb_prefix_path=lfb_prefix_path,
+ dataset_mode=dataset_mode,
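+            # store features as float16: ~1.65 GB for AVA train+val instead of
+            # ~3.3 GB in float32 (see the LFB README)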
+ use_half_precision=True)))
+
+# dataset settings
+dataset_type = 'AVADataset'
+data_root = 'data/ava/rawframes'
+anno_root = 'data/ava/annotations'
+
+ann_file_infer = f'{anno_root}/ava_{dataset_mode}_v2.1.csv'
+
+exclude_file_infer = (
+ f'{anno_root}/ava_{dataset_mode}_excluded_timestamps_v2.1.csv')
+
+label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
+
+proposal_file_infer = (
+ f'{anno_root}/ava_dense_proposals_{dataset_mode}.FAIR.recall_93.9.pkl')
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+
+infer_pipeline = [
+ dict(
+ type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW', collapse=True),
+ # Rename is needed to use mmdet detectors
+ dict(type='Rename', mapping=dict(imgs='img')),
+ dict(type='ToTensor', keys=['img', 'proposals']),
+ dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
+ dict(
+ type='Collect',
+ keys=['img', 'proposals'],
+ meta_keys=['scores', 'img_shape', 'img_key'],
+ nested=True)
+]
+
+data = dict(
+ videos_per_gpu=1,
+ workers_per_gpu=2,
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_infer,
+ exclude_file=exclude_file_infer,
+ pipeline=infer_pipeline,
+ label_file=label_file,
+ proposal_file=proposal_file_infer,
+ person_det_score_thr=0.9,
+ data_prefix=data_root))
+
+dist_params = dict(backend='nccl')
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/metafile.yml
new file mode 100644
index 00000000..90ec931e
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/detection/lfb/metafile.yml
@@ -0,0 +1,70 @@
+Collections:
+- Name: LFB
+ README: configs/detection/lfb/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1812.05038
+ Title: Long-Term Feature Banks for Detailed Video Understanding
+Models:
+- Config: configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
+ In Collection: LFB
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 20
+ Input: 4x16
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 24.11
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210224_125052.log.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210224_125052.log
+ Weights: https://download.openmmlab.com/mmaction/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_nl_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210224-2ae136d9.pth
+- Config: configs/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
+ In Collection: LFB
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 20
+ Input: 4x16
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 20.17
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log
+ Weights: https://download.openmmlab.com/mmaction/detection/lfb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_avg_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210301-19c330b7.pth
+- Config: configs/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
+ In Collection: LFB
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 20
+ Input: 4x16
+ Pretrained: Kinetics-400
+ Resolution: short-side 256
+ Training Data: AVA v2.1
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb.py
+ Results:
+ - Dataset: AVA v2.1
+ Metrics:
+ mAP: 22.15
+ Task: Spatial Temporal Action Detection
+ Training Json Log: https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log.json
+ Training Log: https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/20210301_124812.log
+ Weights: https://download.openmmlab.com/mmaction/detection/lfb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb/lfb_max_kinetics_pretrained_slowonly_r50_4x16x1_20e_ava_rgb_20210301-37efcd15.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README.md b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README.md
new file mode 100644
index 00000000..ccf07450
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README.md
@@ -0,0 +1,115 @@
+# BMN
+
+[Bmn: Boundary-matching network for temporal action proposal generation](https://openaccess.thecvf.com/content_ICCV_2019/html/Lin_BMN_Boundary-Matching_Network_for_Temporal_Action_Proposal_Generation_ICCV_2019_paper.html)
+
+
+
+## Abstract
+
+
+
+Temporal action proposal generation is a challenging and promising task which aims to locate temporal regions in real-world videos where actions or events may occur. Current bottom-up proposal generation methods can generate proposals with precise boundaries, but cannot efficiently generate adequately reliable confidence scores for retrieving proposals. To address these difficulties, we introduce the Boundary-Matching (BM) mechanism to evaluate confidence scores of densely distributed proposals, which denotes a proposal as a matching pair of starting and ending boundaries and combines all densely distributed BM pairs into the BM confidence map. Based on the BM mechanism, we propose an effective, efficient and end-to-end proposal generation method, named Boundary-Matching Network (BMN), which generates proposals with precise temporal boundaries as well as reliable confidence scores simultaneously. The two branches of BMN are jointly trained in a unified framework. We conduct experiments on two challenging datasets: THUMOS-14 and ActivityNet-1.3, where BMN shows significant performance improvement with remarkable efficiency and generalizability. Further, combined with an existing action classifier, BMN can achieve state-of-the-art temporal action detection performance.
+
+
+
+
+

+
+
+## Results and Models
+
+### ActivityNet feature
+
+| config | feature | gpus | AR@100 | AUC | AP@0.5 | AP@0.75 | AP@0.95 | mAP | gpu_mem(M) | iter time(s) | ckpt | log | json |
+| :-----------------------------------------------------------------------------------------------------------: | :------------: | :--: | :----: | :---: | :----: | :-----: | :-----: | :---: | :--------: | ------------ | :----------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------: | -------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [bmn_400x100_9e_2x8_activitynet_feature](/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py) | cuhk_mean_100 | 2 | 75.28 | 67.22 | 42.47 | 31.31 | 9.92 | 30.34 | 5420 | 3.27 | [ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature_20200619-42a3b111.pth) | [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log.json) |
+| | mmaction_video | 2 | 75.43 | 67.22 | 42.62 | 31.56 | 10.86 | 30.77 | 5420 | 3.27 | [ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809-c9fd14d2.pth) | [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.json) |
+| | mmaction_clip | 2 | 75.35 | 67.38 | 43.08 | 32.19 | 10.73 | 31.15 | 5420 | 3.27 | [ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809-10d803ce.pth) | [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.json) |
+| [BMN-official](https://github.com/JJBOY/BMN-Boundary-Matching-Network) (for reference)\* | cuhk_mean_100 | - | 75.27 | 67.49 | 42.22 | 30.98 | 9.22 | 30.00 | - | - | - | - | - |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs we used to get the checkpoint.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu (see the short example after this note).
+2. For the feature column, cuhk_mean_100 denotes the widely used CUHK ActivityNet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk), while mmaction_video and mmaction_clip denote features extracted by MMAction, with a video-level or a clip-level ActivityNet-finetuned model respectively.
+3. We evaluate the action detection performance of BMN using the [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) submission for the ActivityNet 2017 Untrimmed Video Classification Track to assign a label to each action proposal.
+
+:::
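+
+A rough, hand-written illustration of the linear scaling rule from note 1 (a sketch, not part of the official tooling; the base values are taken from this config's 2 GPUs x 8 videos/gpu, lr=0.001 setting):
+
+```python
+def scale_lr(gpus, videos_per_gpu, base_lr=0.001, base_gpus=2, base_videos_per_gpu=8):
+    """Scale the learning rate proportionally to the total batch size."""
+    return base_lr * (gpus * videos_per_gpu) / (base_gpus * base_videos_per_gpu)
+
+# e.g. moving from 2 GPUs x 8 videos/gpu to 8 GPUs x 8 videos/gpu
+print(scale_lr(gpus=8, videos_per_gpu=8))  # 0.004
+```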
+
+\*We train BMN with the [official repo](https://github.com/JJBOY/BMN-Boundary-Matching-Network) and evaluate its proposal generation and action detection performance with [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) for label assignment.
+
+For more details on data preparation, you can refer to the ActivityNet feature part in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the BMN model on the ActivityNet feature dataset.
+
+```shell
+python tools/train.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
+```
+
+For more details and information on optional arguments, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test BMN on the ActivityNet feature dataset.
+
+```shell
+# Note: If evaluation is needed, please make sure the annotation file for the test data contains ground truth.
+python tools/test.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json
+```
+
+You can also test the action detection performance of the model, using the [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) prediction file and the proposal file generated above (`results.json` from the last command).
+
+```shell
+python tools/analysis/report_map.py --proposal path/to/proposal_file
+```
+
+:::{note}
+
+1. (Optional) You can use the following command to generate a formatted proposal file, which will be fed into the action classifier (currently only SSN and P-GCN are supported, not TSN, I3D, etc.) to get the classification results of the proposals.
+
+ ```shell
+ python tools/data/activitynet/convert_proposal_format.py
+ ```
+
+:::
+
+For more details and information on optional arguments, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+```BibTeX
+@inproceedings{lin2019bmn,
+ title={Bmn: Boundary-matching network for temporal action proposal generation},
+ author={Lin, Tianwei and Liu, Xiao and Li, Xin and Ding, Errui and Wen, Shilei},
+ booktitle={Proceedings of the IEEE International Conference on Computer Vision},
+ pages={3889--3898},
+ year={2019}
+}
+```
+
+
+
+```BibTeX
+@article{zhao2017cuhk,
+ title={Cuhk \& ethz \& siat submission to activitynet challenge 2017},
+ author={Zhao, Y and Zhang, B and Wu, Z and Yang, S and Zhou, L and Yan, S and Wang, L and Xiong, Y and Lin, D and Qiao, Y and others},
+ journal={arXiv preprint arXiv:1710.08011},
+ volume={8},
+ year={2017}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README_zh-CN.md
new file mode 100644
index 00000000..72c4f3fe
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/README_zh-CN.md
@@ -0,0 +1,98 @@
+# BMN
+
+## 简介
+
+
+
+```BibTeX
+@inproceedings{lin2019bmn,
+ title={Bmn: Boundary-matching network for temporal action proposal generation},
+ author={Lin, Tianwei and Liu, Xiao and Li, Xin and Ding, Errui and Wen, Shilei},
+ booktitle={Proceedings of the IEEE International Conference on Computer Vision},
+ pages={3889--3898},
+ year={2019}
+}
+```
+
+
+
+```BibTeX
+@article{zhao2017cuhk,
+ title={Cuhk \& ethz \& siat submission to activitynet challenge 2017},
+ author={Zhao, Y and Zhang, B and Wu, Z and Yang, S and Zhou, L and Yan, S and Wang, L and Xiong, Y and Lin, D and Qiao, Y and others},
+ journal={arXiv preprint arXiv:1710.08011},
+ volume={8},
+ year={2017}
+}
+```
+
+## 模型库
+
+### ActivityNet feature
+
+| 配置文件 | 特征 | GPU 数量 | AR@100 | AUC | AP@0.5 | AP@0.75 | AP@0.95 | mAP | GPU 显存占用 (M) | 推理时间 (s) | ckpt | log | json |
+| :-----------------------------------------------------------------------------------------------------------: | :------------: | :------: | :----: | :---: | :----: | :-----: | :-----: | :---: | :--------------: | ------------ | :----------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------: | -------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [bmn_400x100_9e_2x8_activitynet_feature](/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py) | cuhk_mean_100 | 2 | 75.28 | 67.22 | 42.47 | 31.31 | 9.92 | 30.34 | 5420 | 3.27 | [ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature_20200619-42a3b111.pth) | [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log.json) |
+| | mmaction_video | 2 | 75.43 | 67.22 | 42.62 | 31.56 | 10.86 | 30.77 | 5420 | 3.27 | [ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809-c9fd14d2.pth) | [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.json) |
+| | mmaction_clip | 2 | 75.35 | 67.38 | 43.08 | 32.19 | 10.73 | 31.15 | 5420 | 3.27 | [ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809-10d803ce.pth) | [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.json) |
+| [BMN-official](https://github.com/JJBOY/BMN-Boundary-Matching-Network) (for reference)\* | cuhk_mean_100 | - | 75.27 | 67.49 | 42.22 | 30.98 | 9.22 | 30.00 | - | - | - | - | - |
+
+- 注:
+
+1. 这里的 **GPU 数量** 指的是得到模型权重文件对应的 GPU 个数。默认地,MMAction2 所提供的配置文件对应使用 8 块 GPU 进行训练的情况。
+ 依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
+ 如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
+2. 对于 **特征** 这一列,`cuhk_mean_100` 表示所使用的特征为利用 [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk) 代码库抽取的,被广泛利用的 CUHK ActivityNet 特征,
+   `mmaction_video` 和 `mmaction_clip` 分别表示所使用的特征为利用 MMAction 抽取的视频级别 ActivityNet 微调模型的特征,以及视频片段级别 ActivityNet 微调模型的特征。
+3. MMAction2 使用 ActivityNet2017 未剪辑视频分类赛道上 [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) 所提交的结果来为每个视频的时序动作候选指定标签,以用于 BMN 模型评估。
+
+\*MMAction2 在 [原始代码库](https://github.com/JJBOY/BMN-Boundary-Matching-Network) 上训练 BMN,并且在 [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) 的对应标签上评估时序动作候选生成和时序检测的结果。
+
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 ActivityNet 特征部分。
+
+## 如何训练
+
+用户可以使用以下指令进行模型训练。
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+例如:在 ActivityNet 特征上训练 BMN。
+
+```shell
+python tools/train.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
+```
+
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE) 中的 **训练配置** 部分。
+
+## 如何测试
+
+用户可以使用以下指令进行模型测试。
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+例如:在 ActivityNet 特征上测试 BMN 模型。
+
+```shell
+# 注:如果需要进行指标验证,需确保测试数据的标注文件包含真实标签
+python tools/test.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json
+```
+
+用户也可以利用 [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) 的预测文件和已生成的时序动作候选文件(即上一条命令生成的 `results.json`)来评估模型的时序检测性能。
+
+```shell
+python tools/analysis/report_map.py --proposal path/to/proposal_file
+```
+
+注:
+
+1. (可选项) 用户可以使用以下指令生成格式化的时序动作候选文件,该文件可被送入动作识别器中(目前只支持 SSN 和 P-GCN,不包括 TSN, I3D 等),以获得时序动作候选的分类结果。
+
+ ```shell
+ python tools/data/activitynet/convert_proposal_format.py
+ ```
+
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86) 中的 **测试某个数据集** 部分。
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
new file mode 100644
index 00000000..6e27661f
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
@@ -0,0 +1,88 @@
+_base_ = [
+ '../../_base_/models/bmn_400x100.py', '../../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ActivityNetDataset'
+data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+ann_file_train = 'data/ActivityNet/anet_anno_train.json'
+ann_file_val = 'data/ActivityNet/anet_anno_val.json'
+ann_file_test = 'data/ActivityNet/anet_anno_val.json'
+
+test_pipeline = [
+ dict(type='LoadLocalizationFeature'),
+ dict(
+ type='Collect',
+ keys=['raw_feature'],
+ meta_name='video_meta',
+ meta_keys=[
+ 'video_name', 'duration_second', 'duration_frame', 'annotations',
+ 'feature_frame'
+ ]),
+ dict(type='ToTensor', keys=['raw_feature']),
+]
+train_pipeline = [
+ dict(type='LoadLocalizationFeature'),
+ dict(type='GenerateLocalizationLabels'),
+ dict(
+ type='Collect',
+ keys=['raw_feature', 'gt_bbox'],
+ meta_name='video_meta',
+ meta_keys=['video_name']),
+ dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']),
+ dict(
+ type='ToDataContainer',
+ fields=[dict(key='gt_bbox', stack=False, cpu_only=True)])
+]
+val_pipeline = [
+ dict(type='LoadLocalizationFeature'),
+ dict(type='GenerateLocalizationLabels'),
+ dict(
+ type='Collect',
+ keys=['raw_feature', 'gt_bbox'],
+ meta_name='video_meta',
+ meta_keys=[
+ 'video_name', 'duration_second', 'duration_frame', 'annotations',
+ 'feature_frame'
+ ]),
+ dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']),
+ dict(
+ type='ToDataContainer',
+ fields=[dict(key='gt_bbox', stack=False, cpu_only=True)])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=8,
+ train_dataloader=dict(drop_last=True),
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_test,
+ pipeline=test_pipeline,
+ data_prefix=data_root_val),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ pipeline=val_pipeline,
+ data_prefix=data_root_val),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ pipeline=train_pipeline,
+ data_prefix=data_root))
+evaluation = dict(interval=1, metrics=['AR@AN'])
+
+# optimizer
+optimizer = dict(
+ type='Adam', lr=0.001, weight_decay=0.0001) # this lr is used for 2 gpus
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=7)
+total_epochs = 9
+
+# runtime settings
+log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
+work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/'
+output_config = dict(out=f'{work_dir}/results.json', output_format='json')
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/metafile.yml
new file mode 100644
index 00000000..40eafd4f
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bmn/metafile.yml
@@ -0,0 +1,73 @@
+Collections:
+- Name: BMN
+ README: configs/localization/bmn/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1907.09702
+ Title: "BMN: Boundary-Matching Network for Temporal Action Proposal Generation"
+Models:
+- Config: configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
+ In Collection: BMN
+ Metadata:
+ Batch Size: 8
+ Epochs: 9
+ Training Data: ActivityNet v1.3
+ Training Resources: 2 GPUs
+ feature: cuhk_mean_100
+ Name: bmn_400x100_9e_2x8_activitynet_feature (cuhk_mean_100)
+ Results:
+ - Dataset: ActivityNet v1.3
+ Metrics:
+ AP@0.5: 42.47
+ AP@0.75: 31.31
+ AP@0.95: 9.92
+ AR@100: 75.28
+ AUC: 67.22
+ mAP: 30.34
+ Task: Temporal Action Localization
+ Training Json Log: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log.json
+ Training Log: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log
+ Weights: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature_20200619-42a3b111.pth
+- Config: configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
+ In Collection: BMN
+ Metadata:
+ Batch Size: 8
+ Epochs: 9
+ Training Data: ActivityNet v1.3
+ Training Resources: 2 GPUs
+ feature: mmaction_video
+ Name: bmn_400x100_9e_2x8_activitynet_feature (mmaction_video)
+ Results:
+ - Dataset: ActivityNet v1.3
+ Metrics:
+ AP@0.5: 42.62
+ AP@0.75: 31.56
+ AP@0.95: 10.86
+ AR@100: 75.43
+ AUC: 67.22
+ mAP: 30.77
+ Task: Temporal Action Localization
+ Training Json Log: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.json
+ Training Log: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.log
+ Weights: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809-c9fd14d2.pth
+- Config: configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
+ In Collection: BMN
+ Metadata:
+ Batch Size: 8
+ Epochs: 9
+ Training Data: ActivityNet v1.3
+ Training Resources: 2 GPUs
+ feature: mmaction_clip
+ Name: bmn_400x100_9e_2x8_activitynet_feature (mmaction_clip)
+ Results:
+ - Dataset: ActivityNet v1.3
+ Metrics:
+ AP@0.5: 43.08
+ AP@0.75: 32.19
+ AP@0.95: 10.73
+ AR@100: 75.35
+ AUC: 67.38
+ mAP: 31.15
+ Task: Temporal Action Localization
+ Training Json Log: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.json
+ Training Log: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.log
+ Weights: https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809-10d803ce.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README.md b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README.md
new file mode 100644
index 00000000..c307cb15
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README.md
@@ -0,0 +1,173 @@
+# BSN
+
+[Bsn: Boundary sensitive network for temporal action proposal generation](https://openaccess.thecvf.com/content_ECCV_2018/html/Tianwei_Lin_BSN_Boundary_Sensitive_ECCV_2018_paper.html)
+
+
+
+## Abstract
+
+
+
+Temporal action proposal generation is an important yet challenging problem, since temporal proposals with rich action content are indispensable for analysing real-world videos with long duration and high proportion irrelevant content. This problem requires methods not only generating proposals with precise temporal boundaries, but also retrieving proposals to cover truth action instances with high recall and high overlap using relatively fewer proposals. To address these difficulties, we introduce an effective proposal generation method, named Boundary-Sensitive Network (BSN), which adopts "local to global" fashion. Locally, BSN first locates temporal boundaries with high probabilities, then directly combines these boundaries as proposals. Globally, with Boundary-Sensitive Proposal feature, BSN retrieves proposals by evaluating the confidence of whether a proposal contains an action within its region. We conduct experiments on two challenging datasets: ActivityNet-1.3 and THUMOS14, where BSN outperforms other state-of-the-art temporal action proposal generation methods with high recall and high temporal precision. Finally, further experiments demonstrate that by combining existing action classifiers, our method significantly improves the state-of-the-art temporal action detection performance.
+
+
+
+
+
+
+
+## Results and Models
+
+### ActivityNet feature
+
+| config | feature | gpus | pretrain | AR@100 | AUC | gpu_mem(M) | iter time(s) | ckpt | log | json |
+| :--------------------------------------- | :------------: | :--: | :------: | :----: | :---: | :-------------: | :-------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| bsn_400x100_1x16_20e_activitynet_feature | cuhk_mean_100 | 1 | None | 74.66 | 66.45 | 41(TEM)+25(PEM) | 0.074(TEM)+0.036(PEM) | [ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature_20200619-cd6accc3.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature_20210203-1c27763d.pth) | [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log.json) |
+| | mmaction_video | 1 | None | 74.93 | 66.74 | 41(TEM)+25(PEM) | 0.074(TEM)+0.036(PEM) | [ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809-ad6ec626.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809-aa861b26.pth) | [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.json) |
+| | mmaction_clip | 1 | None | 75.19 | 66.81 | 41(TEM)+25(PEM) | 0.074(TEM)+0.036(PEM) | [ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809-0a563554.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809-e32f61e6.pth) | [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.json) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs we used to get the checkpoint.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu (see the short sketch after this note).
+2. In the **feature** column, `cuhk_mean_100` denotes the widely used CUHK ActivityNet features extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk), while `mmaction_video` and `mmaction_clip` denote features extracted with MMAction, using a video-level or a clip-level ActivityNet-finetuned model respectively.
+
+:::
+
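+As a quick illustration of the rule in note 1, here is a minimal Python sketch; the base values are the illustrative ones quoted above, not settings read from any config in this repository.
+
+```python
+# Linear scaling rule: scale the learning rate proportionally to the total batch size.
+base_lr = 0.01      # reference lr for 4 GPUs x 2 videos per GPU
+base_batch = 4 * 2  # reference total batch size
+
+def scaled_lr(num_gpus, videos_per_gpu):
+    return base_lr * (num_gpus * videos_per_gpu) / base_batch
+
+print(scaled_lr(16, 4))  # 0.08, matching the example in the note
+```
+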
+For more details on data preparation, you can refer to the ActivityNet feature section in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following commands to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Examples:
+
+1. Train BSN(TEM) on the ActivityNet feature dataset.
+
+ ```shell
+ python tools/train.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
+ ```
+
+2. Train BSN(PEM) on the PGM results.
+
+ ```shell
+ python tools/train.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
+ ```
+
+For more details and information on optional arguments, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Inference
+
+You can use the following commands to run inference with a model.
+
+1. For TEM Inference
+
+ ```shell
+ # Note: This could not be evaluated.
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+2. For PGM Inference
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py ${CONFIG_FILE} [--mode ${MODE}]
+ ```
+
+3. For PEM Inference
+
+ ```shell
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+Examples:
+
+1. Run BSN(TEM) inference with a pretrained model.
+
+ ```shell
+ python tools/test.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth
+ ```
+
+2. Run BSN(PGM) inference with a pretrained model.
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py --mode train
+ ```
+
+3. Run BSN(PEM) inference with the 'AR@AN' evaluation metric and dump the results to a file (see the short sketch after these examples for reading the dumped file).
+
+ ```shell
+ # Note: If evaluated, then please make sure the annotation file for test data contains groundtruth.
+ python tools/test.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json
+ ```
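+
+The `--out results.json` option above dumps the retrieved proposals to disk. The short sketch below is one way to inspect that file; it assumes the ActivityNet-style layout (a top-level `results` dict mapping video ids to scored segments), so adjust the keys if your dump is organised differently.
+
+```python
+import json
+
+with open('results.json') as f:
+    dumped = json.load(f)
+
+# Fall back to treating the whole file as the results dict if there is no
+# top-level 'results' key.
+proposals = dumped.get('results', dumped)
+for video_id, segments in list(proposals.items())[:3]:
+    best = max(segments, key=lambda s: s['score'])
+    print(video_id, best['segment'], best['score'])
+```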
+
+## Test
+
+You can use the following commands to test a model.
+
+1. TEM
+
+ ```shell
+ # Note: This could not be evaluated.
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+2. PGM
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py ${CONFIG_FILE} [--mode ${MODE}]
+ ```
+
+3. PEM
+
+ ```shell
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+Examples:
+
+1. Test a TEM model on ActivityNet dataset.
+
+ ```shell
+ python tools/test.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth
+ ```
+
+2. Test a PGM model on ActivityNet dataset.
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py --mode test
+ ```
+
+3. Test a PEM model with the evaluation metric 'AR@AN' and output the results.
+
+ ```shell
+ python tools/test.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json
+ ```
+
+:::{note}
+
+1. (Optional) You can use the following command to generate a formatted proposal file, which can then be fed into an action classifier (currently only SSN and P-GCN are supported, not TSN, I3D, etc.) to obtain classification results for the proposals.
+
+ ```shell
+ python tools/data/activitynet/convert_proposal_format.py
+ ```
+
+:::
+
+For more details and information on optional arguments, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+```BibTeX
+@inproceedings{lin2018bsn,
+ title={Bsn: Boundary sensitive network for temporal action proposal generation},
+ author={Lin, Tianwei and Zhao, Xu and Su, Haisheng and Wang, Chongjing and Yang, Ming},
+ booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
+ pages={3--19},
+ year={2018}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README_zh-CN.md
new file mode 100644
index 00000000..14e6251a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/README_zh-CN.md
@@ -0,0 +1,156 @@
+# BSN
+
+## Introduction
+
+
+
+```BibTeX
+@inproceedings{lin2018bsn,
+ title={Bsn: Boundary sensitive network for temporal action proposal generation},
+ author={Lin, Tianwei and Zhao, Xu and Su, Haisheng and Wang, Chongjing and Yang, Ming},
+ booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
+ pages={3--19},
+ year={2018}
+}
+```
+
+## Model Zoo
+
+### ActivityNet feature
+
+| config | feature | gpus | pretrain | AR@100 | AUC | gpu_mem(M) | iter time(s) | ckpt | log | json |
+| :--------------------------------------- | :------------: | :------: | :----: | :----: | :---: | :--------------: | :-------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| bsn_400x100_1x16_20e_activitynet_feature | cuhk_mean_100 | 1 | None | 74.66 | 66.45 | 41(TEM)+25(PEM) | 0.074(TEM)+0.036(PEM) | [ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature_20200619-cd6accc3.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature_20210203-1c27763d.pth) | [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log.json) |
+| | mmaction_video | 1 | None | 74.93 | 66.74 | 41(TEM)+25(PEM) | 0.074(TEM)+0.036(PEM) | [ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809-ad6ec626.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809-aa861b26.pth) | [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.json) |
+| | mmaction_clip | 1 | None | 75.19 | 66.81 | 41(TEM)+25(PEM) | 0.074(TEM)+0.036(PEM) | [ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809-0a563554.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809-e32f61e6.pth) | [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.json) |
+
+Note:
+
+1. The **gpus** column refers to the number of GPUs used to obtain the model weights. By default, the configs provided by MMAction2 correspond to training with 8 GPUs.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you need to scale the learning rate proportionally to the batch size when using a different number of GPUs or a different number of videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
+2. In the **feature** column, `cuhk_mean_100` denotes the widely used CUHK ActivityNet features extracted with the [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk) codebase, while `mmaction_video` and `mmaction_clip` denote features extracted with MMAction, using a video-level or a clip-level ActivityNet-finetuned model respectively.
+
+For details on data preparation, please refer to the ActivityNet feature section in [Data Preparation](/docs_zh_CN/data_preparation.md).
+
+## Train
+
+You can use the following commands to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Examples:
+
+1. Train the BSN(TEM) model on ActivityNet features.
+
+ ```shell
+ python tools/train.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
+ ```
+
+2. Train BSN(PEM) on the PGM results.
+
+ ```shell
+ python tools/train.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
+ ```
+
+For more training details, please refer to the **Training setting** part in [getting_started](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE).
+
+## Inference
+
+You can use the following commands to run inference with a model.
+
+1. For TEM inference.
+
+ ```shell
+ # Note: This could not be evaluated.
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+2. For PGM inference
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py ${CONFIG_FILE} [--mode ${MODE}]
+ ```
+
+3. For PEM inference
+
+ ```shell
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+Examples:
+
+1. Run BSN(TEM) inference with a pretrained model.
+
+ ```shell
+ python tools/test.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth
+ ```
+
+2. Run BSN(PGM) inference with a pretrained model.
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py --mode train
+ ```
+
+3. Run BSN(PEM) inference with the 'AR@AN' evaluation metric and output the result file.
+
+ ```shell
+    # Note: If evaluating, make sure the annotation file for the test data contains ground truth.
+ python tools/test.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json
+ ```
+
+## Test
+
+You can use the following commands to test a model.
+
+1. TEM
+
+ ```shell
+    # Note: This could not be evaluated.
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+2. PGM
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py ${CONFIG_FILE} [--mode ${MODE}]
+ ```
+
+3. PEM
+
+ ```shell
+ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+ ```
+
+Examples:
+
+1. Test the TEM model on the ActivityNet dataset.
+
+ ```shell
+ python tools/test.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth
+ ```
+
+2. Test the PGM model on the ActivityNet dataset.
+
+ ```shell
+ python tools/misc/bsn_proposal_generation.py configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py --mode test
+ ```
+
+3. Test the PEM model with the 'AR@AN' evaluation metric and output the result file.
+
+ ```shell
+ python tools/test.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json
+ ```
+
+Note:
+
+1. (Optional) You can use the following command to generate a formatted proposal file, which can then be fed into an action classifier (currently only SSN and P-GCN are supported, not TSN, I3D, etc.) to obtain classification results for the proposals.
+
+ ```shell
+ python tools/data/activitynet/convert_proposal_format.py
+ ```
+
+For more testing details, please refer to the **Test a dataset** part in [getting_started](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86).
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
new file mode 100644
index 00000000..429d2284
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
@@ -0,0 +1,95 @@
+_base_ = [
+ '../../_base_/models/bsn_pem.py', '../../_base_/schedules/adam_20e.py',
+ '../../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ActivityNetDataset'
+data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+ann_file_train = 'data/ActivityNet/anet_anno_train.json'
+ann_file_val = 'data/ActivityNet/anet_anno_val.json'
+ann_file_test = 'data/ActivityNet/anet_anno_val.json'
+
+work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/'
+pgm_proposals_dir = f'{work_dir}/pgm_proposals/'
+pgm_features_dir = f'{work_dir}/pgm_features/'
+
+test_pipeline = [
+ dict(
+ type='LoadProposals',
+ top_k=1000,
+ pgm_proposals_dir=pgm_proposals_dir,
+ pgm_features_dir=pgm_features_dir),
+ dict(
+ type='Collect',
+ keys=['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score'],
+ meta_name='video_meta',
+ meta_keys=[
+ 'video_name', 'duration_second', 'duration_frame', 'annotations',
+ 'feature_frame'
+ ]),
+ dict(type='ToTensor', keys=['bsp_feature'])
+]
+
+train_pipeline = [
+ dict(
+ type='LoadProposals',
+ top_k=500,
+ pgm_proposals_dir=pgm_proposals_dir,
+ pgm_features_dir=pgm_features_dir),
+ dict(
+ type='Collect',
+ keys=['bsp_feature', 'reference_temporal_iou'],
+ meta_name='video_meta',
+ meta_keys=[]),
+ dict(type='ToTensor', keys=['bsp_feature', 'reference_temporal_iou']),
+ dict(
+ type='ToDataContainer',
+ fields=(dict(key='bsp_feature', stack=False),
+ dict(key='reference_temporal_iou', stack=False)))
+]
+
+val_pipeline = [
+ dict(
+ type='LoadProposals',
+ top_k=1000,
+ pgm_proposals_dir=pgm_proposals_dir,
+ pgm_features_dir=pgm_features_dir),
+ dict(
+ type='Collect',
+ keys=['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score'],
+ meta_name='video_meta',
+ meta_keys=[
+ 'video_name', 'duration_second', 'duration_frame', 'annotations',
+ 'feature_frame'
+ ]),
+ dict(type='ToTensor', keys=['bsp_feature'])
+]
+data = dict(
+ videos_per_gpu=16,
+ workers_per_gpu=8,
+ train_dataloader=dict(drop_last=True),
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_test,
+ pipeline=test_pipeline,
+ data_prefix=data_root_val),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ pipeline=val_pipeline,
+ data_prefix=data_root_val),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ pipeline=train_pipeline,
+ data_prefix=data_root))
+evaluation = dict(interval=1, metrics=['AR@AN'])
+
+# runtime settings
+checkpoint_config = dict(interval=1, filename_tmpl='pem_epoch_{}.pth')
+log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
+output_config = dict(out=f'{work_dir}/results.json', output_format='json')
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
new file mode 100644
index 00000000..2c5f7a03
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
@@ -0,0 +1,32 @@
+# dataset settings
+dataset_type = 'ActivityNetDataset'
+data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+ann_file_train = 'data/ActivityNet/anet_anno_train.json'
+ann_file_val = 'data/ActivityNet/anet_anno_val.json'
+ann_file_test = 'data/ActivityNet/anet_anno_test.json'
+
+work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/'
+tem_results_dir = f'{work_dir}/tem_results/'
+pgm_proposals_dir = f'{work_dir}/pgm_proposals/'
+pgm_features_dir = f'{work_dir}/pgm_features/'
+
+temporal_scale = 100
+pgm_proposals_cfg = dict(
+ pgm_proposals_thread=8, temporal_scale=temporal_scale, peak_threshold=0.5)
+pgm_features_test_cfg = dict(
+ pgm_features_thread=4,
+ top_k=1000,
+ num_sample_start=8,
+ num_sample_end=8,
+ num_sample_action=16,
+ num_sample_interp=3,
+ bsp_boundary_ratio=0.2)
+pgm_features_train_cfg = dict(
+ pgm_features_thread=4,
+ top_k=500,
+ num_sample_start=8,
+ num_sample_end=8,
+ num_sample_action=16,
+ num_sample_interp=3,
+ bsp_boundary_ratio=0.2)
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
new file mode 100644
index 00000000..60093cf4
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
@@ -0,0 +1,79 @@
+_base_ = ['../../_base_/models/bsn_tem.py', '../../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ActivityNetDataset'
+data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/'
+ann_file_train = 'data/ActivityNet/anet_anno_train.json'
+ann_file_val = 'data/ActivityNet/anet_anno_val.json'
+ann_file_test = 'data/ActivityNet/anet_anno_full.json'
+
+test_pipeline = [
+ dict(type='LoadLocalizationFeature'),
+ dict(
+ type='Collect',
+ keys=['raw_feature'],
+ meta_name='video_meta',
+ meta_keys=['video_name']),
+ dict(type='ToTensor', keys=['raw_feature'])
+]
+train_pipeline = [
+ dict(type='LoadLocalizationFeature'),
+ dict(type='GenerateLocalizationLabels'),
+ dict(
+ type='Collect',
+ keys=['raw_feature', 'gt_bbox'],
+ meta_name='video_meta',
+ meta_keys=['video_name']),
+ dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']),
+ dict(type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False)])
+]
+val_pipeline = [
+ dict(type='LoadLocalizationFeature'),
+ dict(type='GenerateLocalizationLabels'),
+ dict(
+ type='Collect',
+ keys=['raw_feature', 'gt_bbox'],
+ meta_name='video_meta',
+ meta_keys=['video_name']),
+ dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']),
+ dict(type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False)])
+]
+
+data = dict(
+ videos_per_gpu=16,
+ workers_per_gpu=8,
+ train_dataloader=dict(drop_last=True),
+ val_dataloader=dict(videos_per_gpu=1),
+ test_dataloader=dict(videos_per_gpu=1),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_test,
+ pipeline=test_pipeline,
+ data_prefix=data_root_val),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ pipeline=val_pipeline,
+ data_prefix=data_root_val),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ pipeline=train_pipeline,
+ data_prefix=data_root))
+
+# optimizer
+optimizer = dict(
+    type='Adam', lr=0.001, weight_decay=0.0001)  # this lr is used for 1 gpu
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=7)
+total_epochs = 20
+
+# runtime settings
+checkpoint_config = dict(interval=1, filename_tmpl='tem_epoch_{}.pth')
+log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
+workflow = [('train', 1), ('val', 1)]
+work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/'
+tem_results_dir = f'{work_dir}/tem_results/'
+output_config = dict(out=tem_results_dir, output_format='csv')
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/metafile.yml
new file mode 100644
index 00000000..e1bddeb9
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/bsn/metafile.yml
@@ -0,0 +1,85 @@
+Collections:
+- Name: BSN
+ README: configs/localization/bsn/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1806.02964
+ Title: "BSN: Boundary Sensitive Network for Temporal Action Proposal Generation"
+Models:
+- Config:
+ - configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
+ - configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
+ - configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
+ In Collection: BSN
+ Metadata:
+ Pretrained: None
+ Training Data: ActivityNet v1.3
+ Training Resources: 1 GPUs
+ feature: cuhk_mean_100
+ Name: bsn_400x100_1x16_20e_activitynet_feature (cuhk_mean_100)
+ Results:
+ - Dataset: ActivityNet v1.3
+ Metrics:
+ AR@100: 74.66
+ AUC: 66.45
+ Task: Temporal Action Localization
+ Training Json Log:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log.json
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log.json
+ Training Log:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log
+ Weights:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature_20200619-cd6accc3.pth
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature_20210203-1c27763d.pth
+- Config:
+ - configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
+ - configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
+ - configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
+ In Collection: BSN
+ Metadata:
+ Pretrained: None
+ Training Data: ActivityNet v1.3
+ Training Resources: 1 GPUs
+ feature: mmaction_video
+ Name: bsn_400x100_1x16_20e_activitynet_feature (mmaction_video)
+ Results:
+ - Dataset: ActivityNet v1.3
+ Metrics:
+ AR@100: 74.93
+ AUC: 66.74
+ Task: Temporal Action Localization
+ Training Json Log:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.json
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.json
+ Training Log:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.log
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.log
+ Weights:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809-ad6ec626.pth
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809-aa861b26.pth
+- Config:
+ - configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
+ - configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
+ - configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
+ In Collection: BSN
+ Metadata:
+ Pretrained: None
+ Training Data: ActivityNet v1.3
+ Training Resources: 1 GPUs
+ feature: mmaction_clip
+ Name: bsn_400x100_1x16_20e_activitynet_feature (mmaction_clip)
+ Results:
+ - Dataset: ActivityNet v1.3
+ Metrics:
+ AR@100: 75.19
+ AUC: 66.81
+ Task: Temporal Action Localization
+ Training Json Log:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.json
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.json
+ Training Log:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.log
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.log
+ Weights:
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809-0a563554.pth
+ - https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809-e32f61e6.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README.md b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README.md
new file mode 100644
index 00000000..7eb73213
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README.md
@@ -0,0 +1,79 @@
+# SSN
+
+[Temporal Action Detection With Structured Segment Networks](https://openaccess.thecvf.com/content_iccv_2017/html/Zhao_Temporal_Action_Detection_ICCV_2017_paper.html)
+
+
+
+## Abstract
+
+
+
+Detecting actions in untrimmed videos is an important yet challenging task. In this paper, we present the structured segment network (SSN), a novel framework which models the temporal structure of each action instance via a structured temporal pyramid. On top of the pyramid, we further introduce a decomposed discriminative model comprising two classifiers, respectively for classifying actions and determining completeness. This allows the framework to effectively distinguish positive proposals from background or incomplete ones, thus leading to both accurate recognition and localization. These components are integrated into a unified network that can be efficiently trained in an end-to-end fashion. Additionally, a simple yet effective temporal action proposal scheme, dubbed temporal actionness grouping (TAG) is devised to generate high quality action proposals. On two challenging benchmarks, THUMOS14 and ActivityNet, our method remarkably outperforms previous state-of-the-art methods, demonstrating superior accuracy and strong adaptivity in handling actions with various temporal structures.
+
+
+
+
+
+
+
+## Results and Models
+
+| config | gpus | backbone | pretrain | mAP@0.3 | mAP@0.4 | mAP@0.5 | reference mAP@0.3 | reference mAP@0.4 | reference mAP@0.5 | gpu_mem(M) | ckpt | log | json | reference ckpt | reference json |
+| :---------------------------------------------------------------------------------------: | :--: | :------: | :------: | :-----: | :-----: | :-----: | :---------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | :--------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------: | ------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------: |
+| [ssn_r50_450e_thumos14_rgb](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) | 8 | ResNet50 | ImageNet | 29.37 | 22.15 | 15.69 | [27.61](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started) | [21.28](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started) | [14.57](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started) | 6352 | [ckpt](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/ssn_r50_450e_thumos14_rgb_20201012-1920ab16.pth) | [log](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log) | [json](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log.json) | [ckpt](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/ssn_r50_450e_thumos14_rgb_ref_20201014-b6f48f68.pth) | [json](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/20201008_103258.log.json) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs we used to get the checkpoint.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
+2. Since SSN uses different structured temporal pyramid pooling methods at training and testing time, please use [ssn_r50_450e_thumos14_rgb_train](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) for training and [ssn_r50_450e_thumos14_rgb_test](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py) for testing (see the sketch after this note).
+3. We evaluate the action detection performance of SSN using TAG action proposals. For more details on data preparation, you can refer to the THUMOS14 TAG proposals section in [Data Preparation](/docs/data_preparation.md).
+4. The reference SSN is evaluated with a `ResNet50` backbone in MMAction, which is the same backbone as ours. Note that the original setting of MMAction SSN uses the `BNInception` backbone.
+
+:::
+
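+As an optional illustration of note 2, the two configs can be loaded and compared with `mmcv`. This is only a sketch: it assumes mmcv is installed and that the snippet is run from the repository root.
+
+```python
+from mmcv import Config
+
+# The train and test configs differ in the consensus module of the head.
+train_cfg = Config.fromfile('configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py')
+test_cfg = Config.fromfile('configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py')
+print(train_cfg.model.cls_head.consensus.type)  # STPPTrain
+print(test_cfg.model.cls_head.consensus.type)   # STPPTest
+```
+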
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the SSN model on the THUMOS14 dataset.
+
+```shell
+python tools/train.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
+```
+
+For more details and information on optional arguments, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the SSN model on the THUMOS14 dataset.
+
+```shell
+# Note: If evaluated, then please make sure the annotation file for test data contains groundtruth.
+python tools/test.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py checkpoints/SOME_CHECKPOINT.pth --eval mAP
+```
+
+For more details and information on optional arguments, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+```BibTeX
+@InProceedings{Zhao_2017_ICCV,
+author = {Zhao, Yue and Xiong, Yuanjun and Wang, Limin and Wu, Zhirong and Tang, Xiaoou and Lin, Dahua},
+title = {Temporal Action Detection With Structured Segment Networks},
+booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
+month = {Oct},
+year = {2017}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README_zh-CN.md
new file mode 100644
index 00000000..62ccc2ca
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/README_zh-CN.md
@@ -0,0 +1,63 @@
+# SSN
+
+## Introduction
+
+
+
+```BibTeX
+@InProceedings{Zhao_2017_ICCV,
+author = {Zhao, Yue and Xiong, Yuanjun and Wang, Limin and Wu, Zhirong and Tang, Xiaoou and Lin, Dahua},
+title = {Temporal Action Detection With Structured Segment Networks},
+booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
+month = {Oct},
+year = {2017}
+}
+```
+
+## Model Zoo
+
+| config | gpus | backbone | pretrain | mAP@0.3 | mAP@0.4 | mAP@0.5 | reference mAP@0.3 | reference mAP@0.4 | reference mAP@0.5 | gpu_mem(M) | ckpt | log | json | reference ckpt | reference json |
+| :---------------------------------------------------------------------------------------: | :------: | :------: | :------: | :-----: | :-----: | :-----: | :---------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | :--------------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------: | ------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------: |
+| [ssn_r50_450e_thumos14_rgb](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) | 8 | ResNet50 | ImageNet | 29.37 | 22.15 | 15.69 | [27.61](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started) | [21.28](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started) | [14.57](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started) | 6352 | [ckpt](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/ssn_r50_450e_thumos14_rgb_20201012-1920ab16.pth) | [log](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log) | [json](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log.json) | [ckpt](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/ssn_r50_450e_thumos14_rgb_ref_20201014-b6f48f68.pth) | [json](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/20201008_103258.log.json) |
+
+Note:
+
+1. The **gpus** column refers to the number of GPUs used to obtain the model weights. By default, the configs provided by MMAction2 correspond to training with 8 GPUs.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you need to scale the learning rate proportionally to the batch size when using a different number of GPUs or a different number of videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
+2. Since SSN uses different structured temporal pyramid pooling methods at training and testing time, please refer to [ssn_r50_450e_thumos14_rgb_train](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) and [ssn_r50_450e_thumos14_rgb_test](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py) respectively.
+3. MMAction2 evaluates the action detection performance of SSN using TAG proposals. For more details on data preparation, you can refer to [Data Preparation](/docs_zh_CN/data_preparation.md) to prepare the THUMOS14 TAG proposals.
+4. The reference SSN model is evaluated with the same `ResNet50` backbone as MMAction2. Note that the original setting of the reference codebase uses the `BNInception` backbone.
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the SSN model on the THUMOS14 dataset.
+
+```shell
+python tools/train.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
+```
+
+For more training details, please refer to the **Training setting** part in [getting_started](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the SSN model on the THUMOS14 dataset.
+
+```shell
+# Note: If evaluating, make sure the annotation file for the test data contains ground truth.
+python tools/test.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py checkpoints/SOME_CHECKPOINT.pth --eval mAP
+```
+
+For more testing details, please refer to the **Test a dataset** part in [getting_started](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86).
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/metafile.yml
new file mode 100644
index 00000000..d2b58800
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/metafile.yml
@@ -0,0 +1,30 @@
+Collections:
+- Name: SSN
+ README: configs/localization/ssn/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1704.06228
+ Title: Temporal Action Detection with Structured Segment Networks
+Models:
+- Config: configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
+ In Collection: SSN
+ Metadata:
+ Architecture: ResNet50
+ Pretrained: ImageNet
+ Training Data: THUMOS 14
+ Training Resources: 8 GPUs
+ Name: ssn_r50_450e_thumos14_rgb
+ Results:
+ - Dataset: THUMOS 14
+ Metrics:
+ mAP@0.3: 29.37
+ mAP@0.4: 22.15
+ mAP@0.5: 15.69
+ Task: Temporal Action Localization
+ Training Json Log: https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log.json
+ Training Log: https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log
+ Weights: https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/ssn_r50_450e_thumos14_rgb_20201012-1920ab16.pth
+ reference mAP@0.3: '[27.61](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started)'
+ reference mAP@0.4: '[21.28](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started)'
+ reference mAP@0.5: '[14.57](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started)'
+ reference ckpt: '[ckpt](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/ssn_r50_450e_thumos14_rgb_ref_20201014-b6f48f68.pth)'
+ reference json: '[json](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/20201008_103258.log.json)'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py
new file mode 100644
index 00000000..b9ed3979
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py
@@ -0,0 +1,109 @@
+# model training and testing settings
+train_cfg_ = dict(
+ ssn=dict(
+ assigner=dict(
+ positive_iou_threshold=0.7,
+ background_iou_threshold=0.01,
+ incomplete_iou_threshold=0.3,
+ background_coverage_threshold=0.02,
+ incomplete_overlap_threshold=0.01),
+ sampler=dict(
+ num_per_video=8,
+ positive_ratio=1,
+ background_ratio=1,
+ incomplete_ratio=6,
+ add_gt_as_proposals=True),
+ loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1),
+ debug=False))
+test_cfg_ = dict(
+ ssn=dict(
+ sampler=dict(test_interval=6, batch_size=16),
+ evaluater=dict(
+ top_k=2000,
+ nms=0.2,
+ softmax_before_filter=True,
+ cls_score_dict=None,
+ cls_top_k=2)))
+# model settings
+model = dict(
+ type='SSN',
+ backbone=dict(
+ type='ResNet',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ norm_eval=False,
+ partial_bn=True),
+ spatial_type='avg',
+ dropout_ratio=0.8,
+ cls_head=dict(
+ type='SSNHead',
+ dropout_ratio=0.,
+ in_channels=2048,
+ num_classes=20,
+ consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)),
+ use_regression=True),
+ test_cfg=test_cfg_)
+# dataset settings
+dataset_type = 'SSNDataset'
+data_root = './data/thumos14/rawframes/'
+data_root_val = './data/thumos14/rawframes/'
+ann_file_train = 'data/thumos14/thumos14_tag_val_proposal_list.txt'
+ann_file_val = 'data/thumos14/thumos14_tag_val_proposal_list.txt'
+ann_file_test = 'data/thumos14/thumos14_tag_test_proposal_list.txt'
+img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=True)
+test_pipeline = [
+ dict(
+ type='SampleProposalFrames',
+ clip_len=1,
+ body_segments=5,
+ aug_segments=(2, 2),
+ aug_ratio=0.5,
+ mode='test'),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(340, 256), keep_ratio=True),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCHW'),
+ dict(
+ type='Collect',
+ keys=[
+ 'imgs', 'relative_proposal_list', 'scale_factor_list',
+ 'proposal_tick_list', 'reg_norm_consts'
+ ],
+ meta_keys=[]),
+ dict(
+ type='ToTensor',
+ keys=[
+ 'imgs', 'relative_proposal_list', 'scale_factor_list',
+ 'proposal_tick_list', 'reg_norm_consts'
+ ])
+]
+data = dict(
+ videos_per_gpu=1,
+ workers_per_gpu=2,
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_test,
+ data_prefix=data_root,
+ train_cfg=train_cfg_,
+ test_cfg=test_cfg_,
+ aug_ratio=0.5,
+ test_mode=True,
+ pipeline=test_pipeline))
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.001, momentum=0.9,
+ weight_decay=1e-6) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[200, 400])
+checkpoint_config = dict(interval=5)
+log_config = dict(interval=5, hooks=[dict(type='TextLoggerHook')])
+# runtime settings
+total_epochs = 450
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/ssn_r50_1x5_450e_thumos14_rgb'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
new file mode 100644
index 00000000..75d927a7
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
@@ -0,0 +1,154 @@
+# model training and testing settings
+train_cfg_ = dict(
+ ssn=dict(
+ assigner=dict(
+ positive_iou_threshold=0.7,
+ background_iou_threshold=0.01,
+ incomplete_iou_threshold=0.3,
+ background_coverage_threshold=0.02,
+ incomplete_overlap_threshold=0.01),
+ sampler=dict(
+ num_per_video=8,
+ positive_ratio=1,
+ background_ratio=1,
+ incomplete_ratio=6,
+ add_gt_as_proposals=True),
+ loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1),
+ debug=False))
+test_cfg_ = dict(
+ ssn=dict(
+ sampler=dict(test_interval=6, batch_size=16),
+ evaluater=dict(
+ top_k=2000,
+ nms=0.2,
+ softmax_before_filter=True,
+ cls_score_dict=None,
+ cls_top_k=2)))
+# model settings
+model = dict(
+ type='SSN',
+ backbone=dict(
+ type='ResNet',
+ pretrained='torchvision://resnet50',
+ depth=50,
+ norm_eval=False,
+ partial_bn=True),
+ spatial_type='avg',
+ dropout_ratio=0.8,
+ loss_cls=dict(type='SSNLoss'),
+ cls_head=dict(
+ type='SSNHead',
+ dropout_ratio=0.,
+ in_channels=2048,
+ num_classes=20,
+ consensus=dict(
+ type='STPPTrain',
+ stpp_stage=(1, 1, 1),
+ num_segments_list=(2, 5, 2)),
+ use_regression=True),
+ train_cfg=train_cfg_)
+# dataset settings
+dataset_type = 'SSNDataset'
+data_root = './data/thumos14/rawframes/'
+data_root_val = './data/thumos14/rawframes/'
+ann_file_train = 'data/thumos14/thumos14_tag_val_proposal_list.txt'
+ann_file_val = 'data/thumos14/thumos14_tag_val_proposal_list.txt'
+ann_file_test = 'data/thumos14/thumos14_tag_test_proposal_list.txt'
+img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=True)
+train_pipeline = [
+ dict(
+ type='SampleProposalFrames',
+ clip_len=1,
+ body_segments=5,
+ aug_segments=(2, 2),
+ aug_ratio=0.5),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(340, 256), keep_ratio=True),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NPTCHW'),
+ dict(
+ type='Collect',
+ keys=[
+ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels',
+ 'proposal_type'
+ ],
+ meta_keys=[]),
+ dict(
+ type='ToTensor',
+ keys=[
+ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels',
+ 'proposal_type'
+ ])
+]
+val_pipeline = [
+ dict(
+ type='SampleProposalFrames',
+ clip_len=1,
+ body_segments=5,
+ aug_segments=(2, 2),
+ aug_ratio=0.5),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(340, 256), keep_ratio=True),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NPTCHW'),
+ dict(
+ type='Collect',
+ keys=[
+ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels',
+ 'proposal_type'
+ ],
+ meta_keys=[]),
+ dict(
+ type='ToTensor',
+ keys=[
+ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels',
+ 'proposal_type'
+ ])
+]
+data = dict(
+ videos_per_gpu=1,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ train_cfg=train_cfg_,
+ test_cfg=test_cfg_,
+ body_segments=5,
+ aug_segments=(2, 2),
+ aug_ratio=0.5,
+ test_mode=False,
+ verbose=True,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root,
+ train_cfg=train_cfg_,
+ test_cfg=test_cfg_,
+ body_segments=5,
+ aug_segments=(2, 2),
+ aug_ratio=0.5,
+ test_mode=False,
+ pipeline=val_pipeline))
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.001, momentum=0.9,
+ weight_decay=1e-6) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[200, 400])
+checkpoint_config = dict(interval=5)
+log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook')])
+# runtime settings
+total_epochs = 450
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/ssn_r50_1x5_450e_thumos14_rgb'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+find_unused_parameters = True
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README.md
new file mode 100644
index 00000000..859890c1
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README.md
@@ -0,0 +1,87 @@
+# C3D
+
+[Learning Spatiotemporal Features with 3D Convolutional Networks](https://openaccess.thecvf.com/content_iccv_2015/html/Tran_Learning_Spatiotemporal_Features_ICCV_2015_paper.html)
+
+
+
+## Abstract
+
+
+
+We propose a simple, yet effective approach for spatiotemporal feature learning using deep 3-dimensional convolutional networks (3D ConvNets) trained on a large scale supervised video dataset. Our findings are three-fold: 1) 3D ConvNets are more suitable for spatiotemporal feature learning compared to 2D ConvNets; 2) A homogeneous architecture with small 3x3x3 convolution kernels in all layers is among the best performing architectures for 3D ConvNets; and 3) Our learned features, namely C3D (Convolutional 3D), with a simple linear classifier outperform state-of-the-art methods on 4 different benchmarks and are comparable with current best methods on the other 2 benchmarks. In addition, the features are compact: achieving 52.8% accuracy on UCF101 dataset with only 10 dimensions and also very efficient to compute due to the fast inference of ConvNets. Finally, they are conceptually very simple and easy to train and use.
+
+
+
+
+

+
+
+## Results and Models
+
+### UCF-101
+
+| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference_time(video/s) | gpu_mem(M) | ckpt | log | json |
+| :------------------------------------------------------------------------------------------------------ | :--------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: |
+| [c3d_sports1m_16x1x1_45e_ucf101_rgb.py](/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py) | 128x171 | 8 | c3d | sports1m | 83.27 | 95.90 | 10 clips x 1 crop | x | 6053 | [ckpt](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/c3d_sports1m_16x1x1_45e_ucf101_rgb_20201021-26655025.pth) | [log](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log) | [json](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log.json) |
+
+:::{note}
+
+1. The authors of C3D normalized UCF-101 with the volume mean and used an SVM to classify videos, while we normalize the dataset with the RGB mean value and use a linear classifier.
+2. The **gpus** column indicates the number of GPUs (32G V100) used to obtain the checkpoint. Note that the configs we provide assume 8 GPUs by default.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu (a short scaling sketch follows this note).
+3. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and timing only the model inference,
+   not including the IO time and pre-processing time. For each setting, we use 1 GPU with a batch size (videos per GPU) of 1.
+
+:::
+
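+The following is a minimal, self-contained sketch of the Linear Scaling Rule from note 2. The helper and the baseline numbers (8 GPUs x 30 videos per GPU, lr=0.001, taken from this C3D config) are illustrative only and not part of MMAction2; adapt them to the config you actually train.
+
+```python
+def scaled_lr(base_lr: float, base_gpus: int, base_videos_per_gpu: int,
+              gpus: int, videos_per_gpu: int) -> float:
+    """Scale the learning rate linearly with the effective batch size."""
+    base_batch = base_gpus * base_videos_per_gpu
+    new_batch = gpus * videos_per_gpu
+    return base_lr * new_batch / base_batch
+
+
+# Training on 4 GPUs with 30 videos per GPU halves the effective batch size,
+# so the learning rate is halved as well.
+print(scaled_lr(0.001, 8, 30, 4, 30))  # 0.0005
+```
+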
+For more details on data preparation, you can refer to UCF-101 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the C3D model on the UCF-101 dataset with deterministic options and periodic validation.
+
+```shell
+python tools/train.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py \
+ --validate --seed 0 --deterministic
+```
+
+For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the C3D model on the UCF-101 dataset and dump the result to a json file.
+
+```shell
+python tools/test.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy \
+    --out result.json
+```
+
+For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+
+
+```BibTeX
+@ARTICLE{2014arXiv1412.0767T,
+author = {Tran, Du and Bourdev, Lubomir and Fergus, Rob and Torresani, Lorenzo and Paluri, Manohar},
+title = {Learning Spatiotemporal Features with 3D Convolutional Networks},
+keywords = {Computer Science - Computer Vision and Pattern Recognition},
+year = 2014,
+month = dec,
+eid = {arXiv:1412.0767}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README_zh-CN.md
new file mode 100644
index 00000000..3344f7d0
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/README_zh-CN.md
@@ -0,0 +1,69 @@
+# C3D
+
+## 简介
+
+
+
+```BibTeX
+@ARTICLE{2014arXiv1412.0767T,
+author = {Tran, Du and Bourdev, Lubomir and Fergus, Rob and Torresani, Lorenzo and Paluri, Manohar},
+title = {Learning Spatiotemporal Features with 3D Convolutional Networks},
+keywords = {Computer Science - Computer Vision and Pattern Recognition},
+year = 2014,
+month = dec,
+eid = {arXiv:1412.0767}
+}
+```
+
+## 模型库
+
+### UCF-101
+
+| 配置文件 | 分辨率 | GPU 数量 | 主干网络 | 预训练 | top1 准确率 | top5 准确率 | 测试方案 | 推理时间 (video/s) | GPU 显存占用 (M) | ckpt | log | json |
+| :------------------------------------------------------------------------------------------------------ | :-----: | :------: | :------: | :------: | :---------: | :---------: | :---------------: | :----------------: | :--------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: |
+| [c3d_sports1m_16x1x1_45e_ucf101_rgb.py](/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py) | 128x171 | 8 | c3d | sports1m | 83.27 | 95.90 | 10 clips x 1 crop | x | 6053 | [ckpt](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/c3d_sports1m_16x1x1_45e_ucf101_rgb_20201021-26655025.pth) | [log](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log) | [json](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log.json) |
+
+注:
+
+1. C3D 的原论文使用 UCF-101 的数据均值进行数据正则化,并且使用 SVM 进行视频分类。MMAction2 使用 ImageNet 的 RGB 均值进行数据正则化,并且使用线性分类器。
+2. 这里的 **GPU 数量** 指的是得到模型权重文件对应的 GPU 个数。默认地,MMAction2 所提供的配置文件对应使用 8 块 GPU 进行训练的情况。
+ 依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
+ 如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
+3. 这里的 **推理时间** 是根据 [基准测试脚本](/tools/analysis/benchmark.py) 获得的,采用测试时的采帧策略,且只考虑模型的推理时间,
+ 并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
+
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 UCF-101 部分。
+
+## 如何训练
+
+用户可以使用以下指令进行模型训练。
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+例如:以一个确定性的训练方式,辅以定期的验证过程进行 C3D 模型在 UCF-101 数据集上的训练。
+
+```shell
+python tools/train.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py \
+ --validate --seed 0 --deterministic
+```
+
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE) 中的 **训练配置** 部分。
+
+## 如何测试
+
+用户可以使用以下指令进行模型测试。
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+例如:在 UCF-101 数据集上测试 C3D 模型,并将结果导出为一个 json 文件。
+
+```shell
+python tools/test.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy \
+    --out result.json
+```
+
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86) 中的 **测试某个数据集** 部分。
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py
new file mode 100644
index 00000000..cd96fca8
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py
@@ -0,0 +1,95 @@
+_base_ = '../../_base_/models/c3d_sports1m_pretrained.py'
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/ucf101/rawframes'
+data_root_val = 'data/ucf101/rawframes'
+split = 1 # official train/test splits. valid numbers: 1, 2, 3
+ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt'
+ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
+ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
+img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=16, frame_interval=1, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(128, 171)),
+ dict(type='RandomCrop', size=112),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=16,
+ frame_interval=1,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(128, 171)),
+ dict(type='CenterCrop', crop_size=112),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=16,
+ frame_interval=1,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(128, 171)),
+ dict(type='CenterCrop', crop_size=112),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+data = dict(
+ videos_per_gpu=30,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_test,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.001, momentum=0.9,
+ weight_decay=0.0005) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[20, 40])
+total_epochs = 45
+checkpoint_config = dict(interval=5)
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+log_config = dict(
+ interval=20,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook'),
+ ])
+# runtime settings
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = f'./work_dirs/c3d_sports1m_16x1x1_45e_ucf101_split_{split}_rgb/'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/metafile.yml
new file mode 100644
index 00000000..f3e7ec9a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/c3d/metafile.yml
@@ -0,0 +1,30 @@
+Collections:
+- Name: C3D
+ README: configs/recognition/c3d/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1412.0767
+ Title: Learning Spatiotemporal Features with 3D Convolutional Networks
+Models:
+- Config: configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py
+ In Collection: C3D
+ Metadata:
+ Architecture: c3d
+ Batch Size: 30
+ Epochs: 45
+ FLOPs: 38615475200
+ Parameters: 78409573
+ Pretrained: sports1m
+ Resolution: 128x171
+ Training Data: UCF101
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: c3d_sports1m_16x1x1_45e_ucf101_rgb
+ Results:
+ - Dataset: UCF101
+ Metrics:
+ Top 1 Accuracy: 83.27
+ Top 5 Accuracy: 95.9
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/c3d_sports1m_16x1x1_45e_ucf101_rgb_20201021-26655025.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README.md
new file mode 100644
index 00000000..5fa387e5
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README.md
@@ -0,0 +1,108 @@
+# CSN
+
+[Video Classification With Channel-Separated Convolutional Networks](https://openaccess.thecvf.com/content_ICCV_2019/html/Tran_Video_Classification_With_Channel-Separated_Convolutional_Networks_ICCV_2019_paper.html)
+
+
+
+## Abstract
+
+
+
+Group convolution has been shown to offer great computational savings in various 2D convolutional architectures for image classification. It is natural to ask: 1) if group convolution can help to alleviate the high computational cost of video classification networks; 2) what factors matter the most in 3D group convolutional networks; and 3) what are good computation/accuracy trade-offs with 3D group convolutional networks. This paper studies the effects of different design choices in 3D group convolutional networks for video classification. We empirically demonstrate that the amount of channel interactions plays an important role in the accuracy of 3D group convolutional networks. Our experiments suggest two main findings. First, it is a good practice to factorize 3D convolutions by separating channel interactions and spatiotemporal interactions as this leads to improved accuracy and lower computational cost. Second, 3D channel-separated convolutions provide a form of regularization, yielding lower training accuracy but higher test accuracy compared to 3D convolutions. These two empirical findings lead us to design an architecture -- Channel-Separated Convolutional Network (CSN) -- which is simple, efficient, yet accurate. On Sports1M, Kinetics, and Something-Something, our CSNs are comparable with or better than the state-of-the-art while being 2-3 times more efficient.
+
+
+
+
+

+
+
+## Results and Models
+
+### Kinetics-400
+
+| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log | json |
+| :------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------: | :--: | :-------: | :------: | :--------: | :--------: | :---------------------: | :--------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb](/configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py) | short-side 320 | x | ResNet50 | None | 73.6 | 91.3 | x | x | [ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb_20210618-4e29e2e8.pth) | [log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/20210618_182414.log) | [json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/20210618_182414.log.json) |
+| [ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py) | short-side 320 | x | ResNet50 | IG65M | 79.0 | 94.2 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_ig65m_pretrained_r50_32x2x1_58e_kinetics400_rgb_20210617-86d33018.pth) | x | x |
+| [ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb](/configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py) | short-side 320 | x | ResNet152 | None | 76.5 | 92.1 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-5c933ae1.pth) | x | x |
+| [ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | short-side 320 | x | ResNet152 | Sports1M | 78.2 | 93.0 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-b9b10241.pth) | x | x |
+| [ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py](/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | short-side 320 | 8x4 | ResNet152 | IG65M | 82.76/82.6 | 95.68/95.3 | x | 8516 | [ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth)/[infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-e63ee1bd.pth) | [log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log) | [json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log.json) |
+| [ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb](/configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py) | short-side 320 | x | ResNet152 | None | 77.8 | 92.8 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-d565828d.pth) | x | x |
+| [ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | short-side 320 | x | ResNet152 | Sports1M | 78.8 | 93.5 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-3367437a.pth) | x | x |
+| [ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | short-side 320 | x | ResNet152 | IG65M | 82.5 | 95.3 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-c3be9793.pth) | x | x |
+| [ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py](/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py) | short-side 320 | 8x4 | ResNet152 | IG65M | 80.14 | 94.93 | x | 8517 | [ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20200803-fc66ce8d.pth) | [log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log) | [json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log.json) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs (32G V100) used to obtain the checkpoint. Note that the configs we provide assume 8 GPUs by default.
+   According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
+2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and timing only the model inference,
+   not including the IO time and pre-processing time. For each setting, we use 1 GPU with a batch size (videos per GPU) of 1.
+3. The validation set of Kinetics400 we used consists of 19796 videos, available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index'; a minimal parsing sketch follows this note) and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available.
+4. The **infer_ckpt** marks checkpoints ported from [VMZ](https://github.com/facebookresearch/VMZ).
+
+:::
+
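+The sketch below makes the data-list format from note 3 concrete by reading such a list into (video_id, num_frames, label_index) tuples. It assumes whitespace-separated fields, as in standard MMAction2 rawframe lists, and the file name is just a placeholder for the downloaded list; the helper itself is not part of MMAction2.
+
+```python
+def load_rawframe_list(path: str):
+    """Yield (video_id, num_frames, label_index) tuples from a rawframe list."""
+    with open(path) as f:
+        for line in f:
+            if not line.strip():
+                continue  # skip blank lines
+            video_id, num_frames, label = line.split()
+            yield video_id, int(num_frames), int(label)
+
+
+samples = list(load_rawframe_list('kinetics_val_list.txt'))
+print(len(samples), samples[0])  # expected: 19796 entries for the list above
+```
+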
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the CSN model on the Kinetics-400 dataset with deterministic options and periodic validation.
+
+```shell
+python tools/train.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py \
+ --work-dir work_dirs/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb \
+ --validate --seed 0 --deterministic
+```
+
+For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the CSN model on the Kinetics-400 dataset and dump the result to a json file.
+
+```shell
+python tools/test.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py \
+ checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+ --out result.json --average-clips prob
+```
+
+For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
+
+## Citation
+
+```BibTeX
+@inproceedings{inproceedings,
+author = {Wang, Heng and Feiszli, Matt and Torresani, Lorenzo},
+year = {2019},
+month = {10},
+pages = {5551-5560},
+title = {Video Classification With Channel-Separated Convolutional Networks},
+doi = {10.1109/ICCV.2019.00565}
+}
+```
+
+
+
+```BibTeX
+@inproceedings{ghadiyaram2019large,
+ title={Large-scale weakly-supervised pre-training for video action recognition},
+ author={Ghadiyaram, Deepti and Tran, Du and Mahajan, Dhruv},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={12046--12055},
+ year={2019}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README_zh-CN.md
new file mode 100644
index 00000000..24f964db
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/README_zh-CN.md
@@ -0,0 +1,92 @@
+# CSN
+
+## 简介
+
+
+
+```BibTeX
+@inproceedings{inproceedings,
+author = {Wang, Heng and Feiszli, Matt and Torresani, Lorenzo},
+year = {2019},
+month = {10},
+pages = {5551-5560},
+title = {Video Classification With Channel-Separated Convolutional Networks},
+doi = {10.1109/ICCV.2019.00565}
+}
+```
+
+
+
+```BibTeX
+@inproceedings{ghadiyaram2019large,
+ title={Large-scale weakly-supervised pre-training for video action recognition},
+ author={Ghadiyaram, Deepti and Tran, Du and Mahajan, Dhruv},
+ booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+ pages={12046--12055},
+ year={2019}
+}
+```
+
+## 模型库
+
+### Kinetics-400
+
+| 配置文件 | 分辨率 | GPU 数量 | 主干网络 | 预训练 | top1 准确率 | top5 准确率 | 推理时间 (video/s) | GPU 显存占用 (M) | ckpt | log | json |
+| :------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------: | :------: | :-------: | :------: | :---------: | :---------: | :----------------: | :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb](/configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py) | 短边 320 | x | ResNet50 | None | 73.6 | 91.3 | x | x | [ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb_20210618-4e29e2e8.pth) | [log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/20210618_182414.log) | [json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/20210618_182414.log.json) |
+| [ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py) | 短边 320 | x | ResNet50 | IG65M | 79.0 | 94.2 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_ig65m_pretrained_r50_32x2x1_58e_kinetics400_rgb_20210617-86d33018.pth) | x | x |
+| [ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb](/configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py) | 短边 320 | x | ResNet152 | None | 76.5 | 92.1 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-5c933ae1.pth) | x | x |
+| [ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | 短边 320 | x | ResNet152 | Sports1M | 78.2 | 93.0 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-b9b10241.pth) | x | x |
+| [ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py](/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | 短边 320 | 8x4 | ResNet152 | IG65M | 82.76/82.6 | 95.68/95.3 | x | 8516 | [ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth)/[infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-e63ee1bd.pth) | [log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log) | [json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log.json) |
+| [ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb](/configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py) | 短边 320 | x | ResNet152 | None | 77.8 | 92.8 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-d565828d.pth) | x | x |
+| [ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | 短边 320 | x | ResNet152 | Sports1M | 78.8 | 93.5 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-3367437a.pth) | x | x |
+| [ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb](/configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py) | 短边 320 | x | ResNet152 | IG65M | 82.5 | 95.3 | x | x | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-c3be9793.pth) | x | x |
+| [ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py](/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py) | 短边 320 | 8x4 | ResNet152 | IG65M | 80.14 | 94.93 | x | 8517 | [ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20200803-fc66ce8d.pth) | [log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log) | [json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log.json) |
+
+注:
+
+1. 这里的 **GPU 数量** 指的是得到模型权重文件对应的 GPU 个数。默认地,MMAction2 所提供的配置文件对应使用 8 块 GPU 进行训练的情况。
+ 依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
+ 如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
+2. 这里的 **推理时间** 是根据 [基准测试脚本](/tools/analysis/benchmark.py) 获得的,采用测试时的采帧策略,且只考虑模型的推理时间,
+ 并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
+3. 这里使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
+4. 这里的 **infer_ckpt** 表示该模型权重文件是从 [VMZ](https://github.com/facebookresearch/VMZ) 导入的。
+
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
+
+## 如何训练
+
+用户可以使用以下指令进行模型训练。
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+例如:以一个确定性的训练方式,辅以定期的验证过程进行 CSN 模型在 Kinetics400 数据集上的训练。
+
+```shell
+python tools/train.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py \
+ --work-dir work_dirs/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb \
+ --validate --seed 0 --deterministic
+```
+
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE) 中的 **训练配置** 部分。
+
+## 如何测试
+
+用户可以使用以下指令进行模型测试。
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+例如:在 Kinetics400 数据集上测试 CSN 模型,并将结果导出为一个 json 文件。
+
+```shell
+python tools/test.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py \
+ checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+ --out result.json --average-clips prob
+```
+
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86) 中的 **测试某个数据集** 部分。
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
new file mode 100644
index 00000000..7cd96b72
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
@@ -0,0 +1,95 @@
+_base_ = [
+ './ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ norm_eval=True, bn_frozen=True, bottleneck_mode='ip', pretrained=None))
+
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[110.2008, 100.63983, 95.99475],
+ std=[58.14765, 56.46975, 55.332195],
+ to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=4,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+optimizer = dict(
+ type='SGD', lr=0.08, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=40)
+total_epochs = 180
+
+work_dir = './work_dirs/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
new file mode 100644
index 00000000..7aed801a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
@@ -0,0 +1,15 @@
+_base_ = [
+ './ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ norm_eval=True,
+ bn_frozen=True,
+ bottleneck_mode='ip',
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/csn/ipcsn_from_scratch_r152_ig65m_20210617-c4b99d38.pth' # noqa: E501
+ ))
+
+work_dir = './work_dirs/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
new file mode 100644
index 00000000..fc5372a8
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
@@ -0,0 +1,88 @@
+_base_ = [
+ './ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ norm_eval=True,
+ bn_frozen=True,
+ bottleneck_mode='ip',
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/csn/ipcsn_from_scratch_r152_sports1m_20210617-7a7cc5b9.pth' # noqa: E501
+ ))
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[110.2008, 100.63983, 95.99475],
+ std=[58.14765, 56.46975, 55.332195],
+ to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=3,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+work_dir = './work_dirs/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
new file mode 100644
index 00000000..777b2c0c
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
@@ -0,0 +1,95 @@
+_base_ = [
+ './ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ norm_eval=True, bn_frozen=True, bottleneck_mode='ir', pretrained=None))
+
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[110.2008, 100.63983, 95.99475],
+ std=[58.14765, 56.46975, 55.332195],
+ to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=4,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+optimizer = dict(
+ type='SGD', lr=0.08, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=40)
+total_epochs = 180
+
+work_dir = './work_dirs/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py
new file mode 100644
index 00000000..cef9d5de
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py
@@ -0,0 +1,97 @@
+_base_ = [
+ './ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ depth=50,
+ norm_eval=True,
+ bn_frozen=True,
+ bottleneck_mode='ir',
+ pretrained=None))
+
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=4,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+optimizer = dict(
+ type='SGD', lr=0.08, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_by_epoch=True,
+ warmup_iters=40)
+total_epochs = 180
+
+work_dir = './work_dirs/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
new file mode 100644
index 00000000..54bc5b01
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
@@ -0,0 +1,102 @@
+_base_ = [
+ '../../_base_/models/ircsn_r152.py', '../../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ norm_eval=True,
+ bn_frozen=True,
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth' # noqa: E501
+ ))
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=3,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.000125, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[32, 48],
+ warmup='linear',
+ warmup_ratio=0.1,
+ warmup_by_epoch=True,
+ warmup_iters=16)
+total_epochs = 58
+
+work_dir = './work_dirs/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb' # noqa: E501
+find_unused_parameters = True
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py
new file mode 100644
index 00000000..fc44dc42
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py
@@ -0,0 +1,103 @@
+_base_ = [
+ '../../_base_/models/ircsn_r152.py', '../../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ depth=50,
+ norm_eval=True,
+ bn_frozen=True,
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r50_ig65m_20210617-ce545a37.pth' # noqa: E501
+ ))
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=3,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.000125, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[32, 48],
+ warmup='linear',
+ warmup_ratio=0.1,
+ warmup_by_epoch=True,
+ warmup_iters=16)
+total_epochs = 58
+
+work_dir = './work_dirs/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb' # noqa: E501
+find_unused_parameters = True
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py
new file mode 100644
index 00000000..015526cc
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py
@@ -0,0 +1,100 @@
+_base_ = [
+ '../../_base_/models/ircsn_r152.py', '../../_base_/default_runtime.py'
+]
+
+model = dict(
+ backbone=dict(
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth' # noqa: E501
+ ))
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=3,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.000125, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[32, 48],
+ warmup='linear',
+ warmup_ratio=0.1,
+ warmup_by_epoch=True,
+ warmup_iters=16)
+total_epochs = 58
+
+work_dir = './work_dirs/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb'
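+# forwarded to torch's DistributedDataParallel so that parameters that do not
+# take part in a forward/backward pass do not block gradient synchronisation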
+find_unused_parameters = True
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
new file mode 100644
index 00000000..b4601839
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
@@ -0,0 +1,88 @@
+_base_ = [
+ './ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
+]
+
+# model settings
+model = dict(
+ backbone=dict(
+ norm_eval=True,
+ bn_frozen=True,
+ bottleneck_mode='ir',
+ pretrained= # noqa: E251
+ 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_sports1m_20210617-bcc9c0dd.pth' # noqa: E501
+ ))
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[110.2008, 100.63983, 95.99475],
+ std=[58.14765, 56.46975, 55.332195],
+ to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='RandomResizedCrop'),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=3,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+work_dir = './work_dirs/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/metafile.yml
new file mode 100644
index 00000000..408e1194
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/csn/metafile.yml
@@ -0,0 +1,204 @@
+Collections:
+- Name: CSN
+ README: configs/recognition/csn/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1904.02811
+ Title: Video Classification with Channel-Separated Convolutional Networks
+Models:
+- Config: configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Batch Size: 3
+ Epochs: 58
+ FLOPs: 98096676864
+ Parameters: 29703568
+ Pretrained: IG65M
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Training Resources: 32 GPUs
+ Modality: RGB
+ Name: ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 80.14
+ Top 5 Accuracy: 94.93
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20200803-fc66ce8d.pth
+- Config: configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Batch Size: 3
+ Epochs: 58
+ FLOPs: 98096676864
+ Parameters: 29703568
+ Pretrained: IG65M
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Training Resources: 32 GPUs
+ Modality: RGB
+ Name: ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 82.76
+ Top 5 Accuracy: 95.68
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth
+- Config: configs/recognition/csn/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Epochs: 180
+ FLOPs: 110337228800
+ Parameters: 33016592
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb
+ Converted From:
+ Weights: https://www.dropbox.com/s/3fihu6ti60047mu/ipCSN_152_kinetics_from_scratch_f129594342.pkl?dl=0
+ Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 77.8
+ Top 5 Accuracy: 92.8
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-d565828d.pth
+- Config: configs/recognition/csn/ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Epochs: 58
+ FLOPs: 110337228800
+ Parameters: 33016592
+ Pretrained: IG65M
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ipcsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb
+ Converted From:
+ Weights: https://www.dropbox.com/s/zpp3p0vn2i7bibl/ipCSN_152_ft_kinetics_from_ig65m_f133090949.pkl?dl=0
+ Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 82.5
+ Top 5 Accuracy: 95.3
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-c3be9793.pth
+- Config: configs/recognition/csn/ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Epochs: 58
+ FLOPs: 110337228800
+ Parameters: 33016592
+ Pretrained: Sports1M
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ipcsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb
+ Converted From:
+ Weights: https://www.dropbox.com/s/ir7cr0hda36knux/ipCSN_152_ft_kinetics_from_sports1m_f111279053.pkl?dl=0
+ Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 78.8
+ Top 5 Accuracy: 93.5
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-3367437a.pth
+- Config: configs/recognition/csn/ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Epochs: 180
+ FLOPs: 98096676864
+ Parameters: 29703568
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ircsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb
+ Converted From:
+ Weights: https://www.dropbox.com/s/46gcm7up60ssx5c/irCSN_152_kinetics_from_scratch_f98268019.pkl?dl=0
+ Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 76.5
+ Top 5 Accuracy: 92.1
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-5c933ae1.pth
+- Config: configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet50
+ Epochs: 58
+ FLOPs: 56209211392
+ Parameters: 13131152
+ Pretrained: IG65M
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ircsn_ig65m_pretrained_bnfrozen_r50_32x2x1_58e_kinetics400_rgb
+ Converted From:
+ Weights: https://www.dropbox.com/s/gmd8r87l3wmkn3h/irCSN_152_ft_kinetics_from_ig65m_f126851907.pkl?dl=0
+ Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 79.0
+ Top 5 Accuracy: 94.2
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_ig65m_pretrained_r50_32x2x1_58e_kinetics400_rgb_20210617-86d33018.pth
+- Config: configs/recognition/csn/ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet152
+ Epochs: 58
+ FLOPs: 98096676864
+ Parameters: 29703568
+ Pretrained: Sports1M
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ircsn_sports1m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb
+ Converted From:
+ Weights: https://www.dropbox.com/s/zuoj1aqouh6bo6k/irCSN_152_ft_kinetics_from_sports1m_f101599884.pkl?dl=0
+ Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 78.2
+ Top 5 Accuracy: 93.0
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-b9b10241.pth
+- Config: configs/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb.py
+ In Collection: CSN
+ Metadata:
+ Architecture: ResNet50
+    Epochs: 180
+ FLOPs: 56209211392
+ Parameters: 13131152
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.6
+      Top 5 Accuracy: 91.3
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/csn/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb/ircsn_bnfrozen_r50_32x2x1_180e_kinetics400_rgb_20210618-4e29e2e8.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README.md
new file mode 100644
index 00000000..37fee079
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README.md
@@ -0,0 +1,108 @@
+# I3D
+
+[Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset](https://openaccess.thecvf.com/content_cvpr_2017/html/Carreira_Quo_Vadis_Action_CVPR_2017_paper.html)
+
+[Non-local Neural Networks](https://openaccess.thecvf.com/content_cvpr_2018/html/Wang_Non-Local_Neural_Networks_CVPR_2018_paper.html)
+
+
+
+## Abstract
+
+
+
+The paucity of videos in current action classification datasets (UCF-101 and HMDB-51) has made it difficult to identify good video architectures, as most methods obtain similar performance on existing small-scale benchmarks. This paper re-evaluates state-of-the-art architectures in light of the new Kinetics Human Action Video dataset. Kinetics has two orders of magnitude more data, with 400 human action classes and over 400 clips per class, and is collected from realistic, challenging YouTube videos. We provide an analysis on how current architectures fare on the task of action classification on this dataset and how much performance improves on the smaller benchmark datasets after pre-training on Kinetics. We also introduce a new Two-Stream Inflated 3D ConvNet (I3D) that is based on 2D ConvNet inflation: filters and pooling kernels of very deep image classification ConvNets are expanded into 3D, making it possible to learn seamless spatio-temporal feature extractors from video while leveraging successful ImageNet architecture designs and even their parameters. We show that, after pre-training on Kinetics, I3D models considerably improve upon the state-of-the-art in action classification, reaching 80.9% on HMDB-51 and 98.0% on UCF-101.
+
+
+
+
+
+
+
+## Results and Models
+
+### Kinetics-400
+
+| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log | json |
+| :----------------------------------------------------------------------------------------------------------------------------------------------- | :-------------: | :--: | :------: | :------: | :------: | :------: | :---------------------: | :--------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: |
+| [i3d_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py) | 340x256 | 8 | ResNet50 | ImageNet | 72.68 | 90.78 | 1.7 (320x3 frames) | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/i3d_r50_32x2x1_100e_kinetics400_rgb_20200614-c25ef9a4.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log.json) |
+| [i3d_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py) | short-side 256 | 8 | ResNet50 | ImageNet | 73.27 | 90.92 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_256p_32x2x1_100e_kinetics400_rgb_20200801-7d9f44de.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log.json) |
+| [i3d_r50_video_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py) | short-side 256p | 8 | ResNet50 | ImageNet | 72.85 | 90.75 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/i3d_r50_video_32x2x1_100e_kinetics400_rgb_20200826-e31c6f52.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log.json) |
+| [i3d_r50_dense_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py) | 340x256 | 8x2 | ResNet50 | ImageNet | 72.77 | 90.57 | 1.7 (320x3 frames) | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_32x2x1_100e_kinetics400_rgb_20200616-2bbb4361.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log.json) |
+| [i3d_r50_dense_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py) | short-side 256 | 8 | ResNet50 | ImageNet | 73.48 | 91.00 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log.json) |
+| [i3d_r50_lazy_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py) | 340x256 | 8 | ResNet50 | ImageNet | 72.32 | 90.72 | 1.8 (320x3 frames) | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_32x2x1_100e_kinetics400_rgb_20200612-000e4d2a.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log.json) |
+| [i3d_r50_lazy_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py) | short-side 256 | 8 | ResNet50 | ImageNet | 73.24 | 90.99 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb_20200817-4e90d1d5.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log.json) |
+| [i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py) | short-side 256p | 8x4 | ResNet50 | ImageNet | 74.71 | 91.81 | x | 6438 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200813-6e6aef1b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log.json) |
+| [i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py) | short-side 256p | 8x4 | ResNet50 | ImageNet | 73.37 | 91.26 | x | 4944 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200815-17f84aa2.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log.json) |
+| [i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py) | short-side 256p | 8x4 | ResNet50 | ImageNet | 73.92 | 91.59 | x | 4832 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb_20200814-7c30d5bb.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log.json) |
+
+:::{note}
+
+1. The **gpus** column indicates the number of GPUs used to obtain the checkpoint. Note that the provided configs assume 8 GPUs by default.
+   Following the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), scale the learning rate in proportion to the total batch size when you use a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu; the rule is sketched in the snippet right after this note.
+2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and counting only the model inference time, excluding the IO and pre-processing time. For each setting, we use 1 GPU with a batch size (videos per GPU) of 1 to compute the inference time.
+3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available.
+
+:::
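+
+The Linear Scaling Rule mentioned in item 1 of the note boils down to a one-line computation. The snippet below is a minimal illustrative sketch (the `scale_lr` helper is not part of MMAction2; the numbers simply reuse the example from the note):
+
+```python
+def scale_lr(base_lr, base_gpus, base_videos_per_gpu, gpus, videos_per_gpu):
+    """Scale the learning rate linearly with the total batch size."""
+    base_batch = base_gpus * base_videos_per_gpu
+    new_batch = gpus * videos_per_gpu
+    return base_lr * new_batch / base_batch
+
+# lr=0.01 at 4 GPUs x 2 video/gpu scales to lr=0.08 at 16 GPUs x 4 video/gpu
+print(scale_lr(0.01, 4, 2, 16, 4))  # 0.08
+```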
+
+For more details on data preparation, you can refer to the Kinetics400 section in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the I3D model on the Kinetics-400 dataset deterministically, with periodic validation.
+
+```shell
+python tools/train.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py \
+ --work-dir work_dirs/i3d_r50_32x2x1_100e_kinetics400_rgb \
+ --validate --seed 0 --deterministic
+```
+
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the I3D model on the Kinetics-400 dataset and dump the result to a JSON file.
+
+```shell
+python tools/test.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py \
+ checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+ --out result.json --average-clips prob
+```
+
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
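+
+Since `--out result.json` dumps the fused prediction scores, you can also sanity-check the reported metrics offline. The snippet below is a minimal sketch under the assumption that the dump is a list with one class-score vector per video, in the same order as the annotation file used by the configs above:
+
+```python
+import json
+
+import numpy as np
+
+# scores dumped by tools/test.py --out result.json (assumed: one vector per video)
+with open('result.json') as f:
+    scores = np.array(json.load(f))
+
+# rawframe annotation list: "<frame_dir> <num_frames> <label>" per line
+ann_file = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+labels = np.array([int(line.split()[-1]) for line in open(ann_file)])
+
+top1 = float((scores.argmax(axis=1) == labels).mean())
+print(f'top-1 accuracy: {top1:.4f}')
+```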
+
+## Citation
+
+```BibTeX
+@inproceedings{inproceedings,
+ author = {Carreira, J. and Zisserman, Andrew},
+ year = {2017},
+ month = {07},
+ pages = {4724-4733},
+ title = {Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset},
+ doi = {10.1109/CVPR.2017.502}
+}
+```
+
+
+
+```BibTeX
+@article{NonLocal2018,
+ author = {Xiaolong Wang and Ross Girshick and Abhinav Gupta and Kaiming He},
+ title = {Non-local Neural Networks},
+ journal = {CVPR},
+ year = {2018}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README_zh-CN.md
new file mode 100644
index 00000000..c04a7e50
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/README_zh-CN.md
@@ -0,0 +1,91 @@
+# I3D
+
+## Introduction
+
+
+
+```BibTeX
+@inproceedings{inproceedings,
+ author = {Carreira, J. and Zisserman, Andrew},
+ year = {2017},
+ month = {07},
+ pages = {4724-4733},
+ title = {Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset},
+ doi = {10.1109/CVPR.2017.502}
+}
+```
+
+
+
+```BibTeX
+@article{NonLocal2018,
+ author = {Xiaolong Wang and Ross Girshick and Abhinav Gupta and Kaiming He},
+ title = {Non-local Neural Networks},
+ journal = {CVPR},
+ year = {2018}
+}
+```
+
+## Model Zoo
+
+### Kinetics-400
+
+| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | inference_time (video/s) | gpu_mem (M) | ckpt | log | json |
+| :----------------------------------------------------------------------------------------------------------------------------------------------- | :-------: | :------: | :------: | :------: | :---------: | :---------: | :----------------: | :--------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: |
+| [i3d_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py) | 340x256 | 8 | ResNet50 | ImageNet | 72.68 | 90.78 | 1.7 (320x3 frames) | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/i3d_r50_32x2x1_100e_kinetics400_rgb_20200614-c25ef9a4.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log.json) |
+| [i3d_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py) | 短边 256 | 8 | ResNet50 | ImageNet | 73.27 | 90.92 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_256p_32x2x1_100e_kinetics400_rgb_20200801-7d9f44de.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log.json) |
+| [i3d_r50_video_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py) | 短边 256p | 8 | ResNet50 | ImageNet | 72.85 | 90.75 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/i3d_r50_video_32x2x1_100e_kinetics400_rgb_20200826-e31c6f52.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log.json) |
+| [i3d_r50_dense_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py) | 340x256 | 8x2 | ResNet50 | ImageNet | 72.77 | 90.57 | 1.7 (320x3 frames) | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_32x2x1_100e_kinetics400_rgb_20200616-2bbb4361.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log.json) |
+| [i3d_r50_dense_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py) | 短边 256 | 8 | ResNet50 | ImageNet | 73.48 | 91.00 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log.json) |
+| [i3d_r50_lazy_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py) | 340x256 | 8 | ResNet50 | ImageNet | 72.32 | 90.72 | 1.8 (320x3 frames) | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_32x2x1_100e_kinetics400_rgb_20200612-000e4d2a.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log.json) |
+| [i3d_r50_lazy_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py) | 短边 256 | 8 | ResNet50 | ImageNet | 73.24 | 90.99 | x | 5170 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb_20200817-4e90d1d5.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log.json) |
+| [i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py) | 短边 256p | 8x4 | ResNet50 | ImageNet | 74.71 | 91.81 | x | 6438 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200813-6e6aef1b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log.json) |
+| [i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py) | 短边 256p | 8x4 | ResNet50 | ImageNet | 73.37 | 91.26 | x | 4944 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200815-17f84aa2.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log.json) |
+| [i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py) | 短边 256p | 8x4 | ResNet50 | ImageNet | 73.92 | 91.59 | x | 4832 | [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb_20200814-7c30d5bb.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log.json) |
+
+Note:
+
+1. The **gpus** column refers to the number of GPUs used to obtain the checkpoint. By default, the configs provided by MMAction2 assume training with 8 GPUs.
+   Following the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), scale the learning rate in proportion to the total batch size when using a different number of GPUs or videos per GPU,
+   e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
+2. The **inference time** is measured with the [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and counting only the model inference time,
+   excluding IO and pre-processing. For each config, MMAction2 uses 1 GPU with a batch size (videos per GPU) of 1 to compute the inference time.
+3. The Kinetics400 validation set we use contains 19796 videos, which can be downloaded from [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line: video ID, number of frames, label index) and [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (label index to class name) are also provided; a small parsing sketch follows below.
+
+For details on data preparation, refer to the Kinetics400 section in [Data Preparation](/docs_zh_CN/data_preparation.md).
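+
+The annotation lists mentioned in item 3 of the note are plain text files with one video per line. The snippet below is a minimal parsing sketch, assuming the whitespace-separated "video_id num_frames label_index" layout described above (the path is the one used by the configs in this folder):
+
+```python
+from collections import Counter
+
+ann_file = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+
+# each line: "<video_id> <num_frames> <label_index>"
+samples = []
+with open(ann_file) as f:
+    for line in f:
+        video_id, num_frames, label = line.split()
+        samples.append((video_id, int(num_frames), int(label)))
+
+num_classes = len(Counter(label for _, _, label in samples))
+print(f'{len(samples)} videos covering {num_classes} classes')
+```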
+
+## Train
+
+You can use the following command to train a model.
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the I3D model on the Kinetics400 dataset deterministically, with periodic validation.
+
+```shell
+python tools/train.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py \
+ --work-dir work_dirs/i3d_r50_32x2x1_100e_kinetics400_rgb \
+ --validate --seed 0 --deterministic
+```
+
+For more training details, refer to the **Training setting** part in [getting_started](/docs_zh_CN/getting_started.md#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the I3D model on the Kinetics400 dataset and dump the result to a JSON file.
+
+```shell
+python tools/test.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py \
+ checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+ --out result.json --average-clips prob
+```
+
+For more testing details, refer to the **Test a dataset** part in [getting_started](/docs_zh_CN/getting_started.md#%E6%B5%8B%E8%AF%95%E6%9F%90%E4%B8%AA%E6%95%B0%E6%8D%AE%E9%9B%86).
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..46628500
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,96 @@
+_base_ = [
+ '../../_base_/models/i3d_r50.py', '../../_base_/schedules/sgd_100e.py',
+ '../../_base_/default_runtime.py'
+]
+
+# model settings
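+# non_local takes one flag per residual block of each of the four ResNet stages:
+# a 1 inserts a non-local block after that residual block (here every other block
+# of the second and third stages), and non_local_cfg picks the pairwise function
+# ('dot_product' in this config).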
+model = dict(
+ backbone=dict(
+ non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)),
+ non_local_cfg=dict(
+ sub_sample=True,
+ use_scale=False,
+ norm_cfg=dict(type='BN3d', requires_grad=True),
+ mode='dot_product')))
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+
+# runtime settings
+checkpoint_config = dict(interval=5)
+work_dir = './work_dirs/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..969e42a0
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,13 @@
+_base_ = ['./i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py']
+
+# model settings
+model = dict(
+ backbone=dict(
+ non_local_cfg=dict(
+ sub_sample=True,
+ use_scale=False,
+ norm_cfg=dict(type='BN3d', requires_grad=True),
+ mode='embedded_gaussian')))
+
+# runtime settings
+work_dir = './work_dirs/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/' # noqa: E501
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..f2377587
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,13 @@
+_base_ = ['./i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py']
+
+# model settings
+model = dict(
+ backbone=dict(
+ non_local_cfg=dict(
+ sub_sample=True,
+ use_scale=False,
+ norm_cfg=dict(type='BN3d', requires_grad=True),
+ mode='gaussian')))
+
+# runtime settings
+work_dir = './work_dirs/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..aa0e523f
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,86 @@
+_base_ = [
+ '../../_base_/models/i3d_r50.py', '../../_base_/schedules/sgd_100e.py',
+ '../../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+
+# runtime settings
+checkpoint_config = dict(interval=5)
+work_dir = './work_dirs/i3d_r50_32x2x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..17ea4303
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,80 @@
+_base_ = ['./i3d_r50_32x2x1_100e_kinetics400_rgb.py']
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
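+    # Compared to SampleFrames in the base config, DenseSampleFrames implements
+    # the dense sampling strategy: clip starts are drawn from a fixed-size window
+    # of the video (64 frames by default) instead of from its full duration.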
+ dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='DenseSampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='DenseSampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+# runtime settings
+work_dir = './work_dirs/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..f21feb2a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py
@@ -0,0 +1,88 @@
+_base_ = ['./i3d_r50_32x2x1_100e_kinetics400_rgb.py']
+
+# model settings
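+# The 'heavy' variant inflates all four residual stages to 3-D convolutions
+# (inflate=(1, 1, 1, 1)) and keeps full temporal resolution in the stem
+# (conv1_stride_t=1, pool1_stride_t=1), which is why it is paired with the
+# shorter 8-frame clips sampled below.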
+model = dict(
+ backbone=dict(
+ inflate=(1, 1, 1, 1),
+ conv1_stride_t=1,
+ pool1_stride_t=1,
+ with_pool2=True))
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=8,
+ frame_interval=8,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=8,
+ frame_interval=8,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+# runtime settings
+work_dir = './work_dirs/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..de84b8fe
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,84 @@
+_base_ = ['./i3d_r50_32x2x1_100e_kinetics400_rgb.py']
+
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/kinetics400/rawframes_train'
+data_root_val = 'data/kinetics400/rawframes_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
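+    # The lazy=True operations below only record crop/resize/flip parameters;
+    # the 'Fuse' step then applies the accumulated transform to the decoded
+    # frames in a single pass, avoiding intermediate copies between the
+    # geometric transforms.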
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='RawFrameDecode', decoding_backend='turbojpeg'),
+ dict(type='Resize', scale=(-1, 256), lazy=True),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0,
+ lazy=True),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True),
+ dict(type='Flip', flip_ratio=0.5, lazy=True),
+ dict(type='Fuse'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='RawFrameDecode', decoding_backend='turbojpeg'),
+ dict(type='Resize', scale=(-1, 256), lazy=True),
+ dict(type='CenterCrop', crop_size=224, lazy=True),
+ dict(type='Flip', flip_ratio=0, lazy=True),
+ dict(type='Fuse'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='RawFrameDecode', decoding_backend='turbojpeg'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+# runtime settings
+work_dir = './work_dirs/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..1477ac2a
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,83 @@
+_base_ = ['./i3d_r50_32x2x1_100e_kinetics400_rgb.py']
+
+# dataset settings
+dataset_type = 'VideoDataset'
+data_root = 'data/kinetics400/videos_train'
+data_root_val = 'data/kinetics400/videos_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='DecordInit'),
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(type='DecordInit'),
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(type='DecordInit'),
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+# runtime settings
+work_dir = './work_dirs/i3d_r50_video_32x2x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..973f7fb8
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py
@@ -0,0 +1,83 @@
+_base_ = ['./i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py']
+
+# dataset settings
+dataset_type = 'VideoDataset'
+data_root = 'data/kinetics400/videos_train'
+data_root_val = 'data/kinetics400/videos_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='DecordInit'),
+ dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(type='Flip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(type='DecordInit'),
+ dict(
+ type='SampleFrames',
+ clip_len=8,
+ frame_interval=8,
+ num_clips=1,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(type='DecordInit'),
+ dict(
+ type='SampleFrames',
+ clip_len=8,
+ frame_interval=8,
+ num_clips=10,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+
+# runtime settings
+work_dir = './work_dirs/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb/'
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_imgaug_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_imgaug_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..86baa028
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_imgaug_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,111 @@
+_base_ = ['../../_base_/models/i3d_r50.py']
+
+# dataset settings
+dataset_type = 'VideoDataset'
+data_root = 'data/kinetics400/videos_train'
+data_root_val = 'data/kinetics400/videos_val'
+ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
+ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
+ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+train_pipeline = [
+ dict(type='DecordInit'),
+ dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(
+ type='MultiScaleCrop',
+ input_size=224,
+ scales=(1, 0.8),
+ random_crop=False,
+ max_wh_scale_gap=0),
+ dict(type='Resize', scale=(224, 224), keep_ratio=False),
+ dict(
+ type='Imgaug',
+ transforms=[
+ dict(type='Fliplr', p=0.5),
+ dict(type='Rotate', rotate=(-20, 20)),
+ dict(type='Dropout', p=(0, 0.05))
+ ]),
+ # dict(type='Imgaug', transforms='default'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs', 'label'])
+]
+val_pipeline = [
+ dict(type='DecordInit'),
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+test_pipeline = [
+ dict(type='DecordInit'),
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=10,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=8,
+ workers_per_gpu=2,
+ test_dataloader=dict(videos_per_gpu=1),
+ train=dict(
+ type=dataset_type,
+ ann_file=ann_file_train,
+ data_prefix=data_root,
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=val_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=ann_file_val,
+ data_prefix=data_root_val,
+ pipeline=test_pipeline))
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.01, momentum=0.9,
+ weight_decay=0.0001) # this lr is used for 8 gpus
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+# learning policy
+lr_config = dict(policy='step', step=[40, 80])
+total_epochs = 100
+checkpoint_config = dict(interval=5)
+evaluation = dict(
+ interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
+log_config = dict(
+ interval=20,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook'),
+ ])
+# runtime settings
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/i3d_r50_video_imgaug_32x2x1_100e_kinetics400_rgb/'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
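The `Imgaug` step in the training pipeline above wraps the third-party `imgaug` library and applies the listed transforms to the decoded frames; the commented-out `transforms='default'` variant selects a built-in default policy instead. A rough standalone sketch of the equivalent augmenter sequence, using `imgaug` directly (assuming `imgaug>=0.4`; shapes are illustrative, and unlike the pipeline wrapper this samples parameters per frame):

```python
# Rough equivalent of the Imgaug step above, built with imgaug directly.
import numpy as np
import imgaug.augmenters as iaa

seq = iaa.Sequential([
    iaa.Fliplr(0.5),            # horizontal flip with probability 0.5
    iaa.Rotate((-20, 20)),      # random rotation in degrees
    iaa.Dropout(p=(0, 0.05)),   # drop up to 5% of pixels
])

# A fake clip: 32 frames of 224x224 RGB, as produced by the crops above.
clip = np.random.randint(0, 256, (32, 224, 224, 3), dtype=np.uint8)
augmented = seq(images=clip)    # parameters are re-sampled per frame here
print(augmented.shape)
```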
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py
new file mode 100644
index 00000000..497c0135
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py
@@ -0,0 +1,30 @@
+_base_ = ['../../_base_/models/i3d_r50.py']
+
+# dataset settings
+dataset_type = 'VideoDataset'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
+test_pipeline = [
+ dict(type='DecordInit', num_threads=1),
+ dict(
+ type='SampleFrames',
+ clip_len=32,
+ frame_interval=2,
+ num_clips=1,
+ test_mode=True),
+ dict(type='DecordDecode'),
+ dict(type='Resize', scale=(-1, 256)),
+ dict(type='ThreeCrop', crop_size=256),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='FormatShape', input_format='NCTHW'),
+ dict(type='Collect', keys=['imgs'], meta_keys=[]),
+ dict(type='ToTensor', keys=['imgs'])
+]
+data = dict(
+ videos_per_gpu=1,
+ workers_per_gpu=2,
+ test=dict(
+ type=dataset_type,
+ ann_file=None,
+ data_prefix=None,
+ pipeline=test_pipeline))
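This inference-only config leaves `ann_file` and `data_prefix` unset and keeps just a test pipeline, which is the shape expected by MMAction2's high-level recognition API. A minimal usage sketch (checkpoint and video paths are placeholders; in 0.24.x the call below should return a list of (label_index, score) pairs):

```python
# Minimal sketch: single-video inference with the config above.
# The checkpoint and video paths are placeholders.
from mmaction.apis import init_recognizer, inference_recognizer

config_file = ('configs/recognition/i3d/'
               'i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py')
checkpoint = 'path/to/i3d_checkpoint.pth'

model = init_recognizer(config_file, checkpoint, device='cuda:0')
results = inference_recognizer(model, 'path/to/video.mp4')

for label_idx, score in results:
    print(label_idx, score)
```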
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/metafile.yml
new file mode 100644
index 00000000..22a7bfe3
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/i3d/metafile.yml
@@ -0,0 +1,237 @@
+Collections:
+- Name: I3D
+ README: configs/recognition/i3d/README.md
+ Paper:
+ URL: https://arxiv.org/abs/1705.07750
+ Title: Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
+Models:
+- Config: configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: 340x256
+ Training Data: Kinetics-400
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: i3d_r50_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 72.68
+ Top 5 Accuracy: 90.78
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/i3d_r50_32x2x1_100e_kinetics400_rgb_20200614-c25ef9a4.pth
+- Config: configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: short-side 256
+ Training Data: Kinetics-400
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: i3d_r50_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.27
+ Top 5 Accuracy: 90.92
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_256p_32x2x1_100e_kinetics400_rgb_20200801-7d9f44de.pth
+- Config: configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: short-side 256p
+ Training Data: Kinetics-400
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: i3d_r50_video_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 72.85
+ Top 5 Accuracy: 90.75
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/i3d_r50_video_32x2x1_100e_kinetics400_rgb_20200826-e31c6f52.pth
+- Config: configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: 340x256
+ Training Data: Kinetics-400
+ Training Resources: 16 GPUs
+ Modality: RGB
+ Name: i3d_r50_dense_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 72.77
+ Top 5 Accuracy: 90.57
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_32x2x1_100e_kinetics400_rgb_20200616-2bbb4361.pth
+- Config: configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: short-side 256
+ Training Data: Kinetics-400
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: i3d_r50_dense_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.48
+ Top 5 Accuracy: 91.0
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth
+- Config: configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: 340x256
+ Training Data: Kinetics-400
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: i3d_r50_lazy_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 72.32
+ Top 5 Accuracy: 90.72
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_32x2x1_100e_kinetics400_rgb_20200612-000e4d2a.pth
+- Config: configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 43564040192
+ Parameters: 28043472
+ Pretrained: ImageNet
+ Resolution: short-side 256
+ Training Data: Kinetics-400
+ Training Resources: 8 GPUs
+ Modality: RGB
+ Name: i3d_r50_lazy_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.24
+ Top 5 Accuracy: 90.99
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb_20200817-4e90d1d5.pth
+- Config: configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 54334488576
+ Parameters: 35397840
+ Pretrained: ImageNet
+ Resolution: short-side 256p
+ Training Data: Kinetics-400
+ Training Resources: 32 GPUs
+ Modality: RGB
+ Name: i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 74.71
+ Top 5 Accuracy: 91.81
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200813-6e6aef1b.pth
+- Config: configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 48962109440
+ Parameters: 31723728
+ Pretrained: ImageNet
+ Resolution: short-side 256p
+ Training Data: Kinetics-400
+ Training Resources: 32 GPUs
+ Modality: RGB
+ Name: i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.37
+ Top 5 Accuracy: 91.26
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200815-17f84aa2.pth
+- Config: configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py
+ In Collection: I3D
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 100
+ FLOPs: 54334488576
+ Parameters: 35397840
+ Pretrained: ImageNet
+ Resolution: short-side 256p
+ Training Data: Kinetics-400
+ Training Resources: 32 GPUs
+ Modality: RGB
+ Name: i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.92
+ Top 5 Accuracy: 91.59
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb_20200814-7c30d5bb.pth
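Since the metafile is plain YAML, the model-zoo entries above can be consumed programmatically, e.g. to pick the released I3D checkpoint with the best reported accuracy. A small sketch using PyYAML:

```python
# Sketch: select the I3D model with the highest Kinetics-400 Top-1 accuracy
# from the metafile above. Assumes PyYAML is installed.
import yaml

with open('configs/recognition/i3d/metafile.yml') as f:
    meta = yaml.safe_load(f)

def top1(model):
    # Each model entry lists one result dict with a 'Metrics' mapping.
    return model['Results'][0]['Metrics']['Top 1 Accuracy']

best = max(meta['Models'], key=top1)
print(best['Name'], top1(best), best['Weights'])
```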
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README.md
new file mode 100644
index 00000000..daeda154
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README.md
@@ -0,0 +1,80 @@
+# Omni-sourced Webly-supervised Learning for Video Recognition
+
+[Omni-sourced Webly-supervised Learning for Video Recognition](https://arxiv.org/abs/2003.13042)
+
+[Dataset](https://docs.google.com/forms/d/e/1FAIpQLSd8_GlmHzG8FcDbW-OEu__G7qLgOSYZpH-i5vYVJcu7wcb_TQ/viewform?usp=sf_link)
+
+## Abstract
+
+
+
+We introduce OmniSource, a novel framework for leveraging web data to train video recognition models. OmniSource overcomes the barriers between data formats, such as images, short videos, and long untrimmed videos for webly-supervised learning. First, data samples with multiple formats, curated by task-specific data collection and automatically filtered by a teacher model, are transformed into a unified form. Then a joint-training strategy is proposed to deal with the domain gaps between multiple data sources and formats in webly-supervised learning. Several good practices, including data balancing, resampling, and cross-dataset mixup are adopted in joint training. Experiments show that by utilizing data from multiple sources and formats, OmniSource is more data-efficient in training. With only 3.5M images and 800K minutes of videos crawled from the internet without human labeling (less than 2% of prior works), our models learned with OmniSource improve the Top-1 accuracy of 2D- and 3D-ConvNet baseline models by 3.0% and 3.9%, respectively, on the Kinetics-400 benchmark. With OmniSource, we establish new records with different pretraining strategies for video recognition. Our best models achieve 80.4%, 80.5%, and 83.6% Top-1 accuracy on the Kinetics-400 benchmark for training-from-scratch, ImageNet pre-training, and IG-65M pre-training, respectively.
+
+
+
+
+
+
+
+
+## Results and Models
+
+### Kinetics-400 Model Release
+
+We have currently released 4 models trained with the OmniSource framework, covering both 2D and 3D architectures. The following table compares the performance of models trained with and without OmniSource.
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | Top-1 (Baseline / OmniSource (Delta)) | Top-5 (Baseline / OmniSource (Delta)) | Download |
+| :------: | :------: | :--------: | :-------: | :---: | :------------: | :-----------------------------------: | :------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| TSN | RGB | ImageNet | ResNet50 | 3seg | 340x256 | 70.6 / 73.6 (+ 3.0) | 89.4 / 91.0 (+ 1.6) | [Baseline](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_imagenet_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-54192355.pth) |
+| TSN | RGB | IG-1B | ResNet50 | 3seg | short-side 320 | 73.1 / 75.7 (+ 2.6) | 90.4 / 91.9 (+ 1.5) | [Baseline](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_without_omni_1x1x3_kinetics400_rgb_20200926-c133dd49.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-2863fed0.pth) |
+| SlowOnly | RGB | Scratch | ResNet50 | 4x16 | short-side 320 | 72.9 / 76.8 (+ 3.9) | 90.9 / 92.5 (+ 1.6) | [Baseline](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r50_omni_4x16x1_kinetics400_rgb_20200926-51b1f7ea.pth) |
+| SlowOnly | RGB | Scratch | ResNet101 | 8x8 | short-side 320 | 76.5 / 80.4 (+ 3.9) | 92.7 / 94.4 (+ 1.7) | [Baseline](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_without_omni_8x8x1_kinetics400_rgb_20200926-0c730aef.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_omni_8x8x1_kinetics400_rgb_20200926-b5dbb701.pth) |
+
+1. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available; a small parsing sketch is given below.
+
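+As a small illustration of the data-list format mentioned above, the sketch below parses it into tuples. It assumes one whitespace-separated record per line; adjust the delimiter if the downloaded file differs.
+
+```python
+# Hypothetical helper: parse the validation data list into
+# (video_id, num_frames, label_index) tuples.
+def load_data_list(path):
+    records = []
+    with open(path) as f:
+        for line in f:
+            video_id, num_frames, label = line.split()
+            records.append((video_id, int(num_frames), int(label)))
+    return records
+
+# Example usage (path is a placeholder):
+# records = load_data_list('kinetics_val_list.txt')
+# print(len(records), records[0])
+```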
+## Benchmark on Mini-Kinetics
+
+We release a subset of the web data used in the OmniSource paper. Specifically, we release the web data for the 200 classes of [Mini-Kinetics](https://arxiv.org/pdf/1712.04851.pdf). The statistics of those datasets are detailed in [preparing_omnisource](/tools/data/omnisource/README.md). To obtain the data, you need to fill in a [data request form](https://docs.google.com/forms/d/e/1FAIpQLSd8_GlmHzG8FcDbW-OEu__G7qLgOSYZpH-i5vYVJcu7wcb_TQ/viewform?usp=sf_link). After we receive your request, the download links will be sent to you. For more details on the released OmniSource web dataset, please refer to [preparing_omnisource](/tools/data/omnisource/README.md).
+
+We benchmark the OmniSource framework on the released subset; the results are listed in the following tables (we report Top-1 and Top-5 accuracy on the Mini-Kinetics validation set). The benchmark can be used as a baseline for video recognition with web data.
+
+### TSN-8seg-ResNet50
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | top1 acc | top5 acc | ckpt | json | log |
+| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -------- | ---------- | -------- | ----- | -------------- | :------: | :------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [tsn_r50_1x1x8_100e_minikinetics_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 77.4 | 93.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030-b4eaf92b.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 78.0 | 93.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030-23966b4b.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_webimage_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 78.6 | 93.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030-66f5e046.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 80.6 | 95.0 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030-011f984d.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 78.6 | 93.2 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030-59f5d064.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 81.3 | 94.8 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030-0f56ef51.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030.log) |
+
+### SlowOnly-8x8-ResNet50
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | top1 acc | top5 acc | ckpt | json | log |
+| :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -------- | ---------- | -------- | ----- | -------------- | :------: | :------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowonly_r50_8x8x1_256e_minikinetics_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 78.6 | 93.9 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030-168eb098.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 80.8 | 95.0 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030-7da6dfc3.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 81.3 | 95.2 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030-c36616e9.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 82.4 | 95.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030-e2890e8d.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 80.3 | 94.5 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030-62974bac.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 82.9 | 95.8 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030-284cfd3b.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030.log) |
+
+We also list the benchmark results from the original paper, which were obtained on Kinetics-400, for comparison:
+
+| Model | Baseline | +GG-img | +\[GG-IG\]-img | +IG-vid | +KRaw | OmniSource |
+| :--------------------: | :---------: | :---------: | :------------: | :---------: | :---------: | :---------: |
+| TSN-3seg-ResNet50 | 70.6 / 89.4 | 71.5 / 89.5 | 72.0 / 90.0 | 72.0 / 90.3 | 71.7 / 89.6 | 73.6 / 91.0 |
+| SlowOnly-4x16-ResNet50 | 73.8 / 90.9 | 74.5 / 91.4 | 75.2 / 91.6 | 75.2 / 91.7 | 74.5 / 91.1 | 76.6 / 92.5 |
+
+## Citation
+
+
+
+```BibTeX
+@article{duan2020omni,
+ title={Omni-sourced Webly-supervised Learning for Video Recognition},
+ author={Duan, Haodong and Zhao, Yue and Xiong, Yuanjun and Liu, Wentao and Lin, Dahua},
+ journal={arXiv preprint arXiv:2003.13042},
+ year={2020}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README_zh-CN.md b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README_zh-CN.md
new file mode 100644
index 00000000..ac872587
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/README_zh-CN.md
@@ -0,0 +1,72 @@
+# Omni-sourced Webly-supervised Learning for Video Recognition
+
+[Haodong Duan](https://github.com/kennymckormick), [Yue Zhao](https://github.com/zhaoyue-zephyrus), [Yuanjun Xiong](https://github.com/yjxiong), Wentao Liu, [Dahua Lin](https://github.com/lindahua)
+
+In ECCV, 2020. [Paper](https://arxiv.org/abs/2003.13042), [Dataset](https://docs.google.com/forms/d/e/1FAIpQLSd8_GlmHzG8FcDbW-OEu__G7qLgOSYZpH-i5vYVJcu7wcb_TQ/viewform?usp=sf_link)
+
+
+
+## Model Zoo
+
+### Kinetics-400
+
+MMAction2 currently provides 4 models trained with the OmniSource framework, covering both 2D and 3D architectures. The following table compares the Kinetics-400 accuracy of models trained with and without the OmniSource framework:
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | Top-1 Accuracy (Baseline / OmniSource (Delta)) | Top-5 Accuracy (Baseline / OmniSource (Delta)) | Download |
+| :------: | :--: | :------: | :-------: | :--: | :------------: | :-----------------------------------------: | :------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| TSN | RGB | ImageNet | ResNet50 | 3seg | 340x256 | 70.6 / 73.6 (+ 3.0) | 89.4 / 91.0 (+ 1.6) | [Baseline](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_imagenet_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-54192355.pth) |
+| TSN | RGB | IG-1B | ResNet50 | 3seg | short-side 320 | 73.1 / 75.7 (+ 2.6) | 90.4 / 91.9 (+ 1.5) | [Baseline](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_without_omni_1x1x3_kinetics400_rgb_20200926-c133dd49.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-2863fed0.pth) |
+| SlowOnly | RGB | None | ResNet50 | 4x16 | short-side 320 | 72.9 / 76.8 (+ 3.9) | 90.9 / 92.5 (+ 1.6) | [Baseline](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r50_omni_4x16x1_kinetics400_rgb_20200926-51b1f7ea.pth) |
+| SlowOnly | RGB | None | ResNet101 | 8x8 | short-side 320 | 76.5 / 80.4 (+ 3.9) | 92.7 / 94.4 (+ 1.7) | [Baseline](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_without_omni_8x8x1_kinetics400_rgb_20200926-0c730aef.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_omni_8x8x1_kinetics400_rgb_20200926-b5dbb701.pth) |
+
+1. The Kinetics400 validation set we used consists of 19796 videos, which can be downloaded from [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). We also provide the corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format: video ID, number of frames, label index) and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (label index to class name).
+
+## Benchmark on Mini-Kinetics
+
+The OmniSource project currently releases a subset of the collected web data, covering the 200 action classes of [Mini-Kinetics](https://arxiv.org/pdf/1712.04851.pdf). Detailed statistics of these datasets are recorded in [preparing_omnisource](/tools/data/omnisource/README_zh-CN.md). To obtain the data, please fill in a [data request form](https://docs.google.com/forms/d/e/1FAIpQLSd8_GlmHzG8FcDbW-OEu__G7qLgOSYZpH-i5vYVJcu7wcb_TQ/viewform?usp=sf_link); after the form is submitted, the download links will be sent to your email. For more information on the released OmniSource web datasets, please refer to [preparing_omnisource](/tools/data/omnisource/README_zh-CN.md).
+
+MMAction2 benchmarks the OmniSource framework on the released datasets; the tables below record the detailed results (accuracy on the Mini-Kinetics validation set), which can serve as baselines for training video recognition models with web data.
+
+### TSN-8seg-ResNet50
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | Top-1 Accuracy | Top-5 Accuracy | ckpt | json | log |
+| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :------: | :------: | :--: | :------------: | :----------: | :----------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [tsn_r50_1x1x8_100e_minikinetics_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 77.4 | 93.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030-b4eaf92b.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 78.0 | 93.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030-23966b4b.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_webimage_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 78.6 | 93.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030-66f5e046.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 80.6 | 95.0 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030-011f984d.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 78.6 | 93.2 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030-59f5d064.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030.log) |
+| [tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb](/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py) | RGB | ImageNet | ResNet50 | 3seg | short-side 320 | 81.3 | 94.8 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030-0f56ef51.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030.log) |
+
+### SlowOnly-8x8-ResNet50
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | Top-1 Accuracy | Top-5 Accuracy | ckpt | json | log |
+| :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--: | :----: | :------: | :--: | :------------: | :----------: | :----------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| [slowonly_r50_8x8x1_256e_minikinetics_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 78.6 | 93.9 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030-168eb098.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 80.8 | 95.0 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030-7da6dfc3.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 81.3 | 95.2 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030-c36616e9.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 82.4 | 95.6 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030-e2890e8d.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 80.3 | 94.5 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030-62974bac.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030.log) |
+| [slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb](/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py) | RGB | None | ResNet50 | 8x8 | short-side 320 | 82.9 | 95.8 | [ckpt](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030-284cfd3b.pth) | [json](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030.json) | [log](https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030.log) |
+
+The table below lists the benchmark results on Kinetics-400 reported in the original paper for reference:
+
+| Model | Baseline | +GG-img | +\[GG-IG\]-img | +IG-vid | +KRaw | OmniSource |
+| :--------------------: | :---------: | :---------: | :------------: | :---------: | :---------: | :---------: |
+| TSN-3seg-ResNet50 | 70.6 / 89.4 | 71.5 / 89.5 | 72.0 / 90.0 | 72.0 / 90.3 | 71.7 / 89.6 | 73.6 / 91.0 |
+| SlowOnly-4x16-ResNet50 | 73.8 / 90.9 | 74.5 / 91.4 | 75.2 / 91.6 | 75.2 / 91.7 | 74.5 / 91.1 | 76.6 / 92.5 |
+
+## Note
+
+If the OmniSource project is helpful to your research, please cite it using the following BibTeX entry:
+
+
+
+```BibTeX
+@article{duan2020omni,
+ title={Omni-sourced Webly-supervised Learning for Video Recognition},
+ author={Duan, Haodong and Zhao, Yue and Xiong, Yuanjun and Liu, Wentao and Lin, Dahua},
+ journal={arXiv preprint arXiv:2003.13042},
+ year={2020}
+}
+```
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/metafile.yml b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/metafile.yml
new file mode 100644
index 00000000..ae3db16e
--- /dev/null
+++ b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/metafile.yml
@@ -0,0 +1,388 @@
+Collections:
+- Name: OmniSource
+ README: configs/recognition/omnisource/README.md
+ Paper:
+ URL: https://arxiv.org/abs/2003.13042
+ Title: Omni-sourced Webly-supervised Learning for Video Recognition
+
+Models:
+- Config: configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 100
+ FLOPs: 134526976000
+ Input: 3seg
+ Modality: RGB
+ Parameters: 23917832
+ Pretrained: ImageNet
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: tsn_r50_1x1x8_100e_minikinetics_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 77.4
+ Top 5 Accuracy: 93.6
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/baseline/tsn_r50_1x1x8_100e_minikinetics_rgb_20201030-b4eaf92b.pth
+- Config: configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 100
+ FLOPs: 134526976000
+ Input: 3seg
+ Modality: RGB
+ Parameters: 23917832
+ Pretrained: ImageNet
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 78.0
+ Top 5 Accuracy: 93.6
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/googleimage/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb_20201030-23966b4b.pth
+- Config: configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 100
+ FLOPs: 134526976000
+ Input: 3seg
+ Modality: RGB
+ Parameters: 23917832
+ Pretrained: ImageNet
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: tsn_r50_1x1x8_100e_minikinetics_webimage_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 78.6
+ Top 5 Accuracy: 93.6
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/webimage/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb_20201030-66f5e046.pth
+- Config: configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 100
+ FLOPs: 134526976000
+ Input: 3seg
+ Modality: RGB
+ Parameters: 23917832
+ Pretrained: ImageNet
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 80.6
+ Top 5 Accuracy: 95.0
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/insvideo/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb_20201030-011f984d.pth
+- Config: configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 100
+ FLOPs: 134526976000
+ Input: 3seg
+ Modality: RGB
+ Parameters: 23917832
+ Pretrained: ImageNet
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 78.6
+ Top 5 Accuracy: 93.2
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/kineticsraw/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb_20201030-59f5d064.pth
+- Config: configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 100
+ FLOPs: 134526976000
+ Input: 3seg
+ Modality: RGB
+ Parameters: 23917832
+ Pretrained: ImageNet
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 81.3
+ Top 5 Accuracy: 94.8
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb/omnisource/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb_20201030-0f56ef51.pth
+- Config: configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 256
+ FLOPs: 54860070912
+ Input: 8x8
+ Modality: RGB
+ Parameters: 32044296
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: slowonly_r50_8x8x1_256e_minikinetics_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 78.6
+ Top 5 Accuracy: 93.9
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/baseline/slowonly_r50_8x8x1_256e_minikinetics_rgb_20201030-168eb098.pth
+- Config: configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 256
+ FLOPs: 54860070912
+ Input: 8x8
+ Modality: RGB
+ Parameters: 32044296
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 80.8
+ Top 5 Accuracy: 95.0
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/googleimage/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb_20201030-7da6dfc3.pth
+- Config: configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 256
+ FLOPs: 54860070912
+ Input: 8x8
+ Modality: RGB
+ Parameters: 32044296
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 81.3
+ Top 5 Accuracy: 95.2
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/webimage/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb_20201030-c36616e9.pth
+- Config: configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 256
+ FLOPs: 54860070912
+ Input: 8x8
+ Modality: RGB
+ Parameters: 32044296
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 82.4
+ Top 5 Accuracy: 95.6
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/insvideo/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb_20201030-e2890e8d.pth
+- Config: configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 256
+ FLOPs: 54860070912
+ Input: 8x8
+ Modality: RGB
+ Parameters: 32044296
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 80.3
+ Top 5 Accuracy: 94.5
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/kineticsraw/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb_20201030-62974bac.pth
+- Config: configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 12
+ Epochs: 256
+ FLOPs: 54860070912
+ Input: 8x8
+ Modality: RGB
+ Parameters: 32044296
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: MiniKinetics
+ Modality: RGB
+ Name: slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb
+ Results:
+ - Dataset: MiniKinetics
+ Metrics:
+ Top 1 Accuracy: 82.9
+ Top 5 Accuracy: 95.8
+ Task: Action Recognition
+ Training Json Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030.json
+ Training Log: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030.log
+ Weights: https://download.openmmlab.com/mmaction/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb/omnisource/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb_20201030-284cfd3b.pth
+- Config: configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 32
+ Epochs: 100
+ FLOPs: 102997721600
+ Parameters: 24327632
+ Pretrained: ImageNet
+ Resolution: 340x256
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: tsn_omnisource_r50_1x1x3_100e_kinetics_rgb
+ Converted From:
+ Weights: https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/models/kinetics400/omnisource/tsn_OmniSource_kinetics400_se_rgb_r50_seg3_f1s1_imagenet-4066cb7e.pth
+ Code: https://github.com/open-mmlab/mmaction
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 73.6
+ Top 5 Accuracy: 91.0
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_imagenet_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-54192355.pth
+- Config: configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 32
+ Epochs: 100
+ FLOPs: 102997721600
+ Parameters: 24327632
+ Pretrained: IG-1B
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: tsn_IG1B_pretrained_omnisource_r50_1x1x3_100e_kinetics_rgb
+ Converted From:
+ Weights: https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/models/kinetics400/omnisource/tsn_OmniSource_kinetics400_se_rgb_r50_seg3_f1s1_IG1B-25fc136b.pth
+ Code: https://github.com/open-mmlab/mmaction/
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 75.7
+ Top 5 Accuracy: 91.9
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-2863fed0.pth
+- Config: configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet50
+ Batch Size: 8
+ Epochs: 256
+ FLOPs: 27430649856
+ Parameters: 32454096
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: slowonly_r50_omnisource_4x16x1_256e_kinetics400_rgb
+ Converted From:
+ Weights: https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/models/kinetics400/omnisource/slowonly_OmniSource_kinetics400_se_rgb_r50_seg1_4x16_scratch-71f7b8ee.pth
+ Code: https://github.com/open-mmlab/mmaction/
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 76.8
+ Top 5 Accuracy: 92.5
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r50_omni_4x16x1_kinetics400_rgb_20200926-51b1f7ea.pth
+- Config: configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py
+ In Collection: OmniSource
+ Metadata:
+ Architecture: ResNet101
+ Batch Size: 8
+ Epochs: 196
+ FLOPs: 112063447040
+ Parameters: 60359120
+ Pretrained: None
+ Resolution: short-side 320
+ Training Data: Kinetics-400
+ Modality: RGB
+ Name: slowonly_r101_omnisource_8x8x1_196e_kinetics400_rgb
+ Converted From:
+ Weights: https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/models/kinetics400/omnisource/slowonly_OmniSource_kinetics400_se_rgb_r101_seg1_8x8_scratch-2f838cb0.pth
+ Code: https://github.com/open-mmlab/mmaction/
+ Results:
+ - Dataset: Kinetics-400
+ Metrics:
+ Top 1 Accuracy: 80.4
+ Top 5 Accuracy: 94.4
+ Task: Action Recognition
+ Weights: https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_omni_8x8x1_kinetics400_rgb_20200926-b5dbb701.pth
diff --git a/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/pipeline.png b/openmmlab_test/mmaction2-0.24.1/configs/recognition/omnisource/pipeline.png
new file mode 100644
index 0000000000000000000000000000000000000000..a3e3a2a046b04ea3f18dd26f007e97d268361b16
GIT binary patch
literal 245041