Commit 20e33356 authored by luopl's avatar luopl
Browse files

init

parents
Pipeline #1587 canceled with stages
version: 2.1

# this allows you to use CircleCI's dynamic configuration feature
setup: true

# the path-filtering orb is required to continue a pipeline based on
# the path of an updated fileset
orbs:
  path-filtering: circleci/path-filtering@0.1.2

workflows:
  # the always-run workflow is always triggered, regardless of the pipeline parameters.
  always-run:
    jobs:
      # the path-filtering/filter job determines which pipeline
      # parameters to update.
      - path-filtering/filter:
          name: check-updated-files
          # 3-column, whitespace-delimited mapping. One mapping per
          # line:
          # <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>
          mapping: |
            mmdet/.* lint_only false
            requirements/.* lint_only false
            tests/.* lint_only false
            tools/.* lint_only false
            configs/.* lint_only false
            .circleci/.* lint_only false
          base-revision: dev-3.x
          # this is the path of the configuration we should trigger once
          # path filtering and pipeline parameter value updates are
          # complete. In this case, we are using the parent dynamic
          # configuration itself.
          config-path: .circleci/test.yml
# Build arguments selecting the base PyTorch devel image; defaults give
# PyTorch 1.8.1 + CUDA 10.2 + cuDNN 7. CI overrides these via
# `docker build --build-arg` (see the build_cuda job in .circleci/test.yml).
ARG PYTORCH="1.8.1"
ARG CUDA="10.2"
ARG CUDNN="7"
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
# To fix GPG key error when running apt-get update
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
# System packages: ninja for C++/CUDA extension builds; the lib* packages are
# OpenCV runtime dependencies needed by mmcv.
RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
version: 2.1

# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
parameters:
  lint_only:
    type: boolean
    default: true

jobs:
  lint:
    docker:
      - image: cimg/python:3.7.4
    steps:
      - checkout
      - run:
          name: Install pre-commit hook
          command: |
            pip install pre-commit
            pre-commit install
      - run:
          name: Linting
          command: pre-commit run --all-files
      - run:
          name: Check docstring coverage
          command: |
            pip install interrogate
            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 85 mmdet
  build_cpu:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
      torch:
        type: string
      torchvision:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
      - run:
          name: Configure Python & pip
          command: |
            pip install --upgrade pip
            pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - when:
          condition:
            equal: ["3.9.0", << parameters.python >>]
          steps:
            - run: pip install "protobuf <= 3.20.1" && sudo apt-get update && sudo apt-get -y install libprotobuf-dev protobuf-compiler cmake
            - run: pip install dsdl
      - run:
          name: Install mmdet dependencies
          # numpy may be downgraded after building pycocotools, which causes `ImportError: numpy.core.multiarray failed to import`
          # force reinstall pycocotools to ensure pycocotools being built under the currenct numpy
          command: |
            python -m pip install git+ssh://git@github.com/open-mmlab/mmengine.git@main
            pip install -U openmim
            mim install 'mmcv >= 2.0.0rc4'
            pip install -r requirements/tests.txt -r requirements/optional.txt
            pip install --force-reinstall pycocotools
            pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
            pip install -r requirements/tracking.txt
            pip install git+https://github.com/cocodataset/panopticapi.git
            pip install git+https://github.com/JonathonLuiten/TrackEval.git
      - run:
          name: Build and install
          command: |
            pip install -e .
      - run:
          name: Run unittests
          command: |
            python -m coverage run --branch --source mmdet -m pytest tests/
            python -m coverage xml
            python -m coverage report -m
  build_cuda:
    parameters:
      torch:
        type: string
      cuda:
        type: enum
        enum: ["11.1", "11.7", "11.8"]
      cudnn:
        type: integer
        default: 8
    machine:
      image: linux-cuda-11:default
      # docker_layer_caching: true
    resource_class: gpu.nvidia.small.multi
    steps:
      - checkout
      - run:
          # CLoning repos in VM since Docker doesn't have access to the private key
          name: Clone Repos
          command: |
            git clone -b main --depth 1 ssh://git@github.com/open-mmlab/mmengine.git /home/circleci/mmengine
      - run:
          name: Install nvidia-container-toolkit and Restart Docker
          command: |
            sudo apt-get update
            sudo apt-get install -y nvidia-container-toolkit
            sudo systemctl restart docker
      - run:
          name: Build Docker image
          command: |
            docker build .circleci/docker -t mmdetection:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
            docker run --gpus all -t -d -v /home/circleci/project:/mmdetection -v /home/circleci/mmengine:/mmengine -w /mmdetection --name mmdetection mmdetection:gpu
            docker exec mmdetection apt-get install -y git
      - run:
          name: Install mmdet dependencies
          command: |
            docker exec mmdetection pip install -e /mmengine
            docker exec mmdetection pip install -U openmim
            docker exec mmdetection mim install 'mmcv >= 2.0.0rc4'
            docker exec mmdetection pip install -r requirements/tests.txt -r requirements/optional.txt
            docker exec mmdetection pip install pycocotools
            docker exec mmdetection pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
            docker exec mmdetection pip install -r requirements/tracking.txt
            docker exec mmdetection pip install git+https://github.com/cocodataset/panopticapi.git
            docker exec mmdetection pip install git+https://github.com/JonathonLuiten/TrackEval.git
            docker exec mmdetection python -c 'import mmcv; print(mmcv.__version__)'
      - run:
          name: Build and install
          command: |
            docker exec mmdetection pip install -e .
      - run:
          name: Run unittests
          command: |
            docker exec mmdetection python -m pytest tests/

workflows:
  pr_stage_lint:
    when: << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - dev-3.x
  pr_stage_test:
    when:
      not: << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - dev-3.x
      - build_cpu:
          name: minimum_version_cpu
          torch: 1.8.0
          torchvision: 0.9.0
          python: 3.7.16
          requires:
            - lint
      - build_cpu:
          name: maximum_version_cpu
          torch: 2.0.0
          torchvision: 0.15.1
          python: 3.9.0
          requires:
            - minimum_version_cpu
      - hold:
          type: approval
          requires:
            - maximum_version_cpu
      - build_cuda:
          name: mainstream_version_gpu
          torch: 1.8.1
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "11.1"
          requires:
            - hold
      - build_cuda:
          name: maximum_version_gpu
          torch: 2.0.0
          cuda: "11.7"
          cudnn: 8
          requires:
            - hold
  merge_stage_test:
    when:
      not: << pipeline.parameters.lint_only >>
    jobs:
      - build_cuda:
          name: minimum_version_gpu
          torch: 1.8.0
          cuda: "11.1"
          filters:
            branches:
              only:
                - dev-3.x
This diff is collapsed.
configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py
configs/atss/atss_r50_fpn_1x_coco.py
configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py
configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py
configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py
configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py
configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py
configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py
configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py
configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py
configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py
configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py
configs/ddod/ddod_r50_fpn_1x_coco.py
configs/detectors/detectors_htc-r50_1x_coco.py
configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py
configs/detr/detr_r50_8xb2-150e_coco.py
configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py
configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py
configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py
configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py
configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py
configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py
configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py
configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py
configs/free_anchor/freeanchor_r50_fpn_1x_coco.py
configs/fsaf/fsaf_r50_fpn_1x_coco.py
configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py
configs/gfl/gfl_r50_fpn_1x_coco.py
configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py
configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py
configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py
configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py
configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py
configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py
configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py
configs/htc/htc_r50_fpn_1x_coco.py
configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py
configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py
configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py
configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py
configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py
configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py
configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py
configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py
configs/paa/paa_r50_fpn_1x_coco.py
configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py
configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py
configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py
configs/pvt/retinanet_pvt-t_fpn_1x_coco.py
configs/queryinst/queryinst_r50_fpn_1x_coco.py
configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py
configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py
configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py
configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py
configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py
configs/rpn/rpn_r50_fpn_1x_coco.py
configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py
configs/scnet/scnet_r50_fpn_1x_coco.py
configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py
configs/solo/solo_r50_fpn_1x_coco.py
configs/solov2/solov2_r50_fpn_1x_coco.py
configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py
configs/ssd/ssd300_coco.py
configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py
configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py
configs/tood/tood_r50_fpn_1x_coco.py
configs/tridentnet/tridentnet_r50-caffe_1x_coco.py
configs/vfnet/vfnet_r50_fpn_1x_coco.py
configs/yolact/yolact_r50_8xb8-55e_coco.py
configs/yolo/yolov3_d53_8xb8-320-273e_coco.py
configs/yolof/yolof_r50-c5_8xb8-1x_coco.py
configs/yolox/yolox_tiny_8xb8-300e_coco.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
    """Parse CLI flags selecting which groups of configs to benchmark.

    Returns:
        argparse.Namespace: parsed arguments. Boolean group selectors
        (``basic_arch``, ``datasets``, ``data_pipeline``, ``nn_module``),
        an optional list ``model_options`` of extra config dirs, and
        ``out``, the path the gathered config list is written to.
    """
    parser = argparse.ArgumentParser(description='Filter configs to train')
    parser.add_argument(
        '--basic-arch',
        action='store_true',
        help='to train models in basic arch')
    parser.add_argument(
        '--datasets', action='store_true', help='to train models in dataset')
    parser.add_argument(
        '--data-pipeline',
        action='store_true',
        help='to train models related to data pipeline, e.g. augmentations')
    parser.add_argument(
        '--nn-module',
        action='store_true',
        help='to train models related to neural network modules')
    parser.add_argument(
        '--model-options',
        nargs='+',
        help='custom options to special model benchmark')
    parser.add_argument(
        '--out',
        type=str,
        default='batch_train_list.txt',
        help='output path of gathered metrics to be stored')
    args = parser.parse_args()
    return args
# Config directory groups; each CLI flag of parse_args() selects one group.
# Detector-architecture configs selected by `--basic-arch`.
basic_arch_root = [
'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
# Dataset-specific configs selected by `--datasets`.
datasets_root = [
'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
# Data-pipeline / augmentation configs selected by `--data-pipeline`.
data_pipeline_root = ['albu_example', 'instaboost']
# Neural-network module configs (necks, plugins, backbones) selected by
# `--nn-module`.
nn_module_root = [
'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
# Whitelist of config files eligible for benchmarking. main() keeps only
# the configs found under the selected directory groups that also appear
# here, so entries for files that no longer exist are silently skipped.
benchmark_pool = [
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
'configs/centripetalnet/'
'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/cornernet/'
'cornernet_hourglass104_mstest_8x6_210e_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
'configs/detectors/detectors_htc_r50_1x_coco.py',
'configs/detr/detr_r50_8x2_150e_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/gfl/gfl_r50_fpn_1x_coco.py',
'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/paa/paa_r50_fpn_1x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
'configs/resnest/'
'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
'configs/yolact/yolact_r50_1x8_coco.py',
'configs/yolo/yolov3_d53_320_273e_coco.py',
'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
'configs/scnet/scnet_r50_fpn_1x_coco.py',
'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
    """Gather benchmark config paths and write them to ``args.out``.

    Builds the set of selected config directories from the CLI flags,
    scans each directory, keeps only files listed in ``benchmark_pool``
    (deduplicated, first occurrence wins), and writes one path per line.
    """
    args = parse_args()

    benchmark_type = []
    if args.basic_arch:
        benchmark_type += basic_arch_root
    if args.datasets:
        benchmark_type += datasets_root
    if args.data_pipeline:
        benchmark_type += data_pipeline_root
    if args.nn_module:
        benchmark_type += nn_module_root

    # `--model-options` may add arbitrary extra config directories.
    special_model = args.model_options
    if special_model is not None:
        benchmark_type += special_model

    config_dpath = 'configs/'
    benchmark_configs = []
    for cfg_root in benchmark_type:
        cfg_dir = osp.join(config_dpath, cfg_root)
        # `with` closes the scandir iterator deterministically instead of
        # relying on garbage collection (avoids ResourceWarning).
        with os.scandir(cfg_dir) as configs:
            for cfg in configs:
                config_path = osp.join(cfg_dir, cfg.name)
                if (config_path in benchmark_pool
                        and config_path not in benchmark_configs):
                    benchmark_configs.append(config_path)

    print(f'Totally found {len(benchmark_configs)} configs to benchmark')
    with open(args.out, 'w') as f:
        for config in benchmark_configs:
            f.write(config + '\n')


if __name__ == '__main__':
    main()
albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py
atss/atss_r50_fpn_1x_coco.py
autoassign/autoassign_r50-caffe_fpn_1x_coco.py
boxinst/boxinst_r50_fpn_ms-90k_coco.py
carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py
cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py
cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py
cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py
centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py
centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py
condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py
conditional_detr/conditional-detr_r50_8xb2-50e_coco.py
convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py
dab_detr/dab-detr_r50_8xb2-50e_coco.py
dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py
ddod/ddod_r50_fpn_1x_coco.py
deformable_detr/deformable-detr_r50_16xb2-50e_coco.py
detectors/detectors_htc-r50_1x_coco.py
detr/detr_r50_8xb2-150e_coco.py
dino/dino-4scale_r50_8xb2-12e_coco.py
double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py
dyhead/atss_r50_fpn_dyhead_1x_coco.py
dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py
efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py
empirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py
faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py
fpg/retinanet_r50_fpg_crop640_50e_coco.py
free_anchor/freeanchor_r50_fpn_1x_coco.py
fsaf/fsaf_r50_fpn_1x_coco.py
gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py
gfl/gfl_r50_fpn_1x_coco.py
glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py
ghm/retinanet_r50_fpn_ghm-1x_coco.py
gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py
gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py
grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py
groie/faste-rcnn_r50_fpn_groie_1x_coco.py
guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py
hrnet/htc_hrnetv2p-w18_20e_coco.py
htc/htc_r50_fpn_1x_coco.py
instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py
lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py
ld/ld_r18-gflv1-r101_fpn_1x_coco.py
libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py
lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py
mask2former/mask2former_r50_8xb2-lsj-50e_coco.py
mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
maskformer/maskformer_r50_ms-16xb1-75e_coco.py
ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py
nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py
nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py
paa/paa_r50_fpn_1x_coco.py
pafpn/faster-rcnn_r50_pafpn_1x_coco.py
panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py
point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py
pvt/retinanet_pvtv2-b0_fpn_1x_coco.py
queryinst/queryinst_r50_fpn_1x_coco.py
regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py
reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py
res2net/faster-rcnn_res2net-101_fpn_2x_coco.py
resnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py
resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py
retinanet/retinanet_r50_fpn_1x_coco.py
rpn/rpn_r50_fpn_1x_coco.py
rtmdet/rtmdet_s_8xb32-300e_coco.py
rtmdet/rtmdet-ins_s_8xb32-300e_coco.py
sabl/sabl-retinanet_r50_fpn_1x_coco.py
scnet/scnet_r50_fpn_1x_coco.py
scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py
seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py
simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py
soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py
solo/solo_r50_fpn_1x_coco.py
solov2/solov2_r50_fpn_1x_coco.py
sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py
ssd/ssd300_coco.py
swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py
tood/tood_r50_fpn_1x_coco.py
tridentnet/tridentnet_r50-caffe_1x_coco.py
vfnet/vfnet_r50_fpn_1x_coco.py
yolact/yolact_r50_8xb8-55e_coco.py
yolo/yolov3_d53_8xb8-320-273e_coco.py
yolof/yolof_r50-c5_8xb8-1x_coco.py
yolox/yolox_s_8xb8-300e_coco.py
deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py
mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py
masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py
ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py
qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py
strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist
from terminaltables import GithubFlavoredMarkdownTable
from tools.analysis_tools.benchmark import repeat_measure_inference_speed
def parse_args():
    """Parse CLI arguments for the batch FPS benchmark.

    Also exports ``LOCAL_RANK`` into the environment when absent, so the
    distributed launcher started without torchrun still works.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='MMDet benchmark a model of FPS')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    parser.add_argument(
        '--round-num',
        type=int,
        default=1,
        help='round a number to a given precision in decimal digits')
    parser.add_argument(
        '--repeat-num',
        type=int,
        default=1,
        help='number of repeat times of measurement for averaging the results')
    parser.add_argument(
        '--out', type=str, help='output path of gathered fps to be stored')
    parser.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def results2markdown(result_dict):
    """Print benchmark results as a GitHub-flavored markdown table.

    Args:
        result_dict (dict): maps a config path to a dict with keys ``fps``
            and ``ms_times_pre_image``; when ``fps`` is a list (repeated
            measurements) the dict also carries ``mean_fps`` and
            ``mean_times_pre_image``.
    """
    table_data = []
    # Set to True when any entry holds repeated (list) measurements; the
    # header row is chosen accordingly below.
    is_multiple_results = False
    for cfg_name, value in result_dict.items():
        name = cfg_name.replace('configs/', '')
        fps = value['fps']
        ms_times_pre_image = value['ms_times_pre_image']
        if isinstance(fps, list):
            is_multiple_results = True
            mean_fps = value['mean_fps']
            mean_times_pre_image = value['mean_times_pre_image']
            fps_str = ','.join([str(s) for s in fps])
            ms_times_pre_image_str = ','.join(
                [str(s) for s in ms_times_pre_image])
            table_data.append([
                name, fps_str, mean_fps, ms_times_pre_image_str,
                mean_times_pre_image
            ])
        else:
            table_data.append([name, fps, ms_times_pre_image])

    if is_multiple_results:
        table_data.insert(0, [
            'model', 'fps', 'mean_fps', 'times_pre_image(ms)',
            'mean_times_pre_image(ms)'
        ])
    else:
        table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)'])
    table = GithubFlavoredMarkdownTable(table_data)
    print(table.table, flush=True)
if __name__ == '__main__':
    args = parse_args()
    assert args.round_num >= 0
    assert args.repeat_num >= 1

    config = Config.fromfile(args.config)

    # This script only supports distributed mode.
    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)

    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            record_metrics = model_info['metric']
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                fps = repeat_measure_inference_speed(cfg, checkpoint,
                                                     args.max_iter,
                                                     args.log_interval,
                                                     args.fuse_conv_bn,
                                                     args.repeat_num)
                if args.repeat_num > 1:
                    # Repeated runs: report per-run values plus their means.
                    fps_list = [round(fps_, args.round_num) for fps_ in fps]
                    times_pre_image_list = [
                        round(1000 / fps_, args.round_num) for fps_ in fps
                    ]
                    mean_fps = round(
                        sum(fps_list) / len(fps_list), args.round_num)
                    mean_times_pre_image = round(
                        sum(times_pre_image_list) / len(times_pre_image_list),
                        args.round_num)
                    print(
                        f'{cfg_path} '
                        f'Overall fps: {fps_list}[{mean_fps}] img / s, '
                        f'times per image: '
                        f'{times_pre_image_list}[{mean_times_pre_image}] '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=fps_list,
                        mean_fps=mean_fps,
                        ms_times_pre_image=times_pre_image_list,
                        mean_times_pre_image=mean_times_pre_image)
                else:
                    print(
                        f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                        f'times per image: {1000 / fps:.{args.round_num}f} '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=round(fps, args.round_num),
                        ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                # A failing model records zeros so the summary table still
                # lists it instead of dropping the row.
                print(f'{cfg_path} error: {repr(e)}')
                if args.repeat_num > 1:
                    result_dict[cfg_path] = dict(
                        fps=[0],
                        mean_fps=0,
                        ms_times_pre_image=[0],
                        mean_times_pre_image=0)
                else:
                    result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)

    if args.out:
        mkdir_or_exist(args.out)
        dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
    results2markdown(result_dict)
# Copyright (c) OpenMMLab. All rights reserved.
# Shell commands installing the third-party libraries that some benchmark
# configs require; paths are relative to the directory the benchmark
# runner executes from.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmpretrain',
'pip install git+https://github.com/lvis-dataset/lvis-api.git',
'pip install -r ../requirements/multimodal.txt',
'pip install -r ../requirements/tracking.txt',
'pip install git+https://github.com/JonathonLuiten/TrackEval.git',
]
# Default tolerated deviation from the reference metric before a run is
# flagged as a regression.
default_floating_range = 0.5
# Per-model overrides of the tolerance; keys are config paths relative to
# `configs/`.
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import os.path as osp
from argparse import ArgumentParser
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.testing import replace_to_ceph
from mmdet.utils import register_all_modules, replace_cfg_vals
def parse_args():
    """Parse CLI arguments for the batch test-benchmark script.

    Also exports ``LOCAL_RANK`` into the environment when absent.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    parser.add_argument('--work-dir', help='the dir to save logs')
    parser.add_argument('--ceph', action='store_true')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # FIX: the original called `parser.parse_args()` a second time here,
    # discarding the namespace built above; a single parse is sufficient.
    return args
# TODO: Need to refactor test.py so that it can be reused.
def fast_test_model(config_name, checkpoint, args, logger=None):
    """Load a config, point it at ``checkpoint`` and run ``runner.test()``.

    Args:
        config_name (str): path of the model config file.
        checkpoint (str): path of the checkpoint to load.
        args (argparse.Namespace): CLI namespace; reads ``launcher``,
            ``cfg_options``, ``work_dir`` and ``ceph``.
        logger: unused; kept for call-site compatibility.
    """
    cfg = Config.fromfile(config_name)
    cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join(args.work_dir,
                                osp.splitext(osp.basename(config_name))[0])
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(config_name))[0])

    if args.ceph:
        replace_to_ceph(cfg)

    cfg.load_from = checkpoint

    # TODO: temporary plan
    if 'visualizer' in cfg:
        if 'name' in cfg.visualizer:
            del cfg.visualizer.name

    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    runner.test()
# Sample test whether the inference code is correct
def main(args):
    """Run ``fast_test_model`` for every model listed in ``args.config``.

    Failures are logged (to ``benchmark_test.log``) and skipped so one
    broken model does not abort the whole batch.
    """
    # register all modules in mmdet into the registries
    register_all_modules(init_default_scope=False)

    config = Config.fromfile(args.config)

    # test all model
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_test.log',
        log_level=logging.ERROR)

    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'], flush=True)
            config_name = model_info['config'].strip()
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                fast_test_model(config_name, checkpoint, args, logger)
            except Exception as e:
                # NOTE(review): the stray '"' in this message looks like a
                # typo in the upstream source — kept to preserve log output.
                logger.error(f'{config_name} " : {repr(e)}')


if __name__ == '__main__':
    args = parse_args()
    main(args)
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmengine.utils import mkdir_or_exist
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
    """Build and parse the CLI for the single-image inference benchmark."""
    parser = ArgumentParser()
    # required positional arguments
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    # optional settings
    parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
    parser.add_argument('--aug', action='store_true', help='aug test')
    parser.add_argument('--model-name', help='model name to inference')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--out-dir', default=None, help='Dir to output file')
    parser.add_argument('--wait-time', type=float, default=1,
                        help='the interval of show (s), 0 is block')
    parser.add_argument('--device', default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette', default='coco',
                        choices=['coco', 'voc', 'citys', 'random'],
                        help='Color palette used for visualization')
    parser.add_argument('--score-thr', type=float, default=0.3,
                        help='bbox score threshold')
    return parser.parse_args()
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
    """Run single-image inference for one model and optionally render it.

    Args:
        config_name (str): Path to the model config file.
        checkpoint (str): Path to the checkpoint to load.
        visualizer: Visualizer used to draw predictions.
        args: Parsed CLI arguments (img, palette, device, show, out_dir...).
        logger: Unused here; kept for interface compatibility.

    Returns:
        The detection result produced by ``inference_detector``.
    """
    cfg = Config.fromfile(config_name)
    # augmented testing is not supported by this benchmark
    if args.aug:
        raise NotImplementedError()
    model = init_detector(
        cfg, checkpoint, palette=args.palette, device=args.device)
    visualizer.dataset_meta = model.dataset_meta
    # run the detector on the single demo image
    result = inference_detector(model, args.img)
    # render the prediction only when showing or dumping to disk is requested
    if args.show or args.out_dir is not None:
        image = mmcv.imconvert(mmcv.imread(args.img), 'bgr', 'rgb')
        out_file = None
        if args.out_dir is not None:
            mkdir_or_exist(args.out_dir)
            out_file = osp.join(
                args.out_dir,
                config_name.split('/')[-1].replace('py', 'jpg'))
        visualizer.add_datasample(
            'result',
            image,
            data_sample=result,
            draw_gt=False,
            show=args.show,
            wait_time=args.wait_time,
            out_file=out_file,
            pred_score_thr=args.score_thr)
    return result
# Sample test whether the inference code is correct
def main(args):
    """Benchmark inference for one named model or for every listed model.

    Args:
        args: Parsed CLI arguments; ``args.config`` holds the model list and
            ``args.model_name`` optionally selects a single entry.
    """
    # register all modules in mmdet into the registries
    register_all_modules()
    config = Config.fromfile(args.config)
    # init visualizer
    visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
    visualizer = VISUALIZERS.build(visualizer_cfg)
    # test single model
    if args.model_name:
        if args.model_name in config:
            model_infos = config[args.model_name]
            if not isinstance(model_infos, list):
                model_infos = [model_infos]
            model_info = model_infos[0]
            config_name = model_info['config'].strip()
            print(f'processing: {config_name}', flush=True)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            # build the model from a config file and a checkpoint file
            inference_model(config_name, checkpoint, visualizer, args)
            return
        else:
            raise RuntimeError('model name input error.')
    # test all models; failures are logged instead of aborting the loop
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_test_image.log',
        log_level=logging.ERROR)
    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'], flush=True)
            config_name = model_info['config'].strip()
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                # build the model from a config file and a checkpoint file
                inference_model(config_name, checkpoint, visualizer, args,
                                logger)
            except Exception as e:
                # bugfix: removed a stray '"' left in the log message
                logger.error(f'{config_name} : {repr(e)}')
# script entry point: parse CLI arguments and run the benchmark
if __name__ == '__main__':
    args = parse_args()
    main(args)
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import os.path as osp
from argparse import ArgumentParser
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger, print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.testing import replace_to_ceph
from mmdet.utils import register_all_modules, replace_cfg_vals
def parse_args():
    """Parse CLI arguments for the fast-training benchmark.

    Also exports ``LOCAL_RANK`` into the environment when it is not already
    set, as required by the non-distributed launcher.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--ceph', action='store_true')
    parser.add_argument('--save-ckpt', action='store_true')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--auto-scale-lr',
        action='store_true',
        help='enable automatically scaling LR.')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='resume from the latest checkpoint in the work_dir automatically')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # bugfix: the arguments were previously parsed a second time here,
    # which was redundant (same argv, same result) and is now removed.
    return args
# TODO: Need to refactor train.py so that it can be reused.
def fast_train_model(config_name, args, logger=None):
    """Train one model briefly to smoke-test its training pipeline.

    Loads the config, patches it for a fast run (FastStopTrainingHook,
    optional AMP / auto-scaled LR / ceph backend), builds a Runner and
    calls ``runner.train()``.

    Args:
        config_name (str): Path to the model config file.
        args: Parsed CLI arguments (launcher, work_dir, amp, ceph, ...).
        logger: Unused here; kept for interface compatibility.
    """
    cfg = Config.fromfile(config_name)
    cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join(args.work_dir,
                                osp.splitext(osp.basename(config_name))[0])
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(config_name))[0])
    # inject a FastStopTrainingHook so training stops almost immediately
    ckpt_hook = cfg.default_hooks.checkpoint
    by_epoch = ckpt_hook.get('by_epoch', True)
    fast_stop_hook = dict(type='FastStopTrainingHook')
    fast_stop_hook['by_epoch'] = by_epoch
    if args.save_ckpt:
        # when a checkpoint must be saved, run a little longer so that at
        # least one checkpoint interval elapses before the forced stop
        if by_epoch:
            interval = 1
            stop_iter_or_epoch = 2
        else:
            interval = 4
            stop_iter_or_epoch = 10
        fast_stop_hook['stop_iter_or_epoch'] = stop_iter_or_epoch
        fast_stop_hook['save_ckpt'] = True
        ckpt_hook.interval = interval
    if 'custom_hooks' in cfg:
        cfg.custom_hooks.append(fast_stop_hook)
    else:
        custom_hooks = [fast_stop_hook]
        cfg.custom_hooks = custom_hooks
    # TODO: temporary plan
    # drop the visualizer name so repeated runs do not clash on it
    if 'visualizer' in cfg:
        if 'name' in cfg.visualizer:
            del cfg.visualizer.name
    # enable automatic-mixed-precision training
    if args.amp is True:
        optim_wrapper = cfg.optim_wrapper.type
        if optim_wrapper == 'AmpOptimWrapper':
            print_log(
                'AMP training is already enabled in your config.',
                logger='current',
                level=logging.WARNING)
        else:
            assert optim_wrapper == 'OptimWrapper', (
                '`--amp` is only supported when the optimizer wrapper type is '
                f'`OptimWrapper` but got {optim_wrapper}.')
            cfg.optim_wrapper.type = 'AmpOptimWrapper'
            cfg.optim_wrapper.loss_scale = 'dynamic'
    # enable automatically scaling LR
    if args.auto_scale_lr:
        if 'auto_scale_lr' in cfg and \
                'enable' in cfg.auto_scale_lr and \
                'base_batch_size' in cfg.auto_scale_lr:
            cfg.auto_scale_lr.enable = True
        else:
            raise RuntimeError('Can not find "auto_scale_lr" or '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your'
                               ' configuration file.')
    # redirect dataset/checkpoint paths to the ceph backend when requested
    if args.ceph:
        replace_to_ceph(cfg)
    cfg.resume = args.resume
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    runner.train()
# Sample test whether the train code is correct
def main(args):
    """Run the fast-training smoke test over every model in the config.

    Args:
        args: Parsed CLI arguments; ``args.config`` is the benchmark model
            list.
    """
    # register all modules in mmdet into the registries
    register_all_modules(init_default_scope=False)
    config = Config.fromfile(args.config)
    # test all models; failures are logged instead of aborting the loop
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_train.log',
        log_level=logging.ERROR)
    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'], flush=True)
            config_name = model_info['config'].strip()
            try:
                fast_train_model(config_name, args, logger)
            except RuntimeError as e:
                # quick exit is the normal exit message
                if 'quick exit' not in repr(e):
                    # bugfix: removed a stray '"' left in the log message
                    logger.error(f'{config_name} : {repr(e)}')
            except Exception as e:
                # bugfix: removed a stray '"' left in the log message
                logger.error(f'{config_name} : {repr(e)}')
# script entry point: parse CLI arguments and run the training benchmark
if __name__ == '__main__':
    args = parse_args()
    main(args)
atss/atss_r50_fpn_1x_coco.py
faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py
configs/grounding_dino/grounding_dino_swin-t_finetune_16xb2_1x_coco.py
configs/glip/glip_atss_swin-t_a_fpn_dyhead_16xb2_ms-2x_funtune_coco.py
configs/ddq/ddq-detr-4scale_r50_8xb2-12e_coco.py
panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
retinanet/retinanet_r50_fpn_1x_coco.py
rtmdet/rtmdet_s_8xb32-300e_coco.py
rtmdet/rtmdet-ins_s_8xb32-300e_coco.py
fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py
dino/dino-4scale_r50_8xb2-12e_coco.py
htc/htc_r50_fpn_1x_coco.py
mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py
condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py
lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py
mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py
masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py
qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py
import logging
import re
import tempfile
from argparse import ArgumentParser
from collections import OrderedDict
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from mmengine import Config, DictAction
from mmengine.analysis import get_model_complexity_info
from mmengine.analysis.print_helper import _format_size
from mmengine.fileio import FileClient
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.runner import Runner
from modelindex.load_model_index import load
from rich.console import Console
from rich.table import Table
from rich.text import Text
from tqdm import tqdm
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
# shared rich console used for printing the summary table
console = Console()
# repository root, resolved relative to this script's own location
MMDET_ROOT = Path(__file__).absolute().parents[1]
def parse_args():
    """Build and parse the CLI for validating all model-index models."""
    parser = ArgumentParser(description='Valid all models in model-index.yml')
    parser.add_argument('--shape', type=int, nargs='+', default=[1280, 800],
                        help='input image size')
    parser.add_argument(
        '--checkpoint_root',
        help='Checkpoint file root path. If set, load checkpoint before test.')
    parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
    parser.add_argument('--models', nargs='+', help='models name to inference')
    parser.add_argument('--batch-size', type=int, default=1,
                        help='The batch size during the inference.')
    parser.add_argument('--flops', action='store_true',
                        help='Get Flops and Params of models')
    parser.add_argument('--flops-str', action='store_true',
                        help='Output FLOPs and params counts in a string form.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--size_divisor', type=int, default=32,
        help='Pad the input image, the minimum size that is divisible '
        'by size_divisor, -1 means do not pad the image.')
    return parser.parse_args()
def inference(config_file, checkpoint, work_dir, args, exp_name):
    """Build one model and (optionally) measure its FLOPs/params.

    Args:
        config_file (Path): Path to the model config file.
        checkpoint (str | None): Checkpoint to load, or None.
        work_dir (str): Working directory for the run.
        args: Parsed CLI arguments (flops, shape, size_divisor, ...).
        exp_name (str): Experiment name stored into the config.

    Returns:
        dict: Keys 'model' plus, when ``args.flops`` is set, 'resolution',
        'flops', 'params' and 'Get Types' ('direct' or 'dataloader').
    """
    logger = MMLogger.get_instance(name='MMLogger')
    logger.warning('if you want test flops, please make sure torch>=1.12')
    cfg = Config.fromfile(config_file)
    cfg.work_dir = work_dir
    cfg.load_from = checkpoint
    cfg.log_level = 'WARN'
    cfg.experiment_name = exp_name
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # forward the model
    result = {'model': config_file.stem}
    if args.flops:
        # resolve the requested input resolution (square or H x W)
        if len(args.shape) == 1:
            h = w = args.shape[0]
        elif len(args.shape) == 2:
            h, w = args.shape
        else:
            raise ValueError('invalid input shape')
        divisor = args.size_divisor
        # round H and W up to the next multiple of the divisor
        if divisor > 0:
            h = int(np.ceil(h / divisor)) * divisor
            w = int(np.ceil(w / divisor)) * divisor
        input_shape = (3, h, w)
        result['resolution'] = input_shape
        try:
            # first attempt: feed a random tensor directly into the model
            cfg = Config.fromfile(config_file)
            if hasattr(cfg, 'head_norm_cfg'):
                cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
            if args.cfg_options is not None:
                cfg.merge_from_dict(args.cfg_options)
            model = MODELS.build(cfg.model)
            input = torch.rand(1, *input_shape)
            if torch.cuda.is_available():
                model.cuda()
                input = input.cuda()
            model = revert_sync_batchnorm(model)
            inputs = (input, )
            model.eval()
            outputs = get_model_complexity_info(
                model, input_shape, inputs, show_table=False, show_arch=False)
            flops = outputs['flops']
            params = outputs['params']
            activations = outputs['activations']
            result['Get Types'] = 'direct'
        except: # noqa 772
            # fallback: some models need real preprocessed data, so pull a
            # batch from the val dataloader and patch model.forward with it
            logger = MMLogger.get_instance(name='MMLogger')
            logger.warning(
                'Direct get flops failed, try to get flops with data')
            cfg = Config.fromfile(config_file)
            if hasattr(cfg, 'head_norm_cfg'):
                cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
            data_loader = Runner.build_dataloader(cfg.val_dataloader)
            data_batch = next(iter(data_loader))
            model = MODELS.build(cfg.model)
            if torch.cuda.is_available():
                model = model.cuda()
            model = revert_sync_batchnorm(model)
            model.eval()
            _forward = model.forward
            data = model.data_preprocessor(data_batch)
            del data_loader
            # bind the data samples so complexity analysis can call forward
            model.forward = partial(
                _forward, data_samples=data['data_samples'])
            outputs = get_model_complexity_info(
                model,
                input_shape,
                data['inputs'],
                show_table=False,
                show_arch=False)
            flops = outputs['flops']
            params = outputs['params']
            activations = outputs['activations']
            result['Get Types'] = 'dataloader'
        # optionally convert the raw counts to human-readable strings
        if args.flops_str:
            flops = _format_size(flops)
            params = _format_size(params)
            activations = _format_size(activations)
        result['flops'] = flops
        result['params'] = params
    return result
def show_summary(summary_data, args):
    """Print the benchmark summary as a rich table and dump it to CSV.

    Args:
        summary_data (dict): Per-model dicts with 'valid' and, for passing
            models, 'resolution' / 'flops' / 'params'.
        args: Parsed CLI arguments; ``args.flops`` adds the FLOPs columns.
    """
    table = Table(title='Validation Benchmark Regression Summary')
    for header in ('Model', 'Validation', 'Resolution (c, h, w)'):
        table.add_column(header)
    if args.flops:
        table.add_column('Flops', justify='right', width=11)
        table.add_column('Params', justify='right')
    for model_name, summary in summary_data.items():
        status = summary['valid']
        # green PASS / red FAIL markup for the console rendering
        status_color = 'green' if status == 'PASS' else 'red'
        row = [model_name, f'[{status_color}]{status}[/{status_color}]']
        if status == 'PASS':
            row.append(str(summary['resolution']))
            if args.flops:
                row.append(str(summary['flops']))
                row.append(str(summary['params']))
        table.add_row(*row)
    console.print(table)
    # strip the rich markup before exporting the table to CSV
    table_data = {
        column.header: [Text.from_markup(cell).plain for cell in column.cells]
        for column in table.columns
    }
    pd.DataFrame(table_data).to_csv('./mmdetection_flops.csv')
# Sample test whether the inference code is correct
def main(args):
    """Validate every model in model-index.yml (optionally filtered).

    Builds each model (and measures FLOPs when requested), collecting a
    PASS/FAIL summary that is printed and saved to CSV at the end.
    """
    register_all_modules()
    model_index_file = MMDET_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})
    logger = MMLogger(
        'validation',
        logger_name='validation',
        log_file='benchmark_test_image.log',
        log_level=logging.INFO)
    # optional filtering: '--models' patterns are matched against model
    # names, with '+' normalized to '_' on both sides
    if args.models:
        patterns = [
            re.compile(pattern.replace('+', '_')) for pattern in args.models
        ]
        filter_models = {}
        for k, v in models.items():
            k = k.replace('+', '_')
            if any([re.match(pattern, k) for pattern in patterns]):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models
    summary_data = {}
    tmpdir = tempfile.TemporaryDirectory()
    for model_name, model_info in tqdm(models.items()):
        if model_info.config is None:
            continue
        # model-index URL-encodes '+' as '%2B'; undo that for file paths
        model_info.config = model_info.config.replace('%2B', '+')
        config = Path(model_info.config)
        try:
            config.exists()
        except: # noqa 722
            logger.error(f'{model_name}: {config} not found.')
            continue
        logger.info(f'Processing: {model_name}')
        http_prefix = 'https://download.openmmlab.com/mmdetection/'
        # resolve the local (or s3) checkpoint mirrored from the weights URL
        if args.checkpoint_root is not None:
            root = args.checkpoint_root
            if 's3://' in args.checkpoint_root:
                from petrel_client.common.exception import AccessDeniedError
                file_client = FileClient.infer_client(uri=root)
                checkpoint = file_client.join_path(
                    root, model_info.weights[len(http_prefix):])
                try:
                    exists = file_client.exists(checkpoint)
                except AccessDeniedError:
                    exists = False
            else:
                checkpoint = Path(root) / model_info.weights[len(http_prefix):]
                exists = checkpoint.exists()
            if exists:
                checkpoint = str(checkpoint)
            else:
                print(f'WARNING: {model_name}: {checkpoint} not found.')
                checkpoint = None
        else:
            checkpoint = None
        try:
            # build the model from a config file and a checkpoint file
            result = inference(MMDET_ROOT / config, checkpoint, tmpdir.name,
                               args, model_name)
            result['valid'] = 'PASS'
        except Exception: # noqa 722
            import traceback
            logger.error(f'"{config}" :\n{traceback.format_exc()}')
            result = {'valid': 'FAIL'}
        summary_data[model_name] = result
    tmpdir.cleanup()
    show_summary(summary_data, args)
# script entry point: parse CLI arguments and run the validation sweep
if __name__ == '__main__':
    args = parse_args()
    main(args)
# Modified from:
# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py
import argparse
import logging
import os
import pathlib
import re
import sys
from multiprocessing.dummy import Pool
from typing import NamedTuple, Optional, Tuple
import requests
from mmengine.logging import MMLogger
def parse_args():
    """Build and parse the CLI for the markdown link checker."""
    parser = argparse.ArgumentParser(
        description='Goes through all the inline-links '
        'in markdown files and reports the breakages')
    parser.add_argument('--num-threads', type=int, default=100,
                        help='Number of processes to confirm the link')
    parser.add_argument('--https-proxy', type=str, help='https proxy')
    parser.add_argument('--out', type=str, default='link_reports.txt',
                        help='output path of reports')
    return parser.parse_args()
# HTTP status codes that still indicate the linked resource exists.
OK_STATUS_CODES = (
    200,
    401,  # the resource exists but may require some sort of login.
    403,  # ^ same
    405,  # HEAD method not allowed.
    # the resource exists, but our default 'Accept-' header may not
    # match what the server can provide.
    406,
)
class MatchTuple(NamedTuple):
    """One inline markdown link occurrence found while scanning files."""
    source: str  # path of the markdown file containing the link
    name: str  # the link's display text
    link: str  # the link target (URL or repository-relative path)
def check_link(
        match_tuple: MatchTuple,
        http_session: requests.Session,
        logger: Optional[logging.Logger] = None) -> Tuple[MatchTuple, bool, Optional[str]]:
    """Check one link and return ``(match, reachable, failure_reason)``.

    HTTP(S) links are probed over the network via ``check_url``; anything
    else is treated as a repository-relative file path via ``check_path``
    (for which no reason string is produced).
    """
    reason: Optional[str] = None
    if match_tuple.link.startswith('http'):
        result_ok, reason = check_url(match_tuple, http_session)
    else:
        result_ok = check_path(match_tuple)
    # report progress to stdout when no logger is supplied
    if logger is None:
        print(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
    else:
        logger.info(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
    return match_tuple, result_ok, reason
def check_url(match_tuple: MatchTuple,
              http_session: requests.Session) -> Tuple[bool, str]:
    """Check if a URL is reachable.

    Issues a HEAD request with redirects enabled; a response is considered
    OK when ``response.ok`` holds or the status code is in OK_STATUS_CODES.
    """
    try:
        response = http_session.head(
            match_tuple.link, timeout=5, allow_redirects=True)
    except (requests.ConnectionError, requests.Timeout):
        return False, 'connection error'
    reachable = response.ok or response.status_code in OK_STATUS_CODES
    return reachable, f'status code = {response.status_code}'
def check_path(match_tuple: MatchTuple) -> bool:
    """Check if a file in this repository exists."""
    # drop any '#section' anchor before resolving the target path
    target = match_tuple.link.split('#')[0]
    # relative links are resolved against the directory of the source file
    base_dir = os.path.dirname(str(match_tuple.source))
    return os.path.exists(os.path.join(base_dir, target))
def main():
    """Scan every markdown file in the repo and report broken links.

    Exits with status 1 when at least one link is unreachable.
    """
    args = parse_args()
    # setup logger
    logger = MMLogger.get_instance(name='mmdet', log_file=args.out)
    # setup https_proxy
    if args.https_proxy:
        os.environ['https_proxy'] = args.https_proxy
    # setup http_session
    http_session = requests.Session()
    # retry transient failures and size the pool for the worker count
    for resource_prefix in ('http://', 'https://'):
        http_session.mount(
            resource_prefix,
            requests.adapters.HTTPAdapter(
                max_retries=5,
                pool_connections=20,
                pool_maxsize=args.num_threads),
        )
    logger.info('Finding all markdown files in the current directory...')
    project_root = (pathlib.Path(__file__).parent / '..').resolve()
    markdown_files = project_root.glob('**/*.md')
    all_matches = set()
    # matches inline markdown links [text](target), skipping images ![...]
    url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)')
    for markdown_file in markdown_files:
        with open(markdown_file) as handle:
            for line in handle.readlines():
                matches = url_regex.findall(line)
                for name, link in matches:
                    # localhost links cannot be validated; skip them
                    if 'localhost' not in link:
                        all_matches.add(
                            MatchTuple(
                                source=str(markdown_file),
                                name=name,
                                link=link))
    # NOTE(review): this counts link matches, not files — the message text
    # is misleading; verify before relying on it.
    logger.info(f' {len(all_matches)} markdown files found')
    logger.info('Checking to make sure we can retrieve each link...')
    # check links concurrently with a thread pool
    with Pool(processes=args.num_threads) as pool:
        results = pool.starmap(check_link, [(match, http_session, logger)
                                            for match in list(all_matches)])
    # collect unreachable results
    unreachable_results = [(match_tuple, reason)
                           for match_tuple, success, reason in results
                           if not success]
    if unreachable_results:
        logger.info('================================================')
        logger.info(f'Unreachable links ({len(unreachable_results)}):')
        for match_tuple, reason in unreachable_results:
            logger.info(' > Source: ' + match_tuple.source)
            logger.info(' Name: ' + match_tuple.name)
            logger.info(' Link: ' + match_tuple.link)
            if reason is not None:
                logger.info(' Reason: ' + reason)
        sys.exit(1)
    logger.info('No Unreachable link found.')
# script entry point
if __name__ == '__main__':
    main()
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine import Config
def parse_args():
    """Build and parse the CLI for generating the benchmark test script."""
    parser = argparse.ArgumentParser(
        description='Convert benchmark model list to script')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--port', type=int, default=29666, help='dist port')
    parser.add_argument('--run', action='store_true',
                        help='run script directly')
    parser.add_argument('--out', type=str,
                        help='path to save model benchmark script')
    return parser.parse_args()
def process_model_info(model_info, work_dir):
    """Normalize one model entry into the fields the test script needs.

    NOTE(review): the ``work_dir`` argument is ignored; the generated
    script always writes under the literal ``$WORK_DIR`` shell variable.

    Args:
        model_info (dict): Entry with 'config' and 'checkpoint' paths.
        work_dir: Unused; kept for interface compatibility.

    Returns:
        dict: config / job_name / work_dir / checkpoint for the script.
    """
    config_path = model_info['config'].strip()
    # the config file's basename (without extension) names the slurm job
    stem = osp.splitext(osp.basename(config_path))[0]
    return dict(
        config=config_path,
        job_name=stem,
        work_dir='$WORK_DIR/' + stem,
        checkpoint=model_info['checkpoint'].strip())
def create_test_bash_info(commands, model_test_dict, port, script_name,
                          partition):
    """Append the echo + slurm test command for one model to ``commands``.

    Mutates ``commands`` in place; nothing is returned.

    Args:
        commands (list): Accumulator of script fragments.
        model_test_dict (dict): Output of ``process_model_info``.
        port (int): Distributed port for this job.
        script_name (str): Path of the slurm test launcher script.
        partition (str): Slurm partition placeholder (e.g. '$PARTITION').
    """
    cfg = model_test_dict['config']
    # announce which config is being tested in the generated script
    commands.append(f" \necho '{cfg}' &")
    commands.append('\n')
    # assemble the slurm launch command, backgrounded with a trailing '&'
    parts = [
        'GPUS=8 GPUS_PER_NODE=8 ',
        f'CPUS_PER_TASK=$CPUS_PRE_TASK {script_name} ',
        f'{partition} ',
        f"{model_test_dict['job_name']} ",
        f'{cfg} ',
        f"$CHECKPOINT_DIR/{model_test_dict['checkpoint']} ",
        f"--work-dir {model_test_dict['work_dir']} ",
        f'--cfg-option env_cfg.dist_cfg.port={port} ',
        ' &',
    ]
    commands.append(''.join(parts))
def main():
    """Generate (and optionally execute) a bash script that tests every
    model listed in the benchmark config via slurm."""
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but get .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save/run/ the '
         'script) with the argument "--out" or "--run"')
    commands = []
    # script preamble: positional shell parameters used by every command
    partition_name = 'PARTITION=$1 '
    commands.append(partition_name)
    commands.append('\n')
    checkpoint_root = 'CHECKPOINT_DIR=$2 '
    commands.append(checkpoint_root)
    commands.append('\n')
    work_dir = 'WORK_DIR=$3 '
    commands.append(work_dir)
    commands.append('\n')
    cpus_pre_task = 'CPUS_PER_TASK=${4:-2} '
    commands.append(cpus_pre_task)
    commands.append('\n')
    script_name = osp.join('tools', 'slurm_test.sh')
    port = args.port
    cfg = Config.fromfile(args.config)
    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'])
            model_test_dict = process_model_info(model_info, work_dir)
            create_test_bash_info(commands, model_test_dict, port, script_name,
                                  '$PARTITION')
            # each job gets its own distributed port to avoid clashes
            port += 1
    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)
# script entry point
if __name__ == '__main__':
    main()
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
    """Build and parse the CLI for generating the benchmark train script."""
    parser = argparse.ArgumentParser(
        description='Convert benchmark model json to script')
    parser.add_argument('txt_path', type=str,
                        help='txt path output by benchmark_filter')
    parser.add_argument('--run', action='store_true',
                        help='run script directly')
    parser.add_argument('--out', type=str,
                        help='path to save model benchmark script')
    return parser.parse_args()
def determine_gpus(cfg_name):
    """Infer ``(total_gpus, gpus_per_node)`` from a config file name.

    The batch-size tokens embedded in mmdet config names ('16x', '4xb4')
    and the 'lad' model family determine the slurm resources; everything
    else gets the default 8-GPU single-node setup.
    """
    if '16x' in cfg_name:
        # 16-GPU configs span two nodes with 8 GPUs each
        return 16, 8
    if '4xb4' in cfg_name:
        return 4, 4
    if 'lad' in cfg_name:
        # LAD trains teacher and student, using two GPUs on one node
        return 2, 2
    return 8, 8
def main():
    """Generate (and optionally execute) a bash script that trains every
    config listed in the input txt file via slurm."""
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but get .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save/run/ the '
         'script) with the argument "--out" or "--run"')
    root_name = './tools'
    train_script_name = osp.join(root_name, 'slurm_train.sh')
    commands = []
    # script preamble: positional shell parameters used by every command
    partition_name = 'PARTITION=$1 '
    commands.append(partition_name)
    commands.append('\n')
    work_dir = 'WORK_DIR=$2 '
    commands.append(work_dir)
    commands.append('\n')
    cpus_pre_task = 'CPUS_PER_TASK=${3:-4} '
    commands.append(cpus_pre_task)
    commands.append('\n')
    commands.append('\n')
    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, cfg in enumerate(model_cfgs):
            cfg = cfg.strip()
            if len(cfg) == 0:
                continue
            # print cfg name
            echo_info = f'echo \'{cfg}\' &'
            commands.append(echo_info)
            commands.append('\n')
            fname, _ = osp.splitext(osp.basename(cfg))
            out_fname = '$WORK_DIR/' + fname
            # GPU resources are inferred from the config name
            gpus, gpus_pre_node = determine_gpus(cfg)
            command_info = f'GPUS={gpus} GPUS_PER_NODE={gpus_pre_node} ' \
                           f'CPUS_PER_TASK=$CPUS_PRE_TASK {train_script_name} '
            command_info += '$PARTITION '
            command_info += f'{fname} '
            command_info += f'{cfg} '
            command_info += f'{out_fname} '
            command_info += '--cfg-options default_hooks.checkpoint.' \
                            'max_keep_ckpts=1 '
            command_info += '&'
            commands.append(command_info)
            # NOTE(review): 'i < len(model_cfgs)' is always true for loop
            # indices, so a newline follows every command; possibly
            # 'i < len(model_cfgs) - 1' was intended — verify.
            if i < len(model_cfgs):
                commands.append('\n')
    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)
# script entry point
if __name__ == '__main__':
    main()
# Each line should be the relative path to the root directory
# of this repo. Support regular expression as well.
# For example:
.*/__init__.py
#!/bin/bash
# Run coverage and interrogate only on python files under mmdet/ that
# changed relative to ${REPO}/${BRANCH} and are not ignored by covignore.cfg.

# bugfix: quote the command substitution so the covignore.cfg path does not
# word-split/glob when the script's directory contains spaces.
readarray -t IGNORED_FILES < "$(dirname "$0")/covignore.cfg"

REUSE_COVERAGE_REPORT=${REUSE_COVERAGE_REPORT:-0}
REPO=${1:-"origin"}
BRANCH=${2:-"refactor_dev"}
git fetch $REPO $BRANCH

PY_FILES=""
for FILE_NAME in $(git diff --name-only ${REPO}/${BRANCH}); do
    # Only test python files in mmdet/ existing in current branch, and not ignored in covignore.cfg
    if [ ${FILE_NAME: -3} == ".py" ] && [ ${FILE_NAME:0:6} == "mmdet/" ] && [ -f "$FILE_NAME" ]; then
        IGNORED=false
        for IGNORED_FILE_NAME in "${IGNORED_FILES[@]}"; do
            # Skip blank lines
            if [ -z "$IGNORED_FILE_NAME" ]; then
                continue
            fi
            # Skip comment lines; each remaining entry is a regex pattern
            if [ "${IGNORED_FILE_NAME::1}" != "#" ] && [[ "$FILE_NAME" =~ $IGNORED_FILE_NAME ]]; then
                echo "Ignoring $FILE_NAME"
                IGNORED=true
                break
            fi
        done
        if [ "$IGNORED" = false ]; then
            PY_FILES="$PY_FILES $FILE_NAME"
        fi
    fi
done

# Only test the coverage when PY_FILES are not empty, otherwise they will test the entire project
if [ ! -z "${PY_FILES}" ]
then
    if [ "$REUSE_COVERAGE_REPORT" == "0" ]; then
        coverage run --branch --source mmdet -m pytest tests/
    fi
    coverage report --fail-under 80 -m $PY_FILES
    interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 95 $PY_FILES
fi
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import torch
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
    """Download one checkpoint, retrying with curl on failure.

    Args:
        url (str): Source URL of the checkpoint.
        out_file (str): Destination file path.
        min_bytes (float): Minimum plausible file size; smaller downloads
            are treated as failures and removed.
        progress (bool): Show torch.hub's progress bar.
    """
    # math.pow(1024, 2) is mean 1 MB
    assert_msg = f"Downloaded url '{url}' does not exist " \
                 f'or size is < min_bytes={min_bytes}'
    try:
        print(f'Downloading {url} to {out_file}...')
        torch.hub.download_url_to_file(url, str(out_file), progress=progress)
        assert osp.exists(
            out_file) and osp.getsize(out_file) > min_bytes, assert_msg
    except Exception as e:
        # first attempt failed: clean up and retry via curl with resume
        if osp.exists(out_file):
            os.remove(out_file)
        print(f'ERROR: {e}\nRe-attempting {url} to {out_file} ...')
        os.system(f"curl -L '{url}' -o '{out_file}' --retry 3 -C -"
                  )  # curl download, retry and resume on fail
    finally:
        # drop incomplete files and report the final outcome
        if osp.exists(out_file) and osp.getsize(out_file) < min_bytes:
            os.remove(out_file)  # remove partial downloads
        if not osp.exists(out_file):
            print(f'ERROR: {assert_msg}\n')
        print('=========================================\n')
def parse_args():
    """Build and parse the CLI for batch-downloading benchmark checkpoints."""
    parser = argparse.ArgumentParser(description='Download checkpoints')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('out', type=str,
                        help='output dir of checkpoints to be stored')
    parser.add_argument('--nproc', type=int, default=16,
                        help='num of Processes')
    parser.add_argument('--intranet', action='store_true',
                        help='switch to internal network url')
    return parser.parse_args()
# script body: collect missing checkpoints from the config and download
# them in parallel
if __name__ == '__main__':
    args = parse_args()
    mkdir_or_exist(args.out)
    cfg = Config.fromfile(args.config)
    checkpoint_url_list = []
    checkpoint_out_list = []
    for model in cfg:
        model_infos = cfg[model]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            checkpoint = model_info['checkpoint']
            out_file = osp.join(args.out, checkpoint)
            # skip checkpoints that are already present on disk
            if not osp.exists(out_file):
                url = model_info['url']
                # rewrite URLs for the internal network mirror when asked
                if args.intranet is True:
                    url = url.replace('.com', '.sensetime.com')
                    url = url.replace('https', 'http')
                checkpoint_url_list.append(url)
                checkpoint_out_list.append(out_file)
    if len(checkpoint_url_list) > 0:
        # download in parallel, bounded by CPU count and --nproc
        pool = Pool(min(os.cpu_count(), args.nproc))
        pool.starmap(download, zip(checkpoint_url_list, checkpoint_out_list))
    else:
        print('No files to download!')
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment