# Commit 7ae93d70 authored by limm
# add tests part code
# parent abaad570
# Pipeline #2815 canceled with stages
---
# Regression-test configuration for the MMAction2 codebase (video recognition).
# `globals` hosts shared paths, metric tolerances and anchor definitions; each
# backend section only defines reusable pipeline anchors; `models` wires models
# to the pipelines they are tested with.
globals:
  repo_url: https://github.com/open-mmlab/mmaction2/tree/main
  codebase_dir: ../mmaction2
  checkpoint_force_download: False
  images:
    video: &video ../mmaction2/demo/demo.mp4
  metric_info: &metric_info
    Top 1 Accuracy:  # named after metafile.Results.Metrics
      metric_key: acc/top1  # eval log key name
      tolerance: 1  # metric ±n%
      multi_value: 100
      dataset: Kinetics-400
    Top 5 Accuracy:
      metric_key: acc/top5
      tolerance: 1
      multi_value: 100
      dataset: Kinetics-400
  convert_image: &convert_image
    input_img: *video
    test_img: *video
  backend_test: &default_backend_test False
  sdk:
    sdk_dynamic: &sdk_dynamic ""

onnxruntime:
  pipeline_ort_static_fp32: &pipeline_ort_static_fp32
    convert_image: *convert_image
    deploy_config: configs/mmaction/video-recognition/video-recognition_onnxruntime_static.py
    backend_test: *default_backend_test
  pipeline_ort_static_fp16: &pipeline_ort_static_fp16
    convert_image: *convert_image
    deploy_config: configs/mmaction/video-recognition/video-recognition_onnxruntime-fp16_static.py
    backend_test: *default_backend_test

torchscript:
  pipeline_torchscript_fp32: &pipeline_torchscript_fp32
    convert_image: *convert_image
    deploy_config: configs/mmaction/video-recognition/video-recognition_torchscript.py
    backend_test: *default_backend_test

tensorrt:
  pipeline_trt_2d_static_fp32: &pipeline_trt_2d_static_fp32
    convert_image: *convert_image
    deploy_config: configs/mmaction/video-recognition/video-recognition_2d_tensorrt_static-224x224.py
    backend_test: *default_backend_test
  pipeline_trt_3d_static_fp32: &pipeline_trt_3d_static_fp32
    convert_image: *convert_image
    deploy_config: configs/mmaction/video-recognition/video-recognition_3d_tensorrt_static-256x256.py
    backend_test: *default_backend_test

models:
  - name: TSN
    metafile: configs/recognition/tsn/metafile.yml
    model_configs:
      - configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py
    pipelines:
      - *pipeline_ort_static_fp32
      - *pipeline_trt_2d_static_fp32
      - *pipeline_torchscript_fp32
  - name: SlowFast
    metafile: configs/recognition/slowfast/metafile.yml
    model_configs:
      - configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py
    pipelines:
      - *pipeline_ort_static_fp32
      - *pipeline_trt_3d_static_fp32
      - *pipeline_torchscript_fp32
  - name: TSM
    metafile: configs/recognition/tsm/metafile.yml
    model_configs:
      - configs/recognition/tsm/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb.py
    pipelines:
      - *pipeline_ort_static_fp32
      - *pipeline_torchscript_fp32
  - name: X3D
    metafile: configs/recognition/x3d/metafile.yml
    model_configs:
      - configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py
    pipelines:
      - *pipeline_ort_static_fp32
      - *pipeline_torchscript_fp32
---
# Regression-test configuration for the MMagic codebase (super-resolution).
globals:
  repo_url: https://github.com/open-mmlab/mmagic/tree/main
  codebase_dir: ../mmagic
  checkpoint_force_download: False
  images:
    img_face: &img_face ../mmagic/tests/data/image/face/000001.png
    img_bg: &img_bg ../mmagic/tests/data/image/gt/baboon.png
  metric_info: &metric_info
    Set14 PSNR: # named after metafile.Results.Metrics
      metric_key: Set14/PSNR # eval log key name
      tolerance: 4 # metric ±n%
    Set14 SSIM:
      metric_key: Set14/SSIM
      tolerance: 0.02 # metric ±n
    Set5 PSNR: # named after metafile.Results.Metrics
      metric_key: Set5/PSNR # eval log key name
      tolerance: 4 # metric ±n%
    Set5 SSIM:
      metric_key: Set5/SSIM
      tolerance: 0.02 # metric ±n
    DIV2K PSNR: # named after metafile.Results.Metrics
      metric_key: DIV2K/PSNR # eval log key name
      tolerance: 4 # metric ±n%
    DIV2K SSIM:
      metric_key: DIV2K/SSIM
      tolerance: 0.02 # metric ±n
    PSNR: # named after metafile.Results.Metrics
      metric_key: PSNR # eval log key name
      tolerance: 4 # metric ±n%
    SSIM:
      metric_key: SSIM
      tolerance: 0.02 # metric ±n
  convert_image: &convert_image
    input_img: *img_face
    test_img: *img_bg
  backend_test: &default_backend_test True
  sdk:
    sdk_dynamic: &sdk_dynamic configs/mmagic/super-resolution/super-resolution_sdk_dynamic.py

onnxruntime:
  pipeline_ort_static_fp32: &pipeline_ort_static_fp32
    convert_image: *convert_image
    deploy_config: configs/mmagic/super-resolution/super-resolution_onnxruntime_static.py
  pipeline_ort_dynamic_fp32: &pipeline_ort_dynamic_fp32
    convert_image: *convert_image
    deploy_config: configs/mmagic/super-resolution/super-resolution_onnxruntime_dynamic.py
  pipeline_ort_dynamic_fp16: &pipeline_ort_dynamic_fp16
    convert_image: *convert_image
    deploy_config: configs/mmagic/super-resolution/super-resolution_onnxruntime-fp16_dynamic.py

tensorrt:
  pipeline_trt_static_fp32: &pipeline_trt_static_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_dynamic
    deploy_config: configs/mmagic/super-resolution/super-resolution_tensorrt_static-256x256.py
  pipeline_trt_static_fp16: &pipeline_trt_static_fp16
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_dynamic
    deploy_config: configs/mmagic/super-resolution/super-resolution_tensorrt-fp16_static-256x256.py
  pipeline_trt_static_int8: &pipeline_trt_static_int8
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_dynamic
    deploy_config: configs/mmagic/super-resolution/super-resolution_tensorrt-int8_static-256x256.py
  pipeline_trt_dynamic_fp32: &pipeline_trt_dynamic_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_dynamic
    deploy_config: configs/mmagic/super-resolution/super-resolution_tensorrt_dynamic-32x32-512x512.py
  pipeline_trt_dynamic_fp16: &pipeline_trt_dynamic_fp16
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_dynamic
    deploy_config: configs/mmagic/super-resolution/super-resolution_tensorrt-fp16_dynamic-32x32-512x512.py
  pipeline_trt_dynamic_int8: &pipeline_trt_dynamic_int8
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_dynamic
    deploy_config: configs/mmagic/super-resolution/super-resolution_tensorrt-int8_dynamic-32x32-512x512.py

openvino:
  pipeline_openvino_dynamic_fp32: &pipeline_openvino_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmagic/super-resolution/super-resolution_openvino_dynamic-256x256.py

ncnn:
  pipeline_ncnn_dynamic_fp32: &pipeline_ncnn_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmagic/super-resolution/super-resolution_ncnn_dynamic.py

pplnn:
  pipeline_pplnn_dynamic_fp32: &pipeline_pplnn_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmagic/super-resolution/super-resolution_pplnn_dynamic-32x32.py

torchscript:
  pipeline_ts_fp32: &pipeline_ts_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmagic/super-resolution/super-resolution_torchscript.py

models:
  - name: SRCNN
    metafile: configs/srcnn/metafile.yml
    model_configs:
      - configs/srcnn/srcnn_x4k915_1xb16-1000k_div2k.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      # - *pipeline_trt_dynamic_fp32
      - *pipeline_trt_dynamic_fp16
      # - *pipeline_trt_dynamic_int8
      - *pipeline_ncnn_dynamic_fp32
      # - *pipeline_pplnn_dynamic_fp32
      # - *pipeline_openvino_dynamic_fp32
  - name: ESRGAN
    metafile: configs/esrgan/metafile.yml
    model_configs:
      - configs/esrgan/esrgan_x4c64b23g32_1xb16-400k_div2k.py
      - configs/esrgan/esrgan_psnr-x4c64b23g32_1xb16-1000k_div2k.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp16
      - *pipeline_ncnn_dynamic_fp32
      # - *pipeline_pplnn_dynamic_fp32
      # - *pipeline_openvino_dynamic_fp32
  - name: SRGAN
    metafile: configs/srgan_resnet/metafile.yml
    model_configs:
      - configs/srgan_resnet/srgan_x4c64b16_1xb16-1000k_div2k.py
      - configs/srgan_resnet/msrresnet_x4c64b16_1xb16-1000k_div2k.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp16
      - *pipeline_ncnn_dynamic_fp32
      # - *pipeline_pplnn_dynamic_fp32
      # - *pipeline_openvino_dynamic_fp32
  - name: Real-ESRGAN
    metafile: configs/real_esrgan/metafile.yml
    model_configs:
      - configs/real_esrgan/realesrgan_c64b23g32_4xb12-lr1e-4-400k_df2k-ost.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp16
      - *pipeline_ncnn_dynamic_fp32
      # - *pipeline_pplnn_dynamic_fp32
      # - *pipeline_openvino_dynamic_fp32
  - name: EDSR
    metafile: configs/edsr/metafile.yml
    model_configs:
      - configs/edsr/edsr_x2c64b16_1xb16-300k_div2k.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp16
      # - *pipeline_ncnn_dynamic_fp32
      # - *pipeline_openvino_dynamic_fp32
  - name: RDN
    metafile: configs/rdn/metafile.yml
    model_configs:
      - configs/rdn/rdn_x2c64b16_1xb16-1000k_div2k.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp16
      # - *pipeline_ncnn_dynamic_fp32
      # - *pipeline_openvino_dynamic_fp32
---
# Regression-test configuration for the MMDetection codebase
# (2D detection, instance segmentation, panoptic segmentation).
globals:
  repo_url: https://github.com/open-mmlab/mmdetection/tree/main
  codebase_dir: ../mmdetection
  checkpoint_force_download: False
  images:
    input_img: &input_img ../mmdetection/demo/demo.jpg
    test_img: &test_img ./tests/data/tiger.jpeg
  metric_info: &metric_info
    box AP: # named after metafile.Results.Metrics
      metric_key: coco/bbox_mAP # eval OrderedDict key name
      tolerance: 0.2 # metric ±n%
      multi_value: 100
    mask AP:
      metric_key: coco/segm_mAP
      tolerance: 1 # metric ±n%
      multi_value: 100
    PQ:
      metric_key: 'coco_panoptic/PQ'
      tolerance: 0.5 # metric ±n%
  convert_image: &convert_image
    input_img: *input_img
    test_img: *test_img
  backend_test: &default_backend_test True
  sdk:
    sdk_static: &sdk_static configs/mmdet/detection/detection_sdk_static.py
    sdk_dynamic: &sdk_dynamic configs/mmdet/detection/detection_sdk_dynamic.py
    # sdk_seg_static: &sdk_seg_static configs/mmdet/instance-seg/instance-seg_sdk_static.py
    sdk_seg_dynamic: &sdk_seg_dynamic configs/mmdet/instance-seg/instance-seg_sdk_dynamic.py

onnxruntime:
  pipeline_ort_static_fp32: &pipeline_ort_static_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_onnxruntime_static.py
  pipeline_ort_dynamic_fp32: &pipeline_ort_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_onnxruntime_dynamic.py
  pipeline_ort_dynamic_fp16: &pipeline_ort_dynamic_fp16
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_onnxruntime-fp16_dynamic.py
  pipeline_seg_ort_static_fp32: &pipeline_seg_ort_static_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/instance-seg/instance-seg_onnxruntime_static.py
  pipeline_seg_ort_dynamic_fp32: &pipeline_seg_ort_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/instance-seg/instance-seg_onnxruntime_dynamic.py
  pipeline_seg_ort_dynamic_fp16: &pipeline_seg_ort_dynamic_fp16
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/instance-seg/instance-seg_onnxruntime-fp16_dynamic.py

tensorrt:
  pipeline_trt_static_fp32: &pipeline_trt_static_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_tensorrt_static-800x1344.py
  pipeline_trt_static_fp16: &pipeline_trt_static_fp16
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_tensorrt-fp16_static-800x1344.py
  pipeline_trt_static_int8: &pipeline_trt_static_int8
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_tensorrt-int8_static-800x1344.py
  pipeline_trt_dynamic_fp32: &pipeline_trt_dynamic_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_dynamic
    deploy_config: configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py
  pipeline_trt_dynamic_fp16: &pipeline_trt_dynamic_fp16
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_dynamic
    deploy_config: configs/mmdet/detection/detection_tensorrt-fp16_dynamic-320x320-1344x1344.py
  pipeline_trt_dynamic_int8: &pipeline_trt_dynamic_int8
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_dynamic
    deploy_config: configs/mmdet/detection/detection_tensorrt-int8_dynamic-320x320-1344x1344.py
  # ============= seg ================
  pipeline_seg_trt_static_fp32: &pipeline_seg_trt_static_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_seg_dynamic
    deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt_static-800x1344.py
  pipeline_seg_trt_static_fp16: &pipeline_seg_trt_static_fp16
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_seg_dynamic
    deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt-fp16_static-800x1344.py
  pipeline_seg_trt_static_int8: &pipeline_seg_trt_static_int8
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_seg_dynamic
    deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt-int8_static-800x1344.py
  pipeline_seg_trt_dynamic_fp32: &pipeline_seg_trt_dynamic_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_seg_dynamic
    deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt_dynamic-320x320-1344x1344.py
  pipeline_seg_trt_dynamic_fp16: &pipeline_seg_trt_dynamic_fp16
    convert_image: *convert_image
    backend_test: *default_backend_test
    sdk_config: *sdk_seg_dynamic
    deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt-fp16_dynamic-320x320-1344x1344.py
  pipeline_seg_trt_dynamic_int8: &pipeline_seg_trt_dynamic_int8
    convert_image: *convert_image
    backend_test: *default_backend_test
    # sdk_config: *sdk_seg_dynamic
    deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt-int8_dynamic-320x320-1344x1344.py

openvino:
  pipeline_openvino_dynamic_fp32: &pipeline_openvino_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/detection_openvino_dynamic-800x1344.py
  # ============= seg ================
  pipeline_seg_openvino_dynamic_fp32: &pipeline_seg_openvino_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/instance-seg/instance-seg_openvino_dynamic-800x1344.py

ncnn:
  pipeline_ncnn_static_fp32: &pipeline_ncnn_static_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/single-stage_ncnn_static-800x1344.py
  pipeline_ncnn_dynamic_fp32: &pipeline_ncnn_dynamic_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet/detection/single-stage_ncnn_dynamic.py

pplnn:
  pipeline_pplnn_dynamic_fp32: &pipeline_pplnn_dynamic_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    deploy_config: configs/mmdet/detection/detection_pplnn_dynamic-800x1344.py
  # ============= seg ================
  pipeline_seg_pplnn_dynamic_fp32: &pipeline_seg_pplnn_dynamic_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    deploy_config: configs/mmdet/instance-seg/instance-seg_pplnn_dynamic-800x1344.py

torchscript:
  pipeline_ts_fp32: &pipeline_ts_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    deploy_config: configs/mmdet/detection/detection_torchscript.py
  # ============= seg ================
  pipeline_seg_ts_fp32: &pipeline_seg_ts_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    deploy_config: configs/mmdet/instance-seg/instance-seg_torchscript.py

models:
  - name: YOLOV3
    metafile: configs/yolo/metafile.yml
    model_configs:
      - configs/yolo/yolov3_d53_8xb8-320-273e_coco.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - deploy_config: configs/mmdet/detection/detection_tensorrt-fp16_dynamic-64x64-608x608.py
        convert_image: *convert_image
        backend_test: *default_backend_test
        sdk_config: *sdk_dynamic
      - *pipeline_ncnn_static_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: SSD
    metafile: configs/ssd/metafile.yml
    model_configs:
      - configs/ssd/ssd300_coco.py
    pipelines: # special cases
      - *pipeline_ts_fp32
      - *pipeline_ort_static_fp32
      - deploy_config: configs/mmdet/detection/detection_tensorrt_dynamic-300x300-512x512.py
        convert_image: *convert_image
        backend_test: *default_backend_test
        sdk_config: *sdk_dynamic
      - deploy_config: configs/mmdet/detection/single-stage_ncnn_static-300x300.py
        convert_image: *convert_image
        backend_test: False
  - name: RetinaNet
    metafile: configs/retinanet/metafile.yml
    model_configs:
      - configs/retinanet/retinanet_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_ncnn_static_fp32
      - *pipeline_pplnn_dynamic_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: Cascade Mask R-CNN
    metafile: configs/cascade_rcnn/metafile.yml
    model_configs:
      - configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_seg_ts_fp32
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_trt_dynamic_fp32
      - *pipeline_seg_openvino_dynamic_fp32
  - name: FCOS
    metafile: configs/fcos/metafile.yml
    model_configs:
      - configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_ncnn_static_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: FSAF
    metafile: configs/fsaf/metafile.yml
    model_configs:
      - configs/fsaf/fsaf_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_ncnn_static_fp32
      - *pipeline_pplnn_dynamic_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: YOLOX
    metafile: configs/yolox/metafile.yml
    model_configs:
      - configs/yolox/yolox_s_8xb8-300e_coco.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_ncnn_static_fp32
      - deploy_config: configs/mmdet/detection/detection_openvino_dynamic-640x640.py
        convert_image: *convert_image
        backend_test: False
  - name: Faster R-CNN
    metafile: configs/faster_rcnn/metafile.yml
    model_configs:
      - configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_ts_fp32
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_ncnn_static_fp32
      - *pipeline_pplnn_dynamic_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: ATSS
    metafile: configs/atss/metafile.yml
    model_configs:
      - configs/atss/atss_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: Cascade R-CNN
    metafile: configs/cascade_rcnn/metafile.yml
    model_configs:
      - configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_pplnn_dynamic_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: GFL
    metafile: configs/gfl/metafile.yml
    model_configs:
      - configs/gfl/gfl_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
      - *pipeline_openvino_dynamic_fp32
  - name: DETR
    metafile: configs/detr/metafile.yml
    model_configs:
      - configs/detr/detr_r50_8xb2-150e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
  - name: CenterNet
    metafile: configs/centernet/metafile.yml
    model_configs:
      - configs/centernet/centernet_r18_8xb16-crop512-140e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - deploy_config: configs/mmdet/detection/detection_tensorrt-fp16_dynamic-64x64-800x800.py
        convert_image: *convert_image
        backend_test: *default_backend_test
        sdk_config: *sdk_dynamic
  - name: Mask R-CNN
    metafile: configs/mask_rcnn/metafile.yml
    model_configs:
      - configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_seg_ts_fp32
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_trt_dynamic_fp32
      - *pipeline_seg_openvino_dynamic_fp32
  - name: Swin Transformer
    metafile: configs/swin/metafile.yml
    model_configs:
      - configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py
    pipelines:
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_trt_dynamic_fp32
  - name: RTMDet
    metafile: configs/rtmdet/metafile.yml
    model_configs:
      - configs/rtmdet/rtmdet_s_8xb32-300e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - deploy_config: configs/mmdet/detection/detection_tensorrt_static-640x640.py
        convert_image: *convert_image
        backend_test: *default_backend_test
      - deploy_config: configs/mmdet/detection/single-stage_ncnn_static-640x640.py
        convert_image: *convert_image
  - name: SOLO
    metafile: configs/solo/metafile.yml
    model_configs:
      - configs/solo/solo_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_openvino_dynamic_fp32
  - name: SOLOV2
    metafile: configs/solov2/metafile.yml
    model_configs:
      - configs/solov2/solov2_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_openvino_dynamic_fp32
  - name: RTMDetInst
    metafile: configs/rtmdet/metafile.yml
    model_configs:
      - configs/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py
    pipelines:
      - *pipeline_seg_trt_dynamic_fp32
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_openvino_dynamic_fp32
  - name: PanopticFPN
    metafile: configs/panoptic_fpn/metafile.yml
    model_configs:
      - configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
    pipelines:
      - deploy_config: configs/mmdet/panoptic-seg/panoptic-seg_panoptic-fpn_onnxruntime_dynamic.py
        convert_image: *convert_image
        backend_test: False
      - deploy_config: configs/mmdet/panoptic-seg/panoptic-seg_panoptic-fpn_tensorrt_dynamic-352x512-1344x1344.py
        convert_image: *convert_image
        backend_test: True
  - name: MaskFormer
    metafile: configs/maskformer/metafile.yml
    model_configs:
      - configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py
    pipelines:
      - deploy_config: configs/mmdet/panoptic-seg/panoptic-seg_maskformer_onnxruntime_dynamic.py
        convert_image: *convert_image
        backend_test: False
      - deploy_config: configs/mmdet/panoptic-seg/panoptic-seg_maskformer_tensorrt_dynamic-320x512-1344x1344.py
        convert_image: *convert_image
        backend_test: True
  - name: Mask2Former
    metafile: configs/mask2former/metafile.yml
    model_configs:
      - configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
    pipelines:
      - deploy_config: configs/mmdet/panoptic-seg/panoptic-seg_maskformer_onnxruntime_dynamic.py
        convert_image: *convert_image
        backend_test: *default_backend_test
      - deploy_config: configs/mmdet/panoptic-seg/panoptic-seg_maskformer_tensorrt_static-800x1344.py
        convert_image: *convert_image
        backend_test: False
  - name: DINO
    metafile: configs/dino/metafile.yml
    model_configs:
      - configs/dino/dino-4scale_r50_8xb2-12e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
  - name: ConditionalDETR
    metafile: configs/conditional_detr/metafile.yml
    model_configs:
      - configs/conditional_detr/conditional-detr_r50_8xb2-50e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
  - name: DAB-DETR
    metafile: configs/dab_detr/metafile.yml
    model_configs:
      - configs/dab_detr/dab-detr_r50_8xb2-50e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
  - name: DeformableDETR
    metafile: configs/deformable_detr/metafile.yml
    model_configs:
      - configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py
    pipelines:
      - *pipeline_ort_dynamic_fp32
      - *pipeline_trt_dynamic_fp32
  - name: CondInst
    metafile: configs/condinst/metafile.yml
    model_configs:
      - configs/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py
    pipelines:
      # NOTE(review): these two inline pipelines carry no convert_image,
      # unlike every other inline pipeline in this file — confirm intended.
      - deploy_config: configs/mmdet/instance-seg/instance-seg_onnxruntime_dynamic.py
        backend_test: *default_backend_test
      - deploy_config: configs/mmdet/instance-seg/instance-seg_tensorrt_dynamic-320x320-1344x1344.py
        backend_test: *default_backend_test
  - name: HTC
    metafile: configs/htc/metafile.yml
    model_configs:
      - configs/htc/htc_r50_fpn_1x_coco.py
    pipelines:
      - *pipeline_seg_ort_dynamic_fp32
      - *pipeline_seg_trt_dynamic_fp32
---
# Regression-test configuration for the MMDetection3D codebase
# (voxel-based and monocular 3D detection on KITTI / nuScenes inputs).
globals:
  repo_url: https://github.com/open-mmlab/mmdetection3d/tree/main
  codebase_dir: ../mmdetection3d
  checkpoint_force_download: False
  images:
    kitti_input: &kitti_input ../mmdetection3d/demo/data/kitti/000008.bin
    nus_input: &nus_input tests/data/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151612397179.pcd.bin
    nus_pkl_input: &nus_pkl_input tests/test_codebase/test_mmdet3d/data/nuscenes/n015-2018-07-24-11-22-45+0800.pkl
  metric_info: &metric_info
    AP: # named after metafile.Results.Metrics
      eval_name: bbox # test.py --metrics args
      metric_key: bbox_mAP # eval OrderedDict key name
      tolerance: 1 # metric ±n%
      task_name: 3D Object Detection # metafile.Results.Task
      dataset: KITTI # metafile.Results.Dataset
    mAP:
      eval_name: bbox
      metric_key: bbox_mAP
      tolerance: 1 # metric ±n%
      task_name: 3D Object Detection
      dataset: nuScenes
    NDS:
      eval_name: bbox
      metric_key: bbox_mAP
      tolerance: 1 # metric ±n%
      task_name: 3D Object Detection
      dataset: nuScenes
  backend_test: &default_backend_test False
  convert_image: &convert_image
    input_img: *kitti_input
    test_img: *kitti_input
  convert_image_nus: &convert_image_nus
    input_img: *nus_input
    test_img: *nus_input
  convert_image_nus_pkl: &convert_image_nus_pkl
    input_img: *nus_pkl_input
    test_img: *nus_pkl_input

onnxruntime:
  # ======= voxel-detection =======
  pipeline_ort_voxel_dynamic_kitti_fp32: &pipeline_ort_voxel_dynamic_kitti_fp32
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_onnxruntime_dynamic.py
  pipeline_ort_voxel_dynamic_kitti_fp16: &pipeline_ort_voxel_dynamic_kitti_fp16
    convert_image: *convert_image
    backend_test: False
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_onnxruntime-fp16_dynamic.py
  pipeline_ort_voxel_dynamic_nus_fp32: &pipeline_ort_voxel_dynamic_nus_fp32
    convert_image: *convert_image_nus
    backend_test: False
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_onnxruntime_dynamic.py
  # ======= mono-detection =======
  pipeline_ort_mono_dynamic_nus_fp32: &pipeline_ort_mono_dynamic_nus_fp32
    convert_image: *convert_image_nus_pkl
    backend_test: False
    deploy_config: configs/mmdet3d/mono-detection/mono-detection_onnxruntime_dynamic.py
  pipeline_ort_mono_dynamic_nus_fp16: &pipeline_ort_mono_dynamic_nus_fp16
    convert_image: *convert_image_nus_pkl
    backend_test: False
    deploy_config: configs/mmdet3d/mono-detection/mono-detection_onnxruntime-fp16_dynamic.py

tensorrt:
  # ======= voxel-detection =======
  pipeline_trt_voxel_dynamic_nus_fp32_64x4: &pipeline_trt_voxel_dynamic_nus_fp32_64x4
    convert_image: *convert_image_nus
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_tensorrt_dynamic-nus-64x4.py
  pipeline_trt_voxel_dynamic_nus_fp32_20x5: &pipeline_trt_voxel_dynamic_nus_fp32_20x5
    convert_image: *convert_image_nus
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_tensorrt_dynamic-nus-20x5.py
  pipeline_trt_voxel_dynamic_kitti_fp32: &pipeline_trt_voxel_dynamic_kitti_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_tensorrt_dynamic-kitti-32x4.py
  # ======= mono-detection =======
  pipeline_trt_mono_dynamic_nus_fp32: &pipeline_trt_mono_dynamic_nus_fp32
    convert_image: *convert_image_nus_pkl
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/mono-detection/mono-detection_tensorrt_dynamic-320x320-1344x1344.py

openvino:
  # ======= voxel-detection =======
  pipeline_openvino_voxel_dynamic_kitti_fp32: &pipeline_openvino_voxel_dynamic_kitti_fp32
    convert_image: *convert_image
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_openvino_dynamic-kitti-32x4.py
  pipeline_openvino_voxel_dynamic_nus_fp32_64x4: &pipeline_openvino_voxel_dynamic_nus_fp32_64x4
    convert_image: *convert_image_nus
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_openvino_dynamic-nus-64x4.py
  pipeline_openvino_voxel_dynamic_nus_fp32_20x5: &pipeline_openvino_voxel_dynamic_nus_fp32_20x5
    convert_image: *convert_image_nus
    backend_test: *default_backend_test
    deploy_config: configs/mmdet3d/voxel-detection/voxel-detection_openvino_dynamic-nus-20x5.py

models:
  - name: PointPillars
    metafile: configs/pointpillars/metafile.yml
    model_configs:
      - configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py
      - configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py
    pipelines:
      - *pipeline_ort_voxel_dynamic_kitti_fp16
      - *pipeline_openvino_voxel_dynamic_kitti_fp32
      - *pipeline_trt_voxel_dynamic_kitti_fp32
  - name: PointPillars
    metafile: configs/pointpillars/metafile.yml
    model_configs:
      - configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py
    pipelines:
      - *pipeline_ort_voxel_dynamic_nus_fp32
      - *pipeline_openvino_voxel_dynamic_nus_fp32_64x4
      - *pipeline_trt_voxel_dynamic_nus_fp32_64x4
  - name: CenterPoint
    metafile: configs/centerpoint/metafile.yml
    model_configs:
      - configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
    pipelines:
      - *pipeline_ort_voxel_dynamic_nus_fp32
      - *pipeline_openvino_voxel_dynamic_nus_fp32_20x5
      - *pipeline_trt_voxel_dynamic_nus_fp32_20x5
  - name: SMOKE
    metafile: configs/smoke/metafile.yml
    model_configs:
      - configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py
    pipelines:
      - *pipeline_ort_mono_dynamic_nus_fp32
      - *pipeline_trt_mono_dynamic_nus_fp32
globals:
repo_url: https://github.com/open-mmlab/mmocr/tree/main
codebase_dir: ../mmocr
checkpoint_force_download: False
images:
img_densetext_det: &img_densetext_det ../mmocr/demo/demo_densetext_det.jpg
img_demo_text_det: &img_demo_text_det ../mmocr/demo/demo_text_det.jpg
img_demo_text_recog: &img_demo_text_recog ../mmocr/demo/demo_text_recog.jpg
metric_info: &metric_info
hmean-iou: # named after metafile.Results.Metrics
metric_key: icdar/hmean # eval key name
tolerance: 0.15 # metric ±n%
IIIT5K word_acc:
metric_key: IIIT5K/recog/word_acc_ignore_case_symbol
tolerance: 0.05 # metric ±n%
dataset: IIIT5K
SVT word_acc:
metric_key: SVT/recog/word_acc_ignore_case_symbol
tolerance: 0.05 # metric ±n%
dataset: SVT
SVTP word_acc:
metric_key: SVTP/recog/word_acc_ignore_case_symbol
tolerance: 0.05 # metric ±n%
dataset: SVTP
convert_image_det: &convert_image_det
input_img: *img_densetext_det
test_img: *img_demo_text_det
convert_image_rec: &convert_image_rec
input_img: *img_demo_text_recog
test_img: *img_demo_text_recog
backend_test: &default_backend_test True
sdk:
sdk_detection_dynamic: &sdk_detection_dynamic configs/mmocr/text-detection/text-detection_sdk_dynamic.py
sdk_recognition_dynamic: &sdk_recognition_dynamic configs/mmocr/text-recognition/text-recognition_sdk_dynamic.py
onnxruntime:
# ======= detection =======
pipeline_ort_detection_static_fp32: &pipeline_ort_detection_static_fp32
convert_image: *convert_image_det
deploy_config: configs/mmocr/text-detection/text-detection_onnxruntime_static.py
pipeline_ort_detection_dynamic_fp32: &pipeline_ort_detection_dynamic_fp32
convert_image: *convert_image_det
deploy_config: configs/mmocr/text-detection/text-detection_onnxruntime_dynamic.py
pipeline_ort_detection_dynamic_fp16: &pipeline_ort_detection_dynamic_fp16
convert_image: *convert_image_det
deploy_config: configs/mmocr/text-detection/text-detection_onnxruntime-fp16_dynamic.py
pipeline_ort_detection_mrcnn_dynamic_fp32: &pipeline_ort_detection_mrcnn_dynamic_fp32
convert_image: *convert_image_det
deploy_config: configs/mmocr/text-detection/text-detection_mrcnn_onnxruntime_dynamic.py
# ======= recognition =======
pipeline_ort_recognition_static_fp32: &pipeline_ort_recognition_static_fp32
convert_image: *convert_image_rec
deploy_config: configs/mmocr/text-recognition/text-recognition_onnxruntime_static.py
pipeline_ort_recognition_dynamic_fp32: &pipeline_ort_recognition_dynamic_fp32
convert_image: *convert_image_rec
deploy_config: configs/mmocr/text-recognition/text-recognition_onnxruntime_dynamic.py
pipeline_ort_recognition_dynamic_fp16: &pipeline_ort_recognition_dynamic_fp16
convert_image: *convert_image_rec
deploy_config: configs/mmocr/text-recognition/text-recognition_onnxruntime-fp16_dynamic.py
tensorrt:
# ======= detection =======
pipeline_trt_detection_static_fp32: &pipeline_trt_detection_static_fp32
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_tensorrt_static-512x512.py
pipeline_trt_detection_static_fp16: &pipeline_trt_detection_static_fp16
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_tensorrt-fp16_static-512x512.py
pipeline_trt_detection_static_int8: &pipeline_trt_detection_static_int8
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_tensorrt-int8_static-512x512.py
pipeline_trt_detection_dynamic_fp32: &pipeline_trt_detection_dynamic_fp32
convert_image: *convert_image_det
backend_test: *default_backend_test
deploy_config: configs/mmocr/text-detection/text-detection_tensorrt_dynamic-320x320-2240x2240.py
pipeline_trt_detection_mrcnn_dynamic_fp32: &pipeline_trt_detection_mrcnn_dynamic_fp32
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_mrcnn_tensorrt_dynamic-320x320-2240x2240.py
pipeline_trt_detection_dynamic_fp16: &pipeline_trt_detection_dynamic_fp16
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_tensorrt-fp16_dynamic-320x320-2240x2240.py
pipeline_trt_detection_mrcnn_dynamic_fp16: &pipeline_trt_detection_mrcnn_dynamic_fp16
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_mrcnn_tensorrt-fp16_dynamic-320x320-2240x2240.py
pipeline_trt_detection_dynamic_int8: &pipeline_trt_detection_dynamic_int8
convert_image: *convert_image_det
backend_test: *default_backend_test
sdk_config: *sdk_detection_dynamic
deploy_config: configs/mmocr/text-detection/text-detection_tensorrt-int8_dynamic-320x320-2240x2240.py
# ======= recognition =======
pipeline_trt_recognition_static_fp32_C1: &pipeline_trt_recognition_static_fp32_C1
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt_static-1x32x32.py
# ABINet models with static shape 32x128
pipeline_trt_recognition_static_fp32_C3: &pipeline_trt_recognition_static_fp32_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt_static-32x128.py
pipeline_trt_recognition_static_fp16_C3: &pipeline_trt_recognition_static_fp16_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-fp16_static-32x128.py
# SAR models with height 48 and channel 3
pipeline_trt_recognition_dynamic_fp32_H48_C3: &pipeline_trt_recognition_dynamic_fp32_H48_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-48x64-48x640.py
pipeline_trt_recognition_dynamic_fp16_H48_C3: &pipeline_trt_recognition_dynamic_fp16_H48_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-fp16_dynamic-48x64-48x640.py
pipeline_trt_recognition_dynamic_int8_H48_C3: &pipeline_trt_recognition_dynamic_int8_H48_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-int8_dynamic-48x64-48x640.py
# CRNN models with height 32 and channel 1
pipeline_trt_recognition_dynamic_fp32_H32_C1: &pipeline_trt_recognition_dynamic_fp32_H32_C1
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-1x32x32-1x32x640.py
pipeline_trt_recognition_dynamic_fp16_H32_C1: &pipeline_trt_recognition_dynamic_fp16_H32_C1
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-fp16_dynamic-1x32x32-1x32x640.py
pipeline_trt_recognition_dynamic_int8_H32_C1: &pipeline_trt_recognition_dynamic_int8_H32_C1
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-int8_dynamic-1x32x32-1x32x640.py
# SATRN models with height 32 and channel 3
pipeline_trt_recognition_dynamic_fp32_H32_C3: &pipeline_trt_recognition_dynamic_fp32_H32_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-32x32-32x640.py
pipeline_trt_recognition_dynamic_fp16_H32_C3: &pipeline_trt_recognition_dynamic_fp16_H32_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-fp16_dynamic-32x32-32x640.py
pipeline_trt_recognition_dynamic_int8_H32_C3: &pipeline_trt_recognition_dynamic_int8_H32_C3
convert_image: *convert_image_rec
backend_test: *default_backend_test
sdk_config: *sdk_recognition_dynamic
deploy_config: configs/mmocr/text-recognition/text-recognition_tensorrt-int8_dynamic-32x32-32x640.py
openvino:
pipeline_openvino_detection_dynamic_fp32: &pipeline_openvino_detection_dynamic_fp32
convert_image: *convert_image_det
backend_test: *default_backend_test
deploy_config: configs/mmocr/text-detection/text-detection_openvino_dynamic-640x640.py
# pipeline_openvino_recognition_dynamic_fp32: &pipeline_openvino_recognition_dynamic_fp32
# convert_image: *convert_image_rec
# backend_test: *default_backend_test
# deploy_config:
ncnn:
pipeline_ncnn_detection_static_fp32: &pipeline_ncnn_detection_static_fp32
convert_image: *convert_image_det
backend_test: False
deploy_config: configs/mmocr/text-detection/text-detection_ncnn_static.py
pipeline_ncnn_recognition_static_fp32: &pipeline_ncnn_recognition_static_fp32
convert_image: *convert_image_rec
backend_test: False
deploy_config: configs/mmocr/text-recognition/text-recognition_ncnn_static.py
pplnn:
pipeline_pplnn_detection_dynamic_fp32: &pipeline_pplnn_detection_dynamic_fp32
convert_image: *convert_image_det
backend_test: False
deploy_config: configs/mmocr/text-detection/text-detection_pplnn_dynamic-640x640.py
pipeline_pplnn_recognition_dynamic_fp32: &pipeline_pplnn_recognition_dynamic_fp32
convert_image: *convert_image_rec
backend_test: False
deploy_config: configs/mmocr/text-recognition/text-recognition_pplnn_dynamic-1x32x32.py
torchscript:
pipeline_ts_detection_fp32: &pipeline_ts_detection_fp32
convert_image: *convert_image_det
backend_test: False
deploy_config: configs/mmocr/text-detection/text-detection_torchscript.py
pipeline_ts_detection_mrcnn_fp32: &pipeline_ts_detection_mrcnn_fp32
convert_image: *convert_image_det
backend_test: False
deploy_config: configs/mmocr/text-detection/text-detection_mrcnn_torchscript.py
pipeline_ts_recognition_fp32: &pipeline_ts_recognition_fp32
convert_image: *convert_image_rec
backend_test: False
deploy_config: configs/mmocr/text-recognition/text-recognition_torchscript.py
models:
- name: DBNet
metafile: configs/textdet/dbnet/metafile.yml
model_configs:
- configs/textdet/dbnet/dbnet_resnet18_fpnc_1200e_icdar2015.py
pipelines:
- *pipeline_ts_detection_fp32
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- *pipeline_ncnn_detection_static_fp32
- *pipeline_pplnn_detection_dynamic_fp32
- *pipeline_openvino_detection_dynamic_fp32
- name: DBNetpp
metafile: configs/textdet/dbnetpp/metafile.yml
model_configs:
- configs/textdet/dbnetpp/dbnetpp_resnet50_fpnc_1200e_icdar2015.py
pipelines:
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- *pipeline_ncnn_detection_static_fp32
- *pipeline_openvino_detection_dynamic_fp32
- name: PANet
metafile: configs/textdet/panet/metafile.yml
model_configs:
- configs/textdet/panet/panet_resnet18_fpem-ffm_600e_icdar2015.py
pipelines:
- *pipeline_ts_detection_fp32
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- *pipeline_ncnn_detection_static_fp32
- *pipeline_pplnn_detection_dynamic_fp32
- *pipeline_openvino_detection_dynamic_fp32
- name: PSENet
metafile: configs/textdet/psenet/metafile.yml
model_configs:
- configs/textdet/psenet/psenet_resnet50_fpnf_600e_icdar2015.py
pipelines:
- *pipeline_ts_detection_fp32
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- *pipeline_ncnn_detection_static_fp32
- *pipeline_pplnn_detection_dynamic_fp32
- *pipeline_openvino_detection_dynamic_fp32
- name: TextSnake
metafile: configs/textdet/textsnake/metafile.yml
model_configs:
- configs/textdet/textsnake/textsnake_resnet50_fpn-unet_1200e_ctw1500.py
pipelines:
- *pipeline_ts_detection_fp32
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp32
- name: MaskRCNN
metafile: configs/textdet/maskrcnn/metafile.yml
model_configs:
- configs/textdet/maskrcnn/mask-rcnn_resnet50_fpn_160e_icdar2015.py
pipelines:
- *pipeline_ts_detection_mrcnn_fp32
- *pipeline_ort_detection_mrcnn_dynamic_fp32
- *pipeline_trt_detection_mrcnn_dynamic_fp32
- name: CRNN
metafile: configs/textrecog/crnn/metafile.yml
model_configs:
- configs/textrecog/crnn/crnn_mini-vgg_5e_mj.py
pipelines:
- *pipeline_ts_recognition_fp32
- *pipeline_ort_recognition_dynamic_fp32
- *pipeline_trt_recognition_dynamic_fp16_H32_C1
- *pipeline_ncnn_recognition_static_fp32
- *pipeline_pplnn_recognition_dynamic_fp32
- name: SAR
metafile: configs/textrecog/sar/metafile.yml
model_configs:
- configs/textrecog/sar/sar_resnet31_parallel-decoder_5e_st-sub_mj-sub_sa_real.py
pipelines:
- *pipeline_ts_recognition_fp32
- *pipeline_ort_recognition_dynamic_fp32
- name: SATRN
metafile: configs/textrecog/satrn/metafile.yml
model_configs:
- configs/textrecog/satrn/satrn_shallow-small_5e_st_mj.py
pipelines:
- *pipeline_ts_recognition_fp32
- *pipeline_ort_recognition_dynamic_fp32
- *pipeline_trt_recognition_dynamic_fp32_H32_C3
- name: ABINet
metafile: configs/textrecog/abinet/metafile.yml
model_configs:
- configs/textrecog/abinet/abinet_20e_st-an_mj.py
pipelines:
- *pipeline_ts_recognition_fp32
- *pipeline_ort_recognition_static_fp32
- *pipeline_trt_recognition_static_fp16_C3
---
globals:
repo_url: https://github.com/open-mmlab/mmpose/tree/main
codebase_dir: ../mmpose
checkpoint_force_download: False
images:
img_human_pose: &img_human_pose ../mmpose/tests/data/coco/000000000785.jpg
img_human_pose_256x192: &img_human_pose_256x192 ./demo/resources/human-pose.jpg
metric_info: &metric_info
AP: # named after metafile.Results.Metrics
metric_key: coco/AP # eval key name
tolerance: 0.02 # metric ±n
AR:
metric_key: coco/AR
tolerance: 0.02 # metric ±n
convert_image: &convert_image
input_img: *img_human_pose
test_img: *img_human_pose_256x192
backend_test: &default_backend_test True
sdk:
sdk_static: &sdk_static configs/mmpose/pose-detection_sdk_static-256x192.py
onnxruntime:
pipeline_ort_static_fp32: &pipeline_ort_static_fp32
convert_image: *convert_image
deploy_config: configs/mmpose/pose-detection_onnxruntime_static.py
pipeline_ort_static_fp16: &pipeline_ort_static_fp16
convert_image: *convert_image
deploy_config: configs/mmpose/pose-detection_onnxruntime-fp16_static.py
tensorrt:
pipeline_trt_static_fp32: &pipeline_trt_static_fp32
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_static
deploy_config: configs/mmpose/pose-detection_tensorrt_static-256x192.py
pipeline_trt_static_fp32_256x256: &pipeline_trt_static_fp32_256x256
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_static
deploy_config: configs/mmpose/pose-detection_tensorrt_static-256x256.py
pipeline_trt_static_fp16: &pipeline_trt_static_fp16
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_static
deploy_config: configs/mmpose/pose-detection_tensorrt-fp16_static-256x192.py
pipeline_trt_static_int8: &pipeline_trt_static_int8
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_static
deploy_config: configs/mmpose/pose-detection_tensorrt-int8_static-256x192.py
openvino:
pipeline_openvino_static_fp32: &pipeline_openvino_static_fp32
convert_image: *convert_image
deploy_config: configs/mmpose/pose-detection_openvino_static-256x192.py
pipeline_openvino_static_fp32_256x256: &pipeline_openvino_static_fp32_256x256
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpose/pose-detection_openvino_static-256x256.py
ncnn:
pipeline_ncnn_static_fp32: &pipeline_ncnn_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpose/pose-detection_ncnn_static-256x192.py
pipeline_ncnn_static_fp32_256x256: &pipeline_ncnn_static_fp32_256x256
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpose/pose-detection_ncnn_static-256x256.py
pplnn:
pipeline_pplnn_static_fp32: &pipeline_pplnn_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpose/pose-detection_pplnn_static-256x192.py
torchscript:
  pipeline_ts_fp32: &pipeline_ts_fp32
convert_image: *convert_image
backend_test: *default_backend_test
deploy_config: configs/mmpose/pose-detection_torchscript.py
models:
- name: HRNET
metafile: configs/body_2d_keypoint/topdown_heatmap/coco/hrnet_coco.yml
model_configs:
- configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_static_fp32
- *pipeline_ts_fp32
- *pipeline_pplnn_static_fp32
- name: LiteHRNet
metafile: configs/body_2d_keypoint/topdown_heatmap/coco/litehrnet_coco.yml
model_configs:
- configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_litehrnet-30_8xb64-210e_coco-256x192.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp32
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_static_fp32
- *pipeline_ts_fp32
- *pipeline_pplnn_static_fp32
- name: MSPN
metafile: configs/body_2d_keypoint/topdown_heatmap/coco/mspn_coco.yml
model_configs:
- configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_4xmspn50_8xb32-210e_coco-256x192.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_static_fp32
- *pipeline_ts_fp32
- *pipeline_pplnn_static_fp32
- name: Hourglass
metafile: configs/body_2d_keypoint/topdown_heatmap/coco/hourglass_coco.yml
model_configs:
- configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp32_256x256
- *pipeline_ncnn_static_fp32_256x256
- *pipeline_openvino_static_fp32_256x256
- name: SimCC
metafile: configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml
model_configs:
- configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py
pipelines:
- convert_image: *convert_image
deploy_config: configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py
- convert_image: *convert_image
deploy_config: configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py
backend_test: *default_backend_test
sdk_config: configs/mmpose/pose-detection_simcc_sdk_static-256x192.py
- convert_image: *convert_image
deploy_config: configs/mmpose/pose-detection_simcc_ncnn_static-256x192.py
- name: YOLOX-Pose
metafile: configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_coco.yml
model_configs:
- configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_s_8xb32-300e_coco-640.py
pipelines:
- convert_image:
input_img: *img_human_pose
test_img: *img_human_pose
deploy_config: configs/mmpose/pose-detection_yolox-pose_onnxruntime_dynamic.py
- name: RTMO
metafile: configs/body_2d_keypoint/rtmo/body7/rtmo_body7.yml
model_configs:
- configs/body_2d_keypoint/rtmo/body7/rtmo-s_8xb32-600e_body7-640x640.py
pipelines:
- convert_image:
input_img: *img_human_pose
test_img: *img_human_pose
deploy_config: configs/mmpose/pose-detection_rtmo_onnxruntime_dynamic.py
- convert_image:
input_img: *img_human_pose
test_img: *img_human_pose
deploy_config: configs/mmpose/pose-detection_rtmo_tensorrt-fp16_dynamic-640x640.py
---
globals:
repo_url: https://github.com/open-mmlab/mmpretrain/tree/main
codebase_dir: ../mmpretrain
checkpoint_force_download: False
images:
img_snake: &img_snake ../mmpretrain/demo/demo.JPEG
img_color_cat: &img_color_cat ../mmpretrain/tests/data/color.jpg
metric_info: &metric_info
Top 1 Accuracy: # named after metafile.Results.Metrics
metric_key: accuracy/top1 # key name in output json
tolerance: 1 # metric ±n%
Top 5 Accuracy:
metric_key: accuracy/top5
tolerance: 1 # metric ±n%
convert_image: &convert_image
input_img: *img_snake
test_img: *img_color_cat
backend_test: &default_backend_test True
sdk:
sdk_dynamic: &sdk_dynamic configs/mmpretrain/classification_sdk_dynamic.py
onnxruntime:
pipeline_ort_static_fp32: &pipeline_ort_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_onnxruntime_static.py
pipeline_ort_dynamic_fp32: &pipeline_ort_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_onnxruntime_dynamic.py
pipeline_ort_dynamic_fp16: &pipeline_ort_dynamic_fp16
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_onnxruntime-fp16_dynamic.py
tensorrt:
pipeline_trt_static_fp32: &pipeline_trt_static_fp32
convert_image: *convert_image
backend_test: *default_backend_test
deploy_config: configs/mmpretrain/classification_tensorrt_static-224x224.py
pipeline_trt_static_fp16: &pipeline_trt_static_fp16
convert_image: *convert_image
backend_test: *default_backend_test
deploy_config: configs/mmpretrain/classification_tensorrt-fp16_static-224x224.py
pipeline_trt_static_fp16_384x384: &pipeline_trt_static_fp16_384x384
convert_image: *convert_image
backend_test: *default_backend_test
deploy_config: configs/mmpretrain/classification_tensorrt-fp16_static-384x384.py
pipeline_trt_static_int8: &pipeline_trt_static_int8
convert_image: *convert_image
backend_test: *default_backend_test
deploy_config: configs/mmpretrain/classification_tensorrt-int8_static-224x224.py
pipeline_trt_dynamic_fp32: &pipeline_trt_dynamic_fp32
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmpretrain/classification_tensorrt_dynamic-224x224-224x224.py
pipeline_trt_dynamic_fp16: &pipeline_trt_dynamic_fp16
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmpretrain/classification_tensorrt-fp16_dynamic-224x224-224x224.py
pipeline_trt_dynamic_int8: &pipeline_trt_dynamic_int8
convert_image: *convert_image
calib_dataset_cfg:
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmpretrain/classification_tensorrt-int8_dynamic-224x224-224x224.py
openvino:
pipeline_openvino_dynamic_fp32: &pipeline_openvino_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_openvino_dynamic-224x224.py
ncnn:
pipeline_ncnn_static_fp32: &pipeline_ncnn_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_ncnn_static.py
pipeline_ncnn_dynamic_fp32: &pipeline_ncnn_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_ncnn_dynamic.py
pplnn:
pipeline_pplnn_dynamic_fp32: &pipeline_pplnn_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmpretrain/classification_pplnn_dynamic-224x224.py
torchscript:
pipeline_ts_fp32: &pipeline_ts_fp32
convert_image: *convert_image
backend_test: True
deploy_config: configs/mmpretrain/classification_torchscript.py
models:
- name: ResNet
metafile: configs/resnet/metafile.yml
model_configs:
- configs/resnet/resnet18_8xb32_in1k.py # TODO Not benchmark config
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
# - *pipeline_trt_dynamic_fp32
- *pipeline_trt_dynamic_fp16
# - *pipeline_trt_dynamic_int8
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: ResNeXt
metafile: configs/resnext/metafile.yml
model_configs:
- configs/resnext/resnext50-32x4d_8xb32_in1k.py # TODO Not benchmark config
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: DenseNet
metafile: configs/densenet/metafile.yml
model_configs:
- configs/densenet/densenet121_4xb256_in1k.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: SE-ResNet
metafile: configs/seresnet/metafile.yml
model_configs:
- configs/seresnet/seresnet50_8xb32_in1k.py # TODO Not benchmark config
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: MobileNetV2
metafile: configs/mobilenet_v2/metafile.yml
model_configs:
- configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: ShuffleNetV1
metafile: configs/shufflenet_v1/metafile.yml
model_configs:
- configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py
pipelines:
- *pipeline_ts_fp32
# - *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ncnn_static_fp32
# - *pipeline_pplnn_dynamic_fp32
# - *pipeline_openvino_dynamic_fp32
- name: ShuffleNetV2
metafile: configs/shufflenet_v2/metafile.yml
model_configs:
- configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py
pipelines:
- *pipeline_ts_fp32
# - *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ncnn_static_fp32
# - *pipeline_pplnn_dynamic_fp32
# - *pipeline_openvino_dynamic_fp32
- name: VisionTransformer
metafile: configs/vision_transformer/metafile.yml
model_configs:
- configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_static_fp16_384x384
- *pipeline_ncnn_static_fp32
- name: SwinTransformer
metafile: configs/swin_transformer/metafile.yml
model_configs:
- configs/swin_transformer/swin-tiny_16xb64_in1k.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp32
- name: MobileOne
metafile: configs/mobileone/metafile.yml
model_configs:
- configs/mobileone/mobileone-s0_8xb32_in1k.py
pipelines:
- *pipeline_trt_static_fp16
- *pipeline_ort_dynamic_fp32
- name: EfficientNet
metafile: configs/efficientnet/metafile.yml
model_configs:
- configs/efficientnet/efficientnet-b0_8xb32_in1k.py
pipelines:
- *pipeline_ort_static_fp32
- convert_image: *convert_image
deploy_config: configs/mmpretrain/classification_tensorrt_dynamic-224x224-224x224.py
- name: Conformer
metafile: configs/conformer/metafile.yml
model_configs:
- configs/conformer/conformer-tiny-p16_8xb128_in1k.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp32
- name: EfficientFormer
metafile: configs/efficientformer/metafile.yml
model_configs:
- configs/efficientformer/efficientformer-l1_8xb128_in1k.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: MobileNetV3
metafile: configs/mobilenet_v3/metafile.yml
model_configs:
- configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
---
globals:
repo_url: https://github.com/open-mmlab/mmrotate/tree/main
codebase_dir: ../mmrotate
checkpoint_force_download: False
images:
img_demo: &img_demo ../mmrotate/demo/demo.jpg
img_dota_demo: &img_dota_demo ../mmrotate/demo/dota_demo.jpg
metric_info: &metric_info
mAP: # named after metafile.Results.Metrics
metric_key: AP # eval key name
tolerance: 0.10 # metric ±n%
convert_image_det: &convert_image_det
input_img: *img_demo
test_img: *img_dota_demo
backend_test: &default_backend_test False
onnxruntime:
# ======= detection =======
pipeline_ort_detection_static_fp32: &pipeline_ort_detection_static_fp32
convert_image: *convert_image_det
backend_test: False
deploy_config: configs/mmrotate/rotated-detection_onnxruntime_static.py
pipeline_ort_detection_dynamic_fp32: &pipeline_ort_detection_dynamic_fp32
convert_image: *convert_image_det
deploy_config: configs/mmrotate/rotated-detection_onnxruntime_dynamic.py
pipeline_ort_detection_dynamic_fp16: &pipeline_ort_detection_dynamic_fp16
convert_image: *convert_image_det
deploy_config: configs/mmrotate/rotated-detection_onnxruntime-fp16_dynamic.py
tensorrt:
# ======= detection =======
pipeline_trt_detection_dynamic_fp32: &pipeline_trt_detection_dynamic_fp32
convert_image: *convert_image_det
backend_test: *default_backend_test
deploy_config: configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py
pipeline_trt_detection_dynamic_fp16: &pipeline_trt_detection_dynamic_fp16
convert_image: *convert_image_det
backend_test: *default_backend_test
deploy_config: configs/mmrotate/rotated-detection_tensorrt-fp16_dynamic-320x320-1024x1024.py
models:
- name: RotatedRetinanet
metafile: configs/rotated_retinanet/metafile.yml
model_configs:
- configs/rotated_retinanet/rotated-retinanet-hbox-oc_r50_fpn_1x_dota.py
pipelines:
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- name: oriented_rcnn
metafile: configs/oriented_rcnn/metafile.yml
model_configs:
- configs/oriented_rcnn/oriented-rcnn-le90_r50_fpn_1x_dota.py
pipelines:
- *pipeline_ort_detection_static_fp32
- *pipeline_trt_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- name: gliding_vertex
metafile: configs/gliding_vertex/metafile.yml
model_configs:
- configs/gliding_vertex/gliding-vertex-rbox_r50_fpn_1x_dota.py
pipelines:
- *pipeline_trt_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
- name: RoITransformer
metafile: configs/roi_trans/metafile.yml
model_configs:
- configs/roi_trans/roi-trans-le90_r50_fpn_1x_dota.py
pipelines:
- *pipeline_ort_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp32
- *pipeline_trt_detection_dynamic_fp16
---
globals:
repo_url: https://github.com/open-mmlab/mmsegmentation/tree/main
codebase_dir: ../mmsegmentation
checkpoint_force_download: False
images:
img_leftImg8bit: &img_leftImg8bit ../mmsegmentation/tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png
img_loveda_0: &img_loveda_0 ../mmsegmentation/tests/data/pseudo_loveda_dataset/img_dir/0.png
metric_info: &metric_info
mIoU: # named after metafile.Results.Metrics
metric_key: mIoU # eval OrderedDict key name
tolerance: 1 # metric ±n%
convert_image: &convert_image
input_img: *img_leftImg8bit
test_img: *img_loveda_0
backend_test: &default_backend_test True
sdk:
sdk_dynamic: &sdk_dynamic configs/mmseg/segmentation_sdk_dynamic.py
onnxruntime:
pipeline_ort_static_fp32: &pipeline_ort_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_onnxruntime_static-1024x2048.py
pipeline_ort_static_fp32_512x512: &pipeline_ort_static_fp32_512x512
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_onnxruntime_static-512x512.py
pipeline_ort_dynamic_fp32: &pipeline_ort_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_onnxruntime_dynamic.py
pipeline_ort_dynamic_fp16: &pipeline_ort_dynamic_fp16
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_onnxruntime-fp16_dynamic.py
tensorrt:
pipeline_trt_static_fp32: &pipeline_trt_static_fp32
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmseg/segmentation_tensorrt_static-1024x2048.py
pipeline_trt_static_fp32_512x512: &pipeline_trt_static_fp32_512x512
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_tensorrt_static-512x512.py
pipeline_trt_static_fp16: &pipeline_trt_static_fp16
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmseg/segmentation_tensorrt-fp16_static-1024x2048.py
pipeline_trt_static_int8: &pipeline_trt_static_int8
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmseg/segmentation_tensorrt-int8_static-1024x2048.py
pipeline_trt_static_fp16_512x512: &pipeline_trt_static_fp16_512x512
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_tensorrt-fp16_static-512x512.py
pipeline_trt_dynamic_fp32: &pipeline_trt_dynamic_fp32
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmseg/segmentation_tensorrt_dynamic-512x1024-2048x2048.py
pipeline_trt_dynamic_fp16: &pipeline_trt_dynamic_fp16
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmseg/segmentation_tensorrt-fp16_dynamic-512x1024-2048x2048.py
pipeline_trt_dynamic_int8: &pipeline_trt_dynamic_int8
convert_image: *convert_image
backend_test: *default_backend_test
sdk_config: *sdk_dynamic
deploy_config: configs/mmseg/segmentation_tensorrt-int8_dynamic-512x1024-2048x2048.py
openvino:
pipeline_openvino_dynamic_fp32: &pipeline_openvino_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_openvino_dynamic-1024x2048.py
pipeline_openvino_static_fp32: &pipeline_openvino_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_openvino_static-1024x2048.py
pipeline_openvino_static_fp32_512x512: &pipeline_openvino_static_fp32_512x512
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_openvino_static-512x512.py
ncnn:
pipeline_ncnn_static_fp32: &pipeline_ncnn_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_ncnn_static-512x512.py
pplnn:
pipeline_pplnn_dynamic_fp32: &pipeline_pplnn_dynamic_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_pplnn_dynamic-1024x2048.py
pipeline_pplnn_static_fp32: &pipeline_pplnn_static_fp32
convert_image: *convert_image
backend_test: False
deploy_config: configs/mmseg/segmentation_pplnn_static-1024x2048.py
torchscript:
pipeline_ts_fp32: &pipeline_ts_fp32
convert_image: *convert_image
backend_test: True
deploy_config: configs/mmseg/segmentation_torchscript.py
models:
- name: FCN
metafile: configs/fcn/metafile.yaml
model_configs:
- configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: PSPNet
metafile: configs/pspnet/metafile.yaml
model_configs:
- configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_static_fp32
- *pipeline_openvino_static_fp32
- name: deeplabv3
metafile: configs/deeplabv3/metafile.yaml
model_configs:
- configs/deeplabv3/deeplabv3_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: deeplabv3+
metafile: configs/deeplabv3plus/metafile.yaml
model_configs:
- configs/deeplabv3plus/deeplabv3plus_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- *pipeline_openvino_dynamic_fp32
- name: Fast-SCNN
metafile: configs/fastscnn/metafile.yaml
model_configs:
- configs/fastscnn/fast_scnn_8xb4-160k_cityscapes-512x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_pplnn_static_fp32
- *pipeline_openvino_static_fp32
- name: UNet
metafile: configs/unet/metafile.yaml
model_configs:
- configs/unet/unet-s5-d16_fcn_4xb4-160k_cityscapes-512x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_pplnn_dynamic_fp32
- name: ANN
metafile: configs/ann/metafile.yaml
model_configs:
- configs/ann/ann_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ts_fp32
- name: APCNet
metafile: configs/apcnet/metafile.yaml
model_configs:
- configs/apcnet/apcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_ts_fp32
- name: BiSeNetV1
metafile: configs/bisenetv1/metafile.yaml
model_configs:
- configs/bisenetv1/bisenetv1_r18-d32_4xb4-160k_cityscapes-1024x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: BiSeNetV2
metafile: configs/bisenetv2/metafile.yaml
model_configs:
- configs/bisenetv2/bisenetv2_fcn_4xb4-160k_cityscapes-1024x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: CGNet
metafile: configs/cgnet/metafile.yaml
model_configs:
- configs/cgnet/cgnet_fcn_4xb8-60k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: EMANet
metafile: configs/emanet/metafile.yaml
model_configs:
- configs/emanet/emanet_r50-d8_4xb2-80k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: EncNet
metafile: configs/encnet/metafile.yaml
model_configs:
- configs/encnet/encnet_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: ERFNet
metafile: configs/erfnet/metafile.yaml
model_configs:
- configs/erfnet/erfnet_fcn_4xb4-160k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp32
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: FastFCN
metafile: configs/fastfcn/metafile.yaml
model_configs:
- configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4xb2-80k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: GCNet
metafile: configs/gcnet/metafile.yaml
model_configs:
- configs/gcnet/gcnet_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ts_fp32
- name: ICNet
metafile: configs/icnet/metafile.yaml
model_configs:
- configs/icnet/icnet_r18-d8_4xb2-80k_cityscapes-832x832.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- *pipeline_openvino_static_fp32
- *pipeline_ts_fp32
- name: ISANet
metafile: configs/isanet/metafile.yaml
model_configs:
- configs/isanet/isanet_r50-d8_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_static_fp32_512x512
- *pipeline_trt_static_fp16_512x512
- *pipeline_openvino_static_fp32_512x512
- name: OCRNet
metafile: configs/ocrnet/metafile.yaml
model_configs:
- configs/ocrnet/ocrnet_hr18s_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp32
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: PointRend
metafile: configs/point_rend/metafile.yaml
model_configs:
- configs/point_rend/pointrend_r50_4xb2-80k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_static_fp32_512x512
- *pipeline_trt_static_fp16
- *pipeline_ts_fp32
- name: Semantic FPN
metafile: configs/sem_fpn/metafile.yaml
model_configs:
- configs/sem_fpn/fpn_r50_4xb2-80k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp32
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: STDC
metafile: configs/stdc/metafile.yaml
model_configs:
- configs/stdc/stdc1_in1k-pre_4xb12-80k_cityscapes-512x1024.py
- configs/stdc/stdc2_in1k-pre_4xb12-80k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_dynamic_fp32
- *pipeline_trt_dynamic_fp16
- *pipeline_ncnn_static_fp32
- *pipeline_openvino_dynamic_fp32
- *pipeline_ts_fp32
- name: UPerNet
metafile: configs/upernet/metafile.yaml
model_configs:
- configs/upernet/upernet_r50_4xb2-40k_cityscapes-512x1024.py
pipelines:
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp16
- name: Segmenter
metafile: configs/segmenter/metafile.yaml
model_configs:
- configs/segmenter/segmenter_vit-s_fcn_8xb1-160k_ade20k-512x512.py
pipelines:
- *pipeline_ort_static_fp32_512x512
- *pipeline_trt_static_fp32_512x512
- *pipeline_openvino_static_fp32_512x512
- *pipeline_ncnn_static_fp32
- name: SegFormer
metafile: configs/segformer/metafile.yaml
model_configs:
- configs/segformer/segformer_mit-b0_8xb1-160k_cityscapes-1024x1024.py
pipelines:
- *pipeline_ts_fp32
- *pipeline_ort_static_fp32
- *pipeline_trt_static_fp32
- *pipeline_openvino_static_fp32
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from multiprocessing import Process
from mmengine import Config
from mmdeploy.apis import create_calib_input_data
# Calibration data is written to a throw-away HDF5 file in the temp dir.
calib_file = tempfile.NamedTemporaryFile(suffix='.h5').name
# Annotation file consumed by the dummy 'val' dataset built in get_model_cfg.
ann_file = 'tests/data/annotation.json'
def get_end2end_deploy_cfg():
    """Build an end-to-end ONNX Runtime deploy config for an mmdet
    ObjectDetection task.

    Returns:
        Config: deploy config consumed by ``create_calib_input_data``.
    """
    deploy_cfg = Config(
        dict(
            onnx_config=dict(
                # Batch/height/width of the input and batch/num_dets of both
                # outputs are exported as dynamic axes.
                dynamic_axes={
                    'input': {
                        0: 'batch',
                        2: 'height',
                        3: 'width'
                    },
                    'dets': {
                        0: 'batch',
                        1: 'num_dets',
                    },
                    'labels': {
                        0: 'batch',
                        1: 'num_dets',
                    },
                },
                type='onnx',
                export_params=True,
                keep_initializers_as_inputs=False,
                opset_version=11,
                save_file='end2end.onnx',
                input_names=['input'],
                output_names=['dets', 'labels'],
                input_shape=None),
            codebase_config=dict(
                type='mmdet',
                task='ObjectDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.5,
                    max_output_boxes_per_class=200,
                    pre_top_k=-1,
                    keep_top_k=100,
                    background_label_id=-1,
                )),
            backend_config=dict(type='onnxruntime')))
    return deploy_cfg
def get_partition_deploy_cfg():
    """Return the end-to-end deploy config extended with a two-stage
    partition configuration."""
    cfg = get_end2end_deploy_cfg()
    partition_config = dict(type='two_stage', apply_marks=True)
    # Write through the underlying dict of the Config object.
    cfg._cfg_dict['partition_config'] = partition_config
    return cfg
def get_model_cfg():
    """Build a minimal Faster R-CNN model config with a one-image 'val'
    dataset, used as the calibration data source.

    Returns:
        Config: mmdet-style model config (pre-2.x "data" layout).
    """
    dataset_type = 'CustomDataset'
    data_root = 'tests/data/'
    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True)
    test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=(1, 1),
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]
    model_cfg = Config(
        dict(
            data=dict(
                samples_per_gpu=1,
                workers_per_gpu=1,
                # Calibration reads from this 'val' split (see
                # dataset_type='val' in the run_test_* helpers).
                val=dict(
                    type=dataset_type,
                    ann_file=ann_file,
                    img_prefix=data_root,
                    pipeline=test_pipeline)),
            # model settings
            model=dict(
                type='FasterRCNN',
                backbone=dict(
                    type='ResNet',
                    depth=50,
                    num_stages=4,
                    out_indices=(0, 1, 2, 3),
                    frozen_stages=1,
                    norm_cfg=dict(type='BN', requires_grad=True),
                    norm_eval=True,
                    style='pytorch',
                    init_cfg=dict(type='Pretrained')),
                neck=dict(
                    type='FPN',
                    in_channels=[256, 512, 1024, 2048],
                    out_channels=256,
                    num_outs=5),
                rpn_head=dict(
                    type='RPNHead',
                    in_channels=256,
                    feat_channels=256,
                    anchor_generator=dict(
                        type='AnchorGenerator',
                        scales=[8],
                        ratios=[0.5, 1.0, 2.0],
                        strides=[4, 8, 16, 32, 64]),
                    bbox_coder=dict(
                        type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
                    loss_cls=dict(
                        type='CrossEntropyLoss',
                        use_sigmoid=True,
                        loss_weight=1.0),
                    loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
                roi_head=dict(
                    type='StandardRoIHead',
                    bbox_roi_extractor=dict(
                        type='SingleRoIExtractor',
                        roi_layer=dict(
                            type='RoIAlign', output_size=7, sampling_ratio=0),
                        out_channels=256,
                        featmap_strides=[4, 8, 16, 32]),
                    bbox_head=dict(
                        type='Shared2FCBBoxHead',
                        in_channels=256,
                        fc_out_channels=1024,
                        roi_feat_size=7,
                        num_classes=80,
                        bbox_coder=dict(
                            type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
                        reg_class_agnostic=False,
                        loss_cls=dict(
                            type='CrossEntropyLoss',
                            use_sigmoid=False,
                            loss_weight=1.0),
                        loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
                # model testing settings
                test_cfg=dict(
                    rpn=dict(
                        nms_pre=1000,
                        max_per_img=1000,
                        nms=dict(type='nms', iou_threshold=0.7),
                        min_bbox_size=0),
                    rcnn=dict(
                        score_thr=0.05,
                        nms=dict(type='nms', iou_threshold=0.5),
                        max_per_img=100)))))
    return model_cfg
def run_test_create_calib_end2end():
    """Create end-to-end calibration data and verify the HDF5 layout.

    Executed in a child process by ``test_create_calib_end2end`` because of
    the model's memory footprint.
    """
    import h5py
    model_cfg = get_model_cfg()
    deploy_cfg = get_end2end_deploy_cfg()
    # NOTE(review): the fourth positional argument (None) is presumably a
    # checkpoint path — confirm against create_calib_input_data's signature.
    create_calib_input_data(
        calib_file,
        deploy_cfg,
        model_cfg,
        None,
        dataset_cfg=model_cfg,
        dataset_type='val',
        device='cpu')
    assert osp.exists(calib_file)
    # Expected layout: calib_data/end2end/input/<sample index>.
    with h5py.File(calib_file, mode='r') as calibrator:
        assert calibrator['calib_data'] is not None
        assert calibrator['calib_data']['end2end'] is not None
        assert calibrator['calib_data']['end2end']['input'] is not None
        assert calibrator['calib_data']['end2end']['input']['0'] is not None
# Because Faster-RCNN needs too much memory on GPU, we need to run tests in a
# new process.
def test_create_calib_end2end():
    """Run the end-to-end calibration check in a separate process and fail
    if the child did not exit cleanly.

    Fix over the original: only joining the child meant assertion errors
    raised inside ``run_test_create_calib_end2end`` were silently ignored
    and the test always passed; checking ``exitcode`` propagates them.
    """
    p = Process(target=run_test_create_calib_end2end)
    try:
        p.start()
    finally:
        p.join()
    # Non-zero (or negative, for a signal) exitcode means the child failed.
    assert p.exitcode == 0
# NOTE(review): 'parittion' is a typo for 'partition'; left as-is because
# the test function below refers to this name.
def run_test_create_calib_parittion():
    """Create two-stage partition calibration data and verify the HDF5
    layout (one group per partition, each with its own input tensor)."""
    import h5py
    model_cfg = get_model_cfg()
    deploy_cfg = get_partition_deploy_cfg()
    create_calib_input_data(
        calib_file,
        deploy_cfg,
        model_cfg,
        None,
        dataset_cfg=model_cfg,
        dataset_type='val',
        device='cpu')
    assert osp.exists(calib_file)
    # Partition 0 feeds 'input', partition 1 feeds 'bbox_feats'.
    input_names = ['input', 'bbox_feats']
    with h5py.File(calib_file, mode='r') as calibrator:
        assert calibrator['calib_data'] is not None
        calib_data = calibrator['calib_data']
        for i in range(2):
            partition_name = f'partition{i}'
            assert calib_data[partition_name] is not None
            assert calib_data[partition_name][input_names[i]] is not None
            assert calib_data[partition_name][input_names[i]]['0'] is not None
def test_create_calib_parittion():
    """Run the partition calibration check in a separate process and fail
    if the child did not exit cleanly.

    Fix over the original: only joining the child meant assertion errors in
    ``run_test_create_calib_parittion`` were silently ignored; checking
    ``exitcode`` propagates them.
    """
    p = Process(target=run_test_create_calib_parittion)
    try:
        p.start()
    finally:
        p.join()
    # Non-zero (or negative, for a signal) exitcode means the child failed.
    assert p.exitcode == 0
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import onnx
import torch
from mmdeploy.apis.onnx import extract_partition
from mmdeploy.core import mark
# The exported ONNX model is written to a throw-away file.
output_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
def test_extract():
    """Mark a single ``add`` op and check ``extract_partition`` can cut it
    out of the exported ONNX graph by its mark names."""
    @mark('add', outputs='z')
    def add(x, y):
        return torch.add(x, y)
    class TestModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x, y):
            return add(x, y)
    model = TestModel().eval()
    # dummy input
    x = torch.rand(2, 3, 4)
    y = torch.rand(2, 3, 4)
    torch.onnx.export(model, (x, y), output_file)
    onnx_model = onnx.load(output_file)
    # 'add:input'/'add:output' are presumably the marker names injected by
    # the @mark decorator — see mmdeploy.core.mark.
    extracted = extract_partition(onnx_model, 'add:input', 'add:output')
    assert extracted.graph.input[0].name == 'x'
    assert extracted.graph.input[1].name == 'y'
    assert extracted.graph.output[0].name == 'z'
    assert extracted.graph.node[0].op_type == 'Add'
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmengine
import pytest
import torch
import torch.nn as nn
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker
# Fixture paths/tensors shared by the Ascend conversion test below.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
test_img = torch.rand([1, 3, 8, 8])
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    # Minimal module (halves its input); used only as an export fixture.
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x * 0.5
test_model = TestModel().eval()
def generate_onnx_file(model):
    """Export ``model`` to the module-level ``onnx_file``, tracing with the
    module-level ``test_img`` and dynamic batch/spatial axes."""
    with torch.no_grad():
        # NOTE(review): axes 2/3 are labelled width/height, while NCHW
        # usually has height at axis 2 — labels only affect axis naming.
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'width',
                3: 'height'
            },
            'output': {
                0: 'batch'
            }
        }
        torch.onnx.export(
            model,
            test_img,
            onnx_file,
            output_names=['output'],
            input_names=['input'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11,
            dynamic_axes=dynamic_axes)
    assert osp.exists(onnx_file)
@backend_checker(Backend.ASCEND)
def test_onnx2ascend():
    """Convert the fixture ONNX model to an Ascend .om model and check the
    expected artifact exists."""
    from mmdeploy.apis.ascend import from_onnx
    model = test_model
    generate_onnx_file(model)
    work_dir, _ = osp.split(onnx_file)
    # NOTE(review): file_name keeps the directory part, so the osp.join
    # below collapses to an absolute path on POSIX; confirm this is the
    # intended .om location (vs. joining only the basename).
    file_name = osp.splitext(onnx_file)[0]
    om_path = osp.join(work_dir, file_name + '.om')
    model_inputs = mmengine.Config(
        dict(
            dynamic_batch_size=[1, 2, 4],
            input_shapes=dict(input=[-1, 3, 224, 224])))
    from_onnx(onnx_file, work_dir, model_inputs)
    assert osp.exists(work_dir)
    assert osp.exists(om_path)
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import pytest
import torch
import torch.nn as nn
from mmdeploy.backend.ncnn.onnx2ncnn import get_output_model_file
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker
# Fixture paths/tensors shared by the ncnn conversion test below.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
test_img = torch.rand([1, 3, 8, 8])
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    # Minimal module (halves its input); used only as an export fixture.
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x * 0.5
test_model = TestModel().eval()
def generate_onnx_file(model):
    """Export ``model`` to the module-level ``onnx_file``, tracing with the
    module-level ``test_img`` and dynamic batch/spatial axes."""
    with torch.no_grad():
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'width',
                3: 'height'
            },
            'output': {
                0: 'batch'
            }
        }
        torch.onnx.export(
            model,
            test_img,
            onnx_file,
            output_names=['output'],
            input_names=['input'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11,
            dynamic_axes=dynamic_axes)
    assert osp.exists(onnx_file)
@backend_checker(Backend.NCNN)
def test_onnx2ncnn():
    """Convert the fixture ONNX model to ncnn param/bin files and check the
    expected artifacts are produced."""
    from mmdeploy.apis.ncnn import from_onnx
    model = test_model
    generate_onnx_file(model)
    work_dir, _ = osp.split(onnx_file)
    save_param, save_bin = get_output_model_file(onnx_file, work_dir=work_dir)
    # NOTE(review): file_name keeps the directory part, so the osp.join
    # below collapses to the absolute prefix on POSIX; confirm this is the
    # intended output prefix (vs. joining only the basename).
    file_name = osp.splitext(onnx_file)[0]
    from_onnx(onnx_file, osp.join(work_dir, file_name))
    assert osp.exists(work_dir)
    assert osp.exists(save_param)
    assert osp.exists(save_bin)
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmengine import Config
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker, get_random_name
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    """Minimal export fixture: a module that halves its input."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Identical computation to the original `x * 0.5`.
        return 0.5 * x
def generate_onnx_file(model, export_img, onnx_file, input_name, output_name):
with torch.no_grad():
dynamic_axes = {
input_name: {
0: 'batch',
2: 'width',
3: 'height'
},
output_name: {
0: 'batch'
}
}
torch.onnx.export(
model,
export_img,
onnx_file,
output_names=[output_name],
input_names=[input_name],
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11,
dynamic_axes=dynamic_axes)
assert osp.exists(onnx_file)
def get_outputs(pytorch_model, openvino_model_path, input, input_name,
                output_name):
    """Run ``input`` through both the PyTorch model and the OpenVINO IR and
    return the pair of outputs for comparison."""
    from mmdeploy.backend.openvino import OpenVINOWrapper
    torch_result = pytorch_model(input).numpy()
    wrapper = OpenVINOWrapper(openvino_model_path)
    openvino_result = wrapper({input_name: input})[output_name]
    return torch_result, openvino_result
def get_base_deploy_cfg():
    """Minimal OpenVINO deploy config without model-optimizer options."""
    deploy_cfg = Config(dict(backend_config=dict(type='openvino')))
    return deploy_cfg
def get_deploy_cfg_with_mo_args():
    """Deploy config variant intended to carry model-optimizer args.

    NOTE(review): currently identical to ``get_base_deploy_cfg`` — no
    ``mo_options`` are actually set; confirm whether this is intentional.
    """
    deploy_cfg = Config(dict(backend_config=dict(type='openvino')))
    return deploy_cfg
@pytest.mark.parametrize('get_deploy_cfg',
                         [get_base_deploy_cfg, get_deploy_cfg_with_mo_args])
@backend_checker(Backend.OPENVINO)
def test_onnx2openvino(get_deploy_cfg):
    """Export a toy model to ONNX, convert it to OpenVINO IR, and check the
    IR output matches PyTorch on a fresh, differently-sized input."""
    from mmdeploy.apis.openvino import (from_onnx, get_mo_options_from_cfg,
                                        get_output_model_file)
    pytorch_model = TestModel().eval()
    export_img = torch.rand([1, 3, 8, 8])
    onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
    # Random tensor names exercise name handling in the converter.
    input_name = get_random_name()
    output_name = get_random_name()
    generate_onnx_file(pytorch_model, export_img, onnx_file, input_name,
                       output_name)
    input_info = {input_name: export_img.shape}
    output_names = [output_name]
    openvino_dir = tempfile.TemporaryDirectory().name
    deploy_cfg = get_deploy_cfg()
    mo_options = get_mo_options_from_cfg(deploy_cfg)
    from_onnx(onnx_file, openvino_dir, input_info, output_names, mo_options)
    openvino_model_path = get_output_model_file(onnx_file, openvino_dir)
    assert osp.exists(openvino_model_path), \
        'The file (.xml) for OpenVINO IR has not been created.'
    # A 16x16 input (vs. the 8x8 export size) also exercises the dynamic
    # spatial axes of the exported graph.
    test_img = torch.rand([1, 3, 16, 16])
    output_pytorch, openvino_output = get_outputs(pytorch_model,
                                                  openvino_model_path,
                                                  test_img, input_name,
                                                  output_name)
    assert np.allclose(output_pytorch, openvino_output), \
        'OpenVINO and PyTorch outputs are not the same.'
@backend_checker(Backend.OPENVINO)
def test_get_input_info_from_cfg():
    """Exercise ``get_input_info_from_cfg`` for missing config, dict-shaped
    opt_shapes, input-name override via onnx_config, and list-shaped
    opt_shapes."""
    from mmdeploy.apis.openvino import get_input_info_from_cfg
    # Test 1: an empty config must raise (no backend_config at all).
    deploy_cfg = Config()
    with pytest.raises(KeyError):
        get_input_info_from_cfg(deploy_cfg)
    # Test 2: opt_shapes given as a {name: shape} mapping.
    input_name = 'input'
    height, width = 600, 1000
    shape = [1, 3, height, width]
    expected_input_info = {input_name: shape}
    deploy_cfg = Config({
        'backend_config': {
            'model_inputs': [{
                'opt_shapes': expected_input_info
            }]
        }
    })
    input_info = get_input_info_from_cfg(deploy_cfg)
    assert input_info == expected_input_info, 'Test 2: ' \
        'The expected value of \'input_info\' does not match the received one.'
    # Test 3
    # The case where the input name in 'onnx_config'
    # is different from 'backend_config'.
    onnx_config_input_name = get_random_name(1234)
    deploy_cfg.merge_from_dict(
        {'onnx_config': {
            'input_names': [onnx_config_input_name]
        }})
    expected_input_info = {onnx_config_input_name: shape}
    input_info = get_input_info_from_cfg(deploy_cfg)
    assert input_info == expected_input_info, 'Test 3: ' \
        'The expected value of \'input_info\' does not match the received one.'
    # Test 4
    # The case where 'backend_config.model_inputs.opt_shapes'
    # is given by a list, not a dictionary.
    deploy_cfg.merge_from_dict(
        {'backend_config': {
            'model_inputs': [{
                'opt_shapes': [shape]
            }]
        }})
    input_info = get_input_info_from_cfg(deploy_cfg)
    assert input_info == expected_input_info, 'Test 4: ' \
        'The expected value of \'input_info\' does not match the received one.'
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmengine
import pytest
import torch
import torch.nn as nn
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker
# Fixture paths/tensors shared by the RKNN conversion test below.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
test_img = torch.rand([1, 3, 8, 8])
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    # Minimal module (halves its input); used only as an export fixture.
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x * 0.5
test_model = TestModel().eval()
def generate_onnx_file(model):
    """Export ``model`` to the module-level ``onnx_file`` with a static
    shape (no dynamic axes), consistent with the fixed ``input_size_list``
    in ``get_deploy_cfg``."""
    with torch.no_grad():
        torch.onnx.export(
            model,
            test_img,
            onnx_file,
            output_names=['output'],
            input_names=['input'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11)
    assert osp.exists(onnx_file)
def get_deploy_cfg():
    """Minimal RKNN deploy config: quantization disabled, fixed 3x8x8
    input size."""
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(
                type='rknn',
                common_config=dict(),
                quantization_config=dict(do_quantization=False, dataset=None),
                input_size_list=[[3, 8, 8]])))
    return deploy_cfg
@backend_checker(Backend.RKNN)
def test_onnx2rknn():
    """Convert the fixture ONNX model to an .rknn file and check the
    artifact exists."""
    from mmdeploy.backend.rknn.onnx2rknn import onnx2rknn
    model = test_model
    generate_onnx_file(model)
    work_dir, _ = osp.split(onnx_file)
    # Output lands next to the source .onnx, with the extension swapped.
    rknn_file = onnx_file.replace('.onnx', '.rknn')
    deploy_cfg = get_deploy_cfg()
    onnx2rknn(onnx_file, rknn_file, deploy_cfg)
    assert osp.exists(work_dir)
    assert osp.exists(rknn_file)
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import pytest
import torch
import torch.nn as nn
from mmengine import Config
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker
# Fixture paths/tensors shared by the TensorRT conversion test below.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
engine_file = tempfile.NamedTemporaryFile(suffix='.engine').name
test_img = torch.rand([1, 3, 8, 8])
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    # Minimal module (halves its input); used only as an export fixture.
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x * 0.5
# The fixture lives on GPU: TensorRT conversion requires a CUDA device.
test_model = TestModel().eval().cuda()
def get_deploy_cfg():
    """TensorRT deploy config: FP32 engine, 1 MiB workspace, and a fixed
    1x3x8x8 input profile (min == opt == max)."""
    deploy_cfg = Config(
        dict(
            backend_config=dict(
                type='tensorrt',
                common_config=dict(
                    fp16_mode=False, max_workspace_size=1 << 20),
                model_inputs=[
                    dict(
                        input_shapes=dict(
                            input=dict(
                                min_shape=[1, 3, 8, 8],
                                opt_shape=[1, 3, 8, 8],
                                max_shape=[1, 3, 8, 8])))
                ])))
    return deploy_cfg
def generate_onnx_file(model):
    """Export ``model`` to the module-level ``onnx_file``, tracing with the
    module-level ``test_img`` and dynamic batch/spatial axes."""
    with torch.no_grad():
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'width',
                3: 'height'
            },
            'output': {
                0: 'batch'
            }
        }
        torch.onnx.export(
            model,
            test_img,
            onnx_file,
            output_names=['output'],
            input_names=['input'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11,
            dynamic_axes=dynamic_axes)
    assert osp.exists(onnx_file)
@backend_checker(Backend.TENSORRT)
def test_onnx2tensorrt():
    """Build a TensorRT engine from the fixture ONNX model and check it can
    be loaded back."""
    from mmdeploy.apis.tensorrt import onnx2tensorrt
    from mmdeploy.backend.tensorrt import load
    model = test_model
    generate_onnx_file(model)
    deploy_cfg = get_deploy_cfg()
    work_dir, save_file = osp.split(engine_file)
    # NOTE(review): the third argument (0) is presumably the model/partition
    # index expected by onnx2tensorrt — confirm against its signature.
    onnx2tensorrt(work_dir, save_file, 0, deploy_cfg, onnx_file)
    assert osp.exists(work_dir)
    assert osp.exists(engine_file)
    engine = load(engine_file)
    assert engine is not None
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import pytest
import torch
import torch.nn as nn
from mmdeploy.utils import Backend
from mmdeploy.utils.test import backend_checker
# Fixture paths/tensors shared by the TVM conversion test below.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
test_img = torch.rand([1, 3, 8, 8])
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    # A single 3->8 channel conv gives TVM a real op to compile and tune.
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, 3, 1, 1)
    def forward(self, x):
        return self.conv(x)
test_model = TestModel().eval()
def generate_onnx_file(model):
    """Export ``model`` to the module-level ``onnx_file`` with a static
    shape (no dynamic axes)."""
    with torch.no_grad():
        torch.onnx.export(
            model,
            test_img,
            onnx_file,
            output_names=['output'],
            input_names=['input'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11)
    assert osp.exists(onnx_file)
@backend_checker(Backend.TVM)
def test_onnx2tvm():
    """Convert the fixture ONNX model with the TVM backend using three
    tuners (default, AutoTVM, Ansor) and check the artifacts exist.

    Fix over the original: the initial ``bytecode_path``/``log_file``
    assignments before the default-tuner conversion were dead stores (both
    were reassigned before any use) and have been removed.
    """
    from mmdeploy.apis.tvm import from_onnx, get_library_ext
    model = test_model
    generate_onnx_file(model)
    work_dir, _ = osp.split(onnx_file)
    file_name = osp.splitext(onnx_file)[0]
    ext = get_library_ext()
    lib_path = osp.join(work_dir, file_name + ext)
    shape = {'input': test_img.shape}
    dtype = {'input': 'float32'}
    target = 'llvm'
    # test default tuner
    tuner_dict = dict(type='DefaultTuner', target=target)
    from_onnx(onnx_file, lib_path, shape=shape, dtype=dtype, tuner=tuner_dict)
    assert osp.exists(lib_path)
    # test autotvm (single trial, via the VM path with bytecode output)
    lib_path = osp.join(work_dir, file_name + '_autotvm' + ext)
    bytecode_path = osp.join(work_dir, file_name + '_autotvm.code')
    log_file = osp.join(work_dir, file_name + '_autotvm.log')
    tuner_dict = dict(
        type='AutoTVMTuner',
        target=target,
        log_file=log_file,
        n_trial=1,
        tuner=dict(type='XGBTuner'))
    from_onnx(
        onnx_file,
        lib_path,
        use_vm=True,
        bytecode_file=bytecode_path,
        shape=shape,
        dtype=dtype,
        tuner=tuner_dict)
    assert osp.exists(lib_path)
    assert osp.exists(bytecode_path)
    # test ansor (auto-scheduler, two measurement trials)
    lib_path = osp.join(work_dir, file_name + '_ansor' + ext)
    bytecode_path = osp.join(work_dir, file_name + '_ansor.code')
    log_file = osp.join(work_dir, file_name + '_ansor.log')
    tuner_dict = dict(
        type='AutoScheduleTuner',
        target=target,
        log_file=log_file,
        num_measure_trials=2)
    from_onnx(
        onnx_file,
        lib_path,
        use_vm=True,
        bytecode_file=bytecode_path,
        shape=shape,
        dtype=dtype,
        tuner=tuner_dict)
    assert osp.exists(lib_path)
    assert osp.exists(bytecode_path)
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from typing import Any, List, Tuple
import onnx
import pytest
import torch
import torch.nn as nn
from packaging import version
from mmdeploy.apis.onnx.optimizer import \
model_to_graph__custom_optimizer # noqa
from mmdeploy.core import RewriterContext
# Exported graphs are written to a throw-away .onnx file.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
# Minimal onnxruntime deploy config for tests that need backend context.
ort_cfg = dict(
    backend_config=dict(type='onnxruntime'), onnx_config=dict(type='onnx'))
def _find_next_node(start: int, nodes: List, op_type: str) -> Tuple[Any, int]:
for idx, n in enumerate(nodes[start:]):
if n.op_type == op_type:
return n, idx
return None, -1
def test_merge_shape_concate():
    """Export a model that reads ``x.shape[-2:]`` through the
    merge-shape-concate pass and check the resulting graph contains the
    expected Shape -> Gather -> ConstantOfShape sequence."""
    pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx')
    try:
        from mmdeploy.backend.torchscript import ts_optimizer
        opt_pass = ts_optimizer.onnx._jit_pass_merge_shape_concate
    except ImportError:
        pytest.skip('pass not found.')
    def _optimize_onnx(ctx, graph, params_dict, torch_out):
        # Custom-pass hook invoked by RewriterContext during export.
        opt_pass(graph)
        return graph, params_dict, torch_out
    class TestModel(nn.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x):
            return x.new_zeros(x.shape[-2:])
    model = TestModel()
    x = torch.rand(1, 3, 4, 8)
    with RewriterContext({}, onnx_custom_passes=_optimize_onnx):
        torch.onnx.export(
            model,
            x,
            onnx_file,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes=dict(input={
                2: 'h',
                3: 'w'
            }),
            opset_version=11)
    onnx_model = onnx.load(onnx_file)
    graph = onnx_model.graph
    nodes = graph.node
    # Locate the first 'Shape' node.
    shape_idx = 0
    for n in nodes:
        if n.op_type != 'Shape':
            shape_idx += 1
        else:
            break
    assert shape_idx < len(nodes)
    assert nodes[shape_idx + 1].op_type == 'Gather'
    assert nodes[shape_idx + 2].op_type == 'ConstantOfShape'
def test_peephole():
    """Export a model with redundant casts and duplicated reshapes through
    the ONNX peephole pass and check the graph contains, in order, an INT32
    cast, a FLOAT cast, and two Reshape nodes."""
    pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx')
    try:
        from mmdeploy.backend.torchscript import ts_optimizer
        opt_pass = ts_optimizer.onnx._jit_pass_onnx_peephole
    except ImportError:
        pytest.skip('pass not found.')
    def _optimize_onnx(ctx, graph, params_dict, torch_out):
        # Custom-pass hook invoked by RewriterContext during export.
        opt_pass(graph)
        return graph, params_dict, torch_out
    class TestModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x):
            # Second int() cast is redundant; the three views overlap.
            x = x.int()
            x = x.int()
            x = x.float()
            x = x.view(10, -1)
            y = x.view(2, -1)
            z = x.view(3, -1)
            return y, z
    model = TestModel()
    x = torch.rand(2, 3, 5)
    with RewriterContext({}, onnx_custom_passes=_optimize_onnx):
        torch.onnx.export(
            model,
            x,
            onnx_file,
            input_names=['input'],
            output_names=['output1', 'output2'],
            dynamic_axes=dict(input={
                0: 'b',
                1: 'c',
                2: 'w'
            }),
            opset_version=11)
    onnx_model = onnx.load(onnx_file)
    graph = onnx_model.graph
    nodes = graph.node
    node, idx = _find_next_node(0, nodes, 'Cast')
    assert node is not None
    # 6 == TensorProto.INT32 in the ONNX dtype enum (matches x.int()).
    assert node.attribute[0].i == 6
    node, idx = _find_next_node(idx + 1, nodes, 'Cast')
    assert node is not None
    # 1 == TensorProto.FLOAT (matches x.float()).
    assert node.attribute[0].i == 1
    node, idx = _find_next_node(idx + 1, nodes, 'Reshape')
    assert node is not None
    node, idx = _find_next_node(idx + 1, nodes, 'Reshape')
    assert node is not None
def test_flatten_cls_head():
    """Export a GAP + reshape classification-head pattern through the
    flatten-cls-head pass and check it becomes
    GlobalAveragePool -> Flatten in the graph."""
    pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx')
    try:
        from mmdeploy.backend.torchscript import ts_optimizer
        opt_pass = ts_optimizer.onnx._jit_pass_flatten_cls_head
    except ImportError:
        pytest.skip('pass not found.')
    def _optimize_onnx(ctx, graph, params_dict, torch_out):
        # Custom-pass hook invoked by RewriterContext during export.
        opt_pass(graph)
        return graph, params_dict, torch_out
    class TestModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
        def forward(self, x):
            batch = x.size(0)
            gap = nn.functional.adaptive_avg_pool2d(x, (1, 1))
            gap = gap.reshape(batch, -1)
            return gap + 1  # gap should not be the output
    model = TestModel()
    x = torch.rand(1, 4, 8, 8)
    with RewriterContext(ort_cfg, onnx_custom_passes=_optimize_onnx):
        torch.onnx.export(
            model,
            x,
            onnx_file,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes=dict(input={
                2: 'h',
                3: 'w'
            }),
            opset_version=11)
    onnx_model = onnx.load(onnx_file)
    graph = onnx_model.graph
    nodes = graph.node
    node, idx = _find_next_node(0, nodes, 'GlobalAveragePool')
    assert node is not None
    node, idx = _find_next_node(idx + 1, nodes, 'Flatten')
    assert node is not None
def test_fuse_select_assign():
    """Export a masked-assignment pattern through the fuse-select-assign
    pass and check the graph contains a fused Where node."""
    pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx')
    # TODO fix later
    if version.parse(torch.__version__) >= version.parse('2.0.0'):
        pytest.skip('ignore torch>=2.0.0')
    try:
        from mmdeploy.backend.torchscript import ts_optimizer
        opt_pass = ts_optimizer.onnx._jit_pass_fuse_select_assign
    except ImportError:
        pytest.skip('pass not found.')
    def _optimize_onnx(ctx, graph, params_dict, torch_out):
        # Custom-pass hook invoked by RewriterContext during export.
        opt_pass(graph, params_dict)
        return graph, params_dict, torch_out
    class TestModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
        def forward(self, x):
            # Masked assignment: y takes z where x < 0.5, else stays 0.
            z = x / 2
            y = torch.zeros_like(x)
            y[x < 0.5] = z[x < 0.5]
            return y
    model = TestModel()
    x = torch.rand(1, 4, 8, 8)
    with RewriterContext({}, onnx_custom_passes=_optimize_onnx):
        torch.onnx.export(
            model,
            x,
            onnx_file,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes=dict(input={
                2: 'h',
                3: 'w'
            }),
            opset_version=11)
    onnx_model = onnx.load(onnx_file)
    graph = onnx_model.graph
    nodes = graph.node
    node, _ = _find_next_node(0, nodes, 'Where')
    assert node is not None
def test_common_subgraph_elimination():
    """Export a model with two identical unsqueeze subgraphs through the
    CSE pass and check only one Unsqueeze node remains."""
    pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx')
    try:
        from mmdeploy.backend.torchscript import ts_optimizer
        opt_pass = ts_optimizer.onnx._jit_pass_common_subgraph_elimination
    except ImportError:
        pytest.skip('pass not found.')
    def _optimize_onnx(ctx, graph, params_dict, torch_out):
        # Custom-pass hook invoked by RewriterContext during export.
        opt_pass(graph, params_dict)
        return graph, params_dict, torch_out
    class TestModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
        def forward(self, x):
            # y and z are identical subexpressions — candidates for CSE.
            y = x.unsqueeze(1)
            z = x.unsqueeze(1)
            return y + z
    model = TestModel()
    x = torch.rand(1, 2, 3)
    with RewriterContext({}, onnx_custom_passes=_optimize_onnx):
        torch.onnx.export(
            model,
            x,
            onnx_file,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes=dict(input={
                1: 'h',
                2: 'w'
            }),
            opset_version=11)
    onnx_model = onnx.load(onnx_file)
    graph = onnx_model.graph
    nodes = graph.node
    unsqueeze_count = 0
    for n in nodes:
        if n.op_type == 'Unsqueeze':
            unsqueeze_count += 1
    assert unsqueeze_count == 1
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import onnx
import pytest
import torch
import torch.nn as nn
from mmengine import Config
from mmdeploy.apis.onnx import export
from mmdeploy.utils.config_utils import (get_backend, get_dynamic_axes,
get_onnx_config)
from mmdeploy.utils.test import get_random_name
# Export target is a throw-away .onnx file.
onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
@pytest.mark.skip(reason='This a not test class but a utility class.')
class TestModel(nn.Module):
    # Minimal module (halves its input); used only as an export fixture.
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x * 0.5
# NOTE(review): the fixture is moved to GPU although the backend under test
# is onnxruntime — confirm CUDA is really required here.
test_model = TestModel().eval().cuda()
test_img = torch.rand([1, 3, 8, 8])
# Randomized tensor names exercise name handling in the exporter.
input_name = get_random_name()
output_name = get_random_name()
# Dynamic axes in both supported forms: a mapping with axis labels ...
dynamic_axes_dict = {
    input_name: {
        0: 'batch',
        2: 'height',
        3: 'width'
    },
    output_name: {
        0: 'batch'
    }
}
# ... and plain per-tensor axis-index lists.
dynamic_axes_list = [[0, 2, 3], [0]]
def get_deploy_cfg(input_name, output_name, dynamic_axes):
    """Build an onnxruntime deploy config for an mmagic codebase with the
    given tensor names and dynamic-axes specification (dict or list form)."""
    return Config(
        dict(
            onnx_config=dict(
                dynamic_axes=dynamic_axes,
                type='onnx',
                export_params=True,
                keep_initializers_as_inputs=False,
                opset_version=11,
                input_names=[input_name],
                output_names=[output_name],
                input_shape=None),
            codebase_config=dict(type='mmagic', task=''),
            backend_config=dict(type='onnxruntime')))
@pytest.mark.parametrize('input_name', [input_name])
@pytest.mark.parametrize('output_name', [output_name])
@pytest.mark.parametrize('dynamic_axes',
                         [dynamic_axes_dict, dynamic_axes_list])
def test_torch2onnx(input_name, output_name, dynamic_axes):
    """Export the fixture model to ONNX and validate the resulting file.

    Runs once with the dict form and once with the list form of
    ``dynamic_axes``, then checks the exported model with the ONNX
    checker.
    """
    deploy_cfg = get_deploy_cfg(input_name, output_name, dynamic_axes)
    output_prefix = osp.splitext(onnx_file)[0]
    context_info = dict(cfg=deploy_cfg)
    backend = get_backend(deploy_cfg).value
    onnx_cfg = get_onnx_config(deploy_cfg)
    opset_version = onnx_cfg.get('opset_version', 11)
    input_names = onnx_cfg['input_names']
    output_names = onnx_cfg['output_names']
    # The list form of dynamic_axes carries bare indices; resolve them
    # against the input/output names into the dict form ONNX expects.
    axis_names = input_names + output_names
    dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)
    # `strip_doc_string` is the legacy verbosity switch, `verbose` the
    # current one; honor either.
    verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(
        'verbose', False)
    keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',
                                               True)
    export(
        test_model,
        test_img,
        context_info=context_info,
        output_path_prefix=output_prefix,
        backend=backend,
        input_names=input_names,
        output_names=output_names,
        opset_version=opset_version,
        dynamic_axes=dynamic_axes,
        verbose=verbose,
        keep_initializers_as_inputs=keep_initializers_as_inputs)
    assert osp.exists(onnx_file)
    model = onnx.load(onnx_file)
    assert model is not None
    try:
        onnx.checker.check_model(model)
    except onnx.checker.ValidationError as e:
        # `assert False` would discard the checker's message and is
        # stripped entirely under `python -O`; fail explicitly instead.
        pytest.fail(f'exported ONNX model failed validation: {e}')
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import tempfile
import pytest
from mmengine import Config
from mmdeploy.apis import torch2torchscript
from mmdeploy.utils import IR, Backend
from mmdeploy.utils.test import get_random_name
# Unique path for the TorchScript artifact; only the generated name is
# kept, the NamedTemporaryFile handle is discarded.
ts_file = tempfile.NamedTemporaryFile(suffix='.pt').name
# Randomized IO tensor names used to parametrize the test below.
input_name = get_random_name()
output_name = get_random_name()
def get_deploy_cfg(input_name, output_name):
    """Build a minimal TorchScript deploy config for mmagic SR.

    Args:
        input_name (str): Name assigned to the model input.
        output_name (str): Name assigned to the model output.

    Returns:
        Config: ir/codebase/backend config for TorchScript conversion.
    """
    ir_config = dict(
        type=IR.TORCHSCRIPT.value,
        input_names=[input_name],
        output_names=[output_name],
        input_shape=None)
    return Config(
        dict(
            ir_config=ir_config,
            codebase_config=dict(type='mmagic', task='SuperResolution'),
            backend_config=dict(type=Backend.TORCHSCRIPT.value)))
def get_model_cfg():
    """Load the mmagic super-resolution model config used by this test."""
    import mmengine
    cfg_path = 'tests/test_codebase/test_mmagic/data/model.py'
    return mmengine.Config.fromfile(cfg_path)
@pytest.mark.parametrize('input_name', [input_name])
@pytest.mark.parametrize('output_name', [output_name])
@pytest.mark.skipif(
    not importlib.util.find_spec('mmagic'), reason='requires mmagic')
def test_torch2torchscript(input_name, output_name):
    """Run a random HWC image through torch2torchscript on CPU and check
    that the TorchScript artifact is written to disk."""
    import numpy as np
    deploy_cfg = get_deploy_cfg(input_name, output_name)
    # An 8x8 RGB image of random uint-range values is enough to trace.
    img = np.random.randint(0, 255, (8, 8, 3))
    torch2torchscript(
        img,
        '',
        ts_file,
        deploy_cfg,
        model_cfg=get_model_cfg(),
        device='cpu')
    assert osp.exists(ts_file)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment