Commit a17c53b8 authored by limm

add test_mmdet, test_mmocr and test_mmpose

parent a72d0dfa
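# ============================================================================
# tests/test_codebase/test_mmocr/test_text_detection.py (path inferred from
# the TextDetection task and dbnet config used below; not shown in the diff)
# ============================================================================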
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory

import mmengine
import numpy as np
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper

model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmengine.Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmocr', task='TextDetection'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = None
img_shape = (32, 32)
img = np.random.rand(*img_shape, 3).astype(np.uint8)
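
# The deploy config above describes a minimal ONNX Runtime deployment
# (opset 11, dynamic input shape); the random 32x32 image is only a
# stand-in input for the create_input/visualize tests below.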


@pytest.fixture(autouse=True)
def init_task_processor():
    try:
        import_codebase(Codebase.MMOCR)
    except ImportError:
        pytest.skip(
            f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')


def test_build_pytorch_model():
    from mmocr.utils.setup_env import register_all_modules
    register_all_modules()
    from mmocr.models.textdet.detectors.single_stage_text_detector import \
        SingleStageTextDetector
    model = task_processor.build_pytorch_model(None)
    assert isinstance(model, SingleStageTextDetector)


@pytest.fixture
def backend_model():
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(outputs={
        'output': torch.rand(1, *img_shape),
    })
    yield task_processor.build_backend_model([''])
    wrapper.recover()
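
# SwitchBackendWrapper (above) patches ORTWrapper to return the canned
# 'output' tensor, so the backend tests below never execute a real .onnx
# model file.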


def test_build_backend_model(backend_model):
    assert isinstance(backend_model, torch.nn.Module)


def test_create_input():
    inputs = task_processor.create_input(img, input_shape=img_shape)
    assert isinstance(inputs, tuple) and len(inputs) == 2


def test_visualize(backend_model):
    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as tmp_dir:
        # join properly with the temp dir; `dir + 'tmp.jpg'` would write
        # next to (not inside) the directory, and `dir` shadows the builtin
        filename = os.path.join(tmp_dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'tmp')
        assert os.path.exists(filename)


def test_get_tensor_from_input():
    input_data = {'inputs': torch.ones(3, 4, 5)}
    inputs = task_processor.get_tensor_from_input(input_data)
    assert torch.equal(inputs, torch.ones(3, 4, 5))


def test_get_partition_cfg():
    with pytest.raises(NotImplementedError):
        _ = task_processor.get_partition_cfg(partition_type='')
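

# ============================================================================
# tests/test_codebase/test_mmocr/test_text_detection_models.py (inferred
# path; not shown in the diff)
# ============================================================================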
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

try:
    import_codebase(Codebase.MMOCR)
except ImportError:
    pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)

IMAGE_SIZE = 32


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'outputs': torch.rand(1, IMAGE_SIZE, IMAGE_SIZE),
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmengine.Config(
            {'onnx_config': {
                'output_names': ['outputs']
            }})
        model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
        model_cfg = load_config(model_cfg_path)[0]
        from mmdeploy.codebase.mmocr.deploy.text_detection_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        cls.wrapper.recover()
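
    # The test below runs forward with ori_shape equal to and double the
    # network input size, exercising the path that rescales predictions
    # back to the original image shape.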
    @pytest.mark.parametrize(
        'ori_shape',
        [[IMAGE_SIZE, IMAGE_SIZE], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE]])
    def test_forward(self, ori_shape):
        imgs = torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)
        img_meta = {
            'ori_shape': ori_shape,
            'img_shape': [IMAGE_SIZE, IMAGE_SIZE],
            'scale_factor': [1., 1.],
            'img_path': ''
        }
        from mmengine.structures import InstanceData
        from mmocr.structures import TextDetDataSample
        pred_instances = InstanceData(metainfo=img_meta)
        data_sample = TextDetDataSample(pred_instances=pred_instances)
        data_sample.set_metainfo(img_meta)
        results = self.end2end_model.forward(imgs, [data_sample])
        assert results is not None, 'failed to get output using End2EndModel'


@backend_checker(Backend.ONNXRUNTIME)
def test_build_text_detection_model():
    model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
    model_cfg = load_config(model_cfg_path)[0]
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['outputs']),
            codebase_config=dict(type='mmocr')))
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmocr.deploy.text_detection_model import (
            End2EndModel, build_text_detection_model)
        # `text_detector` instead of the misleading `segmentor`
        text_detector = build_text_detection_model([''], model_cfg,
                                                   deploy_cfg, 'cpu')
        assert isinstance(text_detector, End2EndModel)
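

# ============================================================================
# tests/test_codebase/test_mmocr/test_text_recognition.py (path inferred from
# the TextRecognition task and crnn config used below; not shown in the diff)
# ============================================================================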
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory

import mmengine
import numpy as np
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper

model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmengine.Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmocr', task='TextRecognition'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = None
img_shape = (32, 32)
img = np.random.rand(*img_shape, 3).astype(np.uint8)


@pytest.fixture(autouse=True)
def init_task_processor():
    try:
        import_codebase(Codebase.MMOCR)
    except ImportError:
        pytest.skip(
            f'{Codebase.MMOCR} is not installed.', allow_module_level=True)
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')


def test_build_pytorch_model():
    from mmocr.utils.setup_env import register_all_modules
    register_all_modules()
    from mmocr.models.textrecog.recognizers import BaseRecognizer
    model = task_processor.build_pytorch_model(None)
    assert isinstance(model, BaseRecognizer)


@pytest.fixture
def backend_model():
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(outputs={
        'output': torch.rand(1, 9, 37),
    })
    yield task_processor.build_backend_model([''])
    wrapper.recover()
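
# The canned (1, 9, 37) output mimics a CRNN-style sequence prediction:
# 9 decoding steps over a 37-way character distribution (26 letters, 10
# digits and a blank). This reading is an inference from the crnn config,
# not stated in the diff.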


def test_build_backend_model(backend_model):
    assert isinstance(backend_model, torch.nn.Module)


def test_create_input():
    inputs = task_processor.create_input(img, input_shape=img_shape)
    assert isinstance(inputs, tuple) and len(inputs) == 2


def test_visualize(backend_model):
    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as tmp_dir:
        # join properly with the temp dir; `dir + 'tmp.jpg'` would write
        # next to (not inside) the directory, and `dir` shadows the builtin
        filename = os.path.join(tmp_dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'tmp')
        assert os.path.exists(filename)


def test_get_tensor_from_input():
    input_data = {'inputs': torch.ones(3, 4, 5)}
    inputs = task_processor.get_tensor_from_input(input_data)
    assert torch.equal(inputs, torch.ones(3, 4, 5))


def test_get_partition_cfg():
    # assert the exception is actually raised; a bare try/except-pass would
    # also pass when get_partition_cfg silently succeeds
    with pytest.raises(NotImplementedError):
        _ = task_processor.get_partition_cfg(partition_type='')
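

# ============================================================================
# tests/test_codebase/test_mmocr/test_text_recognition_models.py (inferred
# path; not shown in the diff)
# ============================================================================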
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

try:
    import_codebase(Codebase.MMOCR)
except ImportError:
    pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)

IMAGE_SIZE = 32


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'output': torch.rand(1, 9, 37),
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmengine.Config(
            {'onnx_config': {
                'output_names': ['output']
            }})
        model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
        model_cfg = load_config(model_cfg_path)[0]
        from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        cls.wrapper.recover()

    @pytest.mark.parametrize(
        'ori_shape',
        [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]])
    def test_forward(self, ori_shape):
        imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
        img_meta = {
            'ori_shape': ori_shape,
            'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3],
            'scale_factor': [1., 1.]
        }
        from mmengine.structures import InstanceData
        from mmocr.structures import TextRecogDataSample
        pred_instances = InstanceData(metainfo=img_meta)
        data_sample = TextRecogDataSample(pred_instances=pred_instances)
        data_sample.set_metainfo(img_meta)
        results = self.end2end_model.forward(imgs, [data_sample])
        assert results is not None, 'failed to get output using End2EndModel'


@backend_checker(Backend.ONNXRUNTIME)
def test_build_text_recognition_model():
    model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
    model_cfg = load_config(model_cfg_path)[0]
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['outputs']),
            codebase_config=dict(type='mmocr')))
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmocr.deploy.text_recognition_model import (
            End2EndModel, build_text_recognition_model)
        # `recognizer` instead of the misleading `segmentor`
        recognizer = build_text_recognition_model([''], model_cfg,
                                                  deploy_cfg, 'cpu')
        assert isinstance(recognizer, End2EndModel)
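

# ============================================================================
# tests/test_codebase/test_mmpose/__init__.py (inferred path: re-exports the
# package-relative test utilities used by the pose tests below)
# ============================================================================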
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (generate_datasample, generate_mmpose_deploy_config,
                    generate_mmpose_task_processor)

__all__ = [
    'generate_datasample', 'generate_mmpose_deploy_config',
    'generate_mmpose_task_processor'
]
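

# ============================================================================
# tests/test_codebase/test_mmpose/conftest.py (inferred path: an autouse
# fixture that registers mmpose modules before every test)
# ============================================================================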
# Copyright (c) OpenMMLab. All rights reserved.
import pytest


@pytest.fixture(autouse=True)
def init_test():
    # init default scope
    from mmpose.utils import register_all_modules
    register_all_modules(True)
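

# ============================================================================
# tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json
# (inferred path: the model config below points its dataloader and CocoMetric
# at this file; a single-image, single-annotation COCO keypoints sample)
# ============================================================================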
{"info": {"description": "COCO 2017 Dataset","url": "http://cocodataset.org","version": "1.0","year": 2017,"contributor": "COCO Consortium","date_created": "2017/09/01"},"licenses": [{"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/","id": 1,"name": "Attribution-NonCommercial-ShareAlike License"},{"url": "http://creativecommons.org/licenses/by-nc/2.0/","id": 2,"name": "Attribution-NonCommercial License"},{"url": "http://creativecommons.org/licenses/by-nc-nd/2.0/","id": 3,"name": "Attribution-NonCommercial-NoDerivs License"},{"url": "http://creativecommons.org/licenses/by/2.0/","id": 4,"name": "Attribution License"},{"url": "http://creativecommons.org/licenses/by-sa/2.0/","id": 5,"name": "Attribution-ShareAlike License"},{"url": "http://creativecommons.org/licenses/by-nd/2.0/","id": 6,"name": "Attribution-NoDerivs License"},{"url": "http://flickr.com/commons/usage/","id": 7,"name": "No known copyright restrictions"},{"url": "http://www.usa.gov/copyright.shtml","id": 8,"name": "United States Government Work"}],"images": [{"license": 4,"file_name": "000000397133.jpg","coco_url": "http://images.cocodataset.org/val2017/000000397133.jpg","height": 427,"width": 640,"date_captured": "2013-11-14 17:02:52","flickr_url": "http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg","id": 397133}], "annotations": [{"segmentation": [[125.12,539.69,140.94,522.43,100.67,496.54,84.85,469.21,73.35,450.52,104.99,342.65,168.27,290.88,179.78,288,189.84,286.56,191.28,260.67,202.79,240.54,221.48,237.66,248.81,243.42,257.44,256.36,253.12,262.11,253.12,275.06,299.15,233.35,329.35,207.46,355.24,206.02,363.87,206.02,365.3,210.34,373.93,221.84,363.87,226.16,363.87,237.66,350.92,237.66,332.22,234.79,314.97,249.17,271.82,313.89,253.12,326.83,227.24,352.72,214.29,357.03,212.85,372.85,208.54,395.87,228.67,414.56,245.93,421.75,266.07,424.63,276.13,437.57,266.07,450.52,284.76,464.9,286.2,479.28,291.96,489.35,310.65,512.36,284.76,549.75,244.49,522.43,215.73,546.88,199.91,558.38,204.22,565.57,189.84,568.45,184.09,575.64,172.58,578.52,145.26,567.01,117.93,551.19,133.75,532.49]],"num_keypoints": 10,"area": 47803.27955,"iscrowd": 0,"keypoints": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,142,309,1,177,320,2,191,398,2,237,317,2,233,426,2,306,233,2,92,452,2,123,468,2,0,0,0,251,469,2,0,0,0,162,551,2],"image_id": 425226,"bbox": [73.35,206.02,300.58,372.5],"category_id": 1,"id": 183126}], "categories": [{"supercategory": "person","id": 1,"name": "person","keypoints": ["nose","left_eye","right_eye","left_ear","right_ear","left_shoulder","right_shoulder","left_elbow","right_elbow","left_wrist","right_wrist","left_hip","right_hip","left_knee","right_knee","left_ankle","right_ankle"],"skeleton": [[16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13],[6,7],[6,8],[7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]]}]}
# Copyright (c) OpenMMLab. All rights reserved.
# model settings
codec = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
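# Heatmaps are encoded at 1/4 of the input resolution (192x256 -> 48x64),
# the usual stride-4 setup for MSRA-style Gaussian heatmaps.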

test_cfg = dict(
    flip_test=False,
    flip_mode='heatmap',
    shift_heatmap=True,
)

model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(type='ResNet', depth=18),
    head=dict(
        type='HeatmapHead',
        in_channels=512,
        out_channels=17,
        deconv_out_channels=None,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),
        decoder=codec),
    test_cfg=test_cfg)

# dataset settings
dataset_type = 'CocoDataset'
data_mode = 'topdown'
data_root = 'tests/test_codebase/test_mmpose/data/'
file_client_args = dict(backend='disk')

test_pipeline = [
    dict(type='LoadImage', file_client_args=file_client_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]

val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/person_keypoints_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        lazy_init=True,
        serialize_data=False,
        pipeline=test_pipeline,
    ))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/person_keypoints_val2017.json')
test_evaluator = val_evaluator

# default_runtime
default_scope = 'mmpose'
default_hooks = dict()

vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer')
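
# ============================================================================
# tests/test_codebase/test_mmpose/test_pose_detection_model.py (inferred
# path; not shown in the diff)
# ============================================================================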
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker

IMAGE_H = 192
IMAGE_W = 256

try:
    import_codebase(Codebase.MMPOSE)
except ImportError:
    pytest.skip(
        f'{Codebase.MMPOSE} is not installed.', allow_module_level=True)

from .utils import generate_datasample  # noqa: E402
from .utils import generate_mmpose_deploy_config  # noqa: E402


@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'output': torch.rand(1, 1, IMAGE_H, IMAGE_W),
        }
        cls.wrapper.set(outputs=cls.outputs)
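        # The canned 'output' above stands in for a predicted keypoint
        # heatmap; its (1, 1, IMAGE_H, IMAGE_W) shape is only what this stub
        # returns, not necessarily the real head's output shape.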
        from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \
            End2EndModel
        model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
        model_cfg = load_config(model_cfg_path)[0]
        deploy_cfg = generate_mmpose_deploy_config()
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        cls.wrapper.recover()

    def test_forward(self):
        img = torch.rand(1, 3, IMAGE_H, IMAGE_W)
        data_samples = [generate_datasample((IMAGE_H, IMAGE_W))]
        results = self.end2end_model.forward(img, data_samples)
        assert results is not None, 'failed to get output using End2EndModel'


@backend_checker(Backend.ONNXRUNTIME)
def test_build_pose_detection_model():
    model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
    model_cfg = load_config(model_cfg_path)[0]
    deploy_cfg = generate_mmpose_deploy_config()
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmpose.deploy.pose_detection_model import (
            End2EndModel, build_pose_detection_model)
        posedetector = build_pose_detection_model([''], model_cfg, deploy_cfg,
                                                  'cpu')
        assert isinstance(posedetector, End2EndModel)