Commit a72d0dfa authored by limm's avatar limm
Browse files

add test_mmagic and test_mmaction

parent a3d381d2
Pipeline #2818 canceled with stages
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
@pytest.fixture(autouse=True)
def init_test():
    """Register every mmaction module into the default scope before each test."""
    from mmaction.utils import register_all_modules as _register_all_modules
    _register_all_modules(True)
# Copyright (c) OpenMMLab. All rights reserved.
# TSN 2D recognizer: ResNet-50 backbone plus an average-consensus TSN head.
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,  # Kinetics-400 label count
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01,
        average_clips=None),
    data_preprocessor=dict(
        type='ActionDataPreprocessor',
        mean=[123.675, 116.28, 103.53],  # ImageNet channel statistics
        std=[58.395, 57.12, 57.375],
        format_shape='NCHW'),
    train_cfg=None,
    test_cfg=None)
# Training schedule: 100 epochs, validating after every epoch.
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# Step LR decay (x0.1) at epochs 40 and 80.
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=100,
        by_epoch=True,
        milestones=[40, 80],
        gamma=0.1)
]
# SGD with gradient-norm clipping.
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001),
    clip_grad=dict(max_norm=40, norm_type=2))
default_scope = 'mmaction'
default_hooks = dict(
    runtime_info=dict(type='RuntimeInfoHook'),
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=20, ignore_last=False),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(
        type='CheckpointHook', interval=3, save_best='auto', max_keep_ckpts=3),
    sampler_seed=dict(type='DistSamplerSeedHook'))
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'))
log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='ActionVisualizer', vis_backends=[dict(type='LocalVisBackend')])
log_level = 'INFO'
load_from = None
resume = False
# Dataset locations.
# NOTE(review): ann_file_val/data_root_val differ from the paths inlined in
# val_dataloader further down this file — confirm which is authoritative.
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/video'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/ann.txt'
# Training: 3 sampled frames per video, multi-scale crop, random flip.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='PackActionInputs')
]
# Validation: deterministic sampling with a center crop.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=3,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='PackActionInputs')
]
# Testing: 25 clips with ten-crop evaluation.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='PackActionInputs')
]
# Training dataloader over Kinetics-400 videos (pipeline inlined, mirrors
# train_pipeline above).
train_dataloader = dict(
    batch_size=32,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='VideoDataset',
        ann_file='data/kinetics400/kinetics400_train_list_videos.txt',
        data_prefix=dict(video='data/kinetics400/videos_train'),
        pipeline=[
            dict(type='DecordInit'),
            dict(
                type='SampleFrames', clip_len=1, frame_interval=1,
                num_clips=3),
            dict(type='DecordDecode'),
            dict(type='Resize', scale=(-1, 256)),
            dict(
                type='MultiScaleCrop',
                input_size=224,
                scales=(1, 0.875, 0.75, 0.66),
                random_crop=False,
                max_wh_scale_gap=1),
            dict(type='Resize', scale=(224, 224), keep_ratio=False),
            dict(type='Flip', flip_ratio=0.5),
            dict(type='FormatShape', input_format='NCHW'),
            dict(type='PackActionInputs')
        ]))
# Validation dataloader: points at the tiny in-repo test fixture data.
val_dataloader = dict(
    batch_size=32,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='VideoDataset',
        ann_file='tests/test_codebase/test_mmaction/data/ann.txt',
        data_prefix=dict(video='tests/test_codebase/test_mmaction/data/video'),
        pipeline=[
            dict(type='DecordInit'),
            dict(
                type='SampleFrames',
                clip_len=1,
                frame_interval=1,
                num_clips=3,
                test_mode=True),
            dict(type='DecordDecode'),
            dict(type='Resize', scale=(-1, 256)),
            dict(type='CenterCrop', crop_size=224),
            dict(type='FormatShape', input_format='NCHW'),
            dict(type='PackActionInputs')
        ],
        test_mode=True))
# Test dataloader: batch size 1 for ten-crop 25-clip evaluation.
test_dataloader = dict(
    batch_size=1,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='VideoDataset',
        ann_file='tests/test_codebase/test_mmaction/data/ann.txt',
        data_prefix=dict(video='tests/test_codebase/test_mmaction/data/video'),
        pipeline=[
            dict(type='DecordInit'),
            dict(
                type='SampleFrames',
                clip_len=1,
                frame_interval=1,
                num_clips=25,
                test_mode=True),
            dict(type='DecordDecode'),
            dict(type='Resize', scale=(-1, 256)),
            dict(type='TenCrop', crop_size=224),
            dict(type='FormatShape', input_format='NCHW'),
            dict(type='PackActionInputs')
        ],
        test_mode=True))
val_evaluator = dict(type='AccMetric')
test_evaluator = dict(type='AccMetric')
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine import Config
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs
try:
import_codebase(Codebase.MMACTION)
except ImportError:
pytest.skip(
f'{Codebase.MMACTION} is not installed.', allow_module_level=True)
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('model_cfg_path',
                         ['tests/test_codebase/test_mmaction/data/model.py'])
def test_forward_of_base_recognizer(model_cfg_path, backend):
    """The rewritten recognizer forward should produce backend outputs."""
    check_backend(backend)
    onnx_settings = dict(
        type='onnx',
        export_params=True,
        keep_initializers_as_inputs=False,
        opset_version=11,
        input_shape=None,
        input_names=['inputs'],
        output_names=['output'])
    deploy_cfg = Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            codebase_config=dict(type='mmaction', task='VideoRecognition'),
            onnx_config=onnx_settings))
    model_cfg = load_config(model_cfg_path)[0]

    from mmaction.apis import init_recognizer
    recognizer = init_recognizer(model_cfg, None, device='cpu')

    from mmaction.structures import ActionDataSample
    sample = ActionDataSample()
    sample.set_metainfo(dict(img_shape=(224, 224)))

    clip = torch.randn(1, 3, 3, 224, 224)
    wrapped = WrapModel(
        recognizer, 'forward', data_samples=[sample], mode='predict')
    outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapped,
        model_inputs={'inputs': clip},
        deploy_cfg=deploy_cfg)
    assert outputs is not None
# Copyright (c) OpenMMLab. All rights reserved.
from tempfile import NamedTemporaryFile, TemporaryDirectory
import pytest
import torch
from mmengine import Config
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper
try:
import_codebase(Codebase.MMACTION)
except ImportError:
pytest.skip(
f'{Codebase.MMACTION} is not installed.', allow_module_level=True)
# Module-level fixtures shared by the VideoRecognition task-processor tests.
model_cfg_path = 'tests/test_codebase/test_mmaction/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmaction', task='VideoRecognition'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
# Only the generated path is kept; no real ONNX file is written here.
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
img_shape = (224, 224)
num_classes = 400  # matches the TSN head in the model config
video = 'tests/test_codebase/test_mmaction/data/video/demo.mp4'
@pytest.fixture
def backend_model():
    """Yield a backend model whose ORT wrapper returns fixed random logits."""
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    switch = SwitchBackendWrapper(ORTWrapper)
    switch.set(outputs={'output': torch.rand(1, num_classes)})
    yield task_processor.build_backend_model([''])
    switch.recover()
def test_build_backend_model(backend_model):
    """The built backend model must be a torch Module."""
    assert isinstance(backend_model, torch.nn.Module)
def test_create_input():
    """create_input should return a (data, tensor) pair."""
    created = task_processor.create_input(video, input_shape=img_shape)
    assert isinstance(created, tuple)
    assert len(created) == 2
def test_build_pytorch_model():
    """build_pytorch_model should construct an mmaction recognizer."""
    from mmaction.models.recognizers.base import BaseRecognizer
    recognizer = task_processor.build_pytorch_model(None)
    assert isinstance(recognizer, BaseRecognizer)
def test_get_tensor_from_input():
    """The 'inputs' tensor should be extracted with values unchanged."""
    tensor = torch.ones(3, 4, 5)
    extracted = task_processor.get_tensor_from_input({'inputs': tensor})
    assert torch.equal(extracted, tensor)
def test_get_model_name():
    """get_model_name should return the model's name as a string."""
    model_name = task_processor.get_model_name()
    # isinstance(..., str) already implies the name is not None, so the
    # original trailing `and model_name is not None` was redundant.
    assert isinstance(model_name, str)
def test_build_dataset_and_dataloader():
    """Dataset and dataloader should build from the model config."""
    from torch.utils.data import DataLoader, Dataset
    built_dataset = task_processor.build_dataset(
        dataset_cfg=model_cfg.test_dataloader.dataset)
    assert isinstance(built_dataset, Dataset), 'Failed to build dataset'
    built_loader = task_processor.build_dataloader(
        task_processor.model_cfg.test_dataloader)
    assert isinstance(built_loader, DataLoader), 'Failed to build dataloader'
def test_build_test_runner(backend_model):
    """build_test_runner should produce a DeployTestRunner."""
    from mmdeploy.codebase.base.runner import DeployTestRunner
    # Use the context manager: `TemporaryDirectory().name` drops the last
    # reference to the TemporaryDirectory object, whose finalizer removes the
    # directory immediately — the runner would receive a path that no longer
    # exists.
    with TemporaryDirectory() as temp_dir:
        runner = task_processor.build_test_runner(backend_model, temp_dir)
        assert isinstance(runner, DeployTestRunner)
def test_get_preprocess():
    """get_preprocess should return a non-None pipeline description."""
    preprocess = task_processor.get_preprocess()
    assert preprocess is not None
def test_get_postprocess():
    """get_postprocess should return a dict describing postprocessing."""
    postprocess = task_processor.get_postprocess()
    assert isinstance(postprocess, dict)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine import Config
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
IMAGE_SIZE = 224
try:
import_codebase(Codebase.MMACTION)
except ImportError:
pytest.skip(
f'{Codebase.MMACTION} is not installed.', allow_module_level=True)
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """Tests for the mmaction End2EndModel against a mocked ORT backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'outputs': torch.rand(1, 400),  # fake logits, 400 classes
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = Config({'onnx_config': {'output_names': ['outputs']}})
        model_cfg_path = 'tests/test_codebase/test_mmaction/data/model.py'
        model_cfg = load_config(model_cfg_path)[0]
        from mmdeploy.codebase.mmaction.deploy.video_recognition_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        # restore the patched ORT wrapper after the whole class ran
        cls.wrapper.recover()

    def test_forward(self):
        # 1 batch x 3 clips x 3 channels x H x W
        inputs = torch.rand(1, 3, 3, IMAGE_SIZE, IMAGE_SIZE)
        from mmaction.structures import ActionDataSample
        data_sample = ActionDataSample(
            metainfo=dict(img_shape=(IMAGE_SIZE, IMAGE_SIZE)))
        results = self.end2end_model.forward(
            inputs, [data_sample], mode='predict')
        assert results is not None, 'failed to get output using '\
            'End2EndModel'
@backend_checker(Backend.ONNXRUNTIME)
def test_build_video_recognition_model():
    """build_video_recognition_model should return an End2EndModel."""
    model_cfg = load_config(
        'tests/test_codebase/test_mmaction/data/model.py')[0]
    deploy_cfg = Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['outputs']),
            codebase_config=dict(type='mmaction')))

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmaction.deploy.video_recognition_model import (
            End2EndModel, build_video_recognition_model)
        recognizer = build_video_recognition_model([''], model_cfg, deploy_cfg,
                                                   'cpu')
        assert isinstance(recognizer, End2EndModel)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
@pytest.fixture(autouse=True)
def init_test():
    """Register every mmagic module into the default scope before each test."""
    from mmagic.utils import register_all_modules as _register_all_modules
    _register_all_modules(True)
# Copyright (c) OpenMMLab. All rights reserved.
# Default runtime settings for the mmagic SRCNN test config.
default_scope = 'mmagic'
save_dir = './work_dirs'
# NOTE(review): `default_hooks` is re-assigned later in this file; the later
# assignment (without save_best/rule/max_keep_ckpts) is the effective one.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=100),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(
        type='CheckpointHook',
        interval=5000,
        out_dir=save_dir,
        by_epoch=False,
        max_keep_ckpts=10,
        save_best='PSNR',
        rule='greater',
    ),
    sampler_seed=dict(type='DistSamplerSeedHook'),
)
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=4),
    dist_cfg=dict(backend='nccl'),
)
log_level = 'INFO'
log_processor = dict(type='LogProcessor', window_size=100, by_epoch=False)
load_from = None
resume = False
experiment_name = 'srcnn_x4k915_1xb16-1000k_div2k'
work_dir = f'./work_dirs/{experiment_name}'
save_dir = './work_dirs/'  # NOTE(review): shadows the earlier save_dir value
scale = 4  # super-resolution upscale factor
# model settings
model = dict(
    type='BaseEditModel',
    generator=dict(
        type='SRCNNNet',
        channels=(3, 64, 32, 3),
        kernel_sizes=(9, 1, 5),
        upscale_factor=scale),
    pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
    train_cfg=dict(),
    test_cfg=dict(metrics=['PSNR'], crop_border=scale),
    data_preprocessor=dict(
        type='DataPreprocessor',
        mean=[0., 0., 0.],
        std=[255., 255., 255.],
    ))
# Training pipeline: load the LQ/GT pair, crop, then flip/transpose augment.
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='img',
        color_type='color',
        channel_order='rgb',
        imdecode_backend='cv2'),
    dict(
        type='LoadImageFromFile',
        key='gt',
        color_type='color',
        channel_order='rgb',
        imdecode_backend='cv2'),
    dict(type='SetValues', dictionary=dict(scale=scale)),
    dict(type='PairedRandomCrop', gt_patch_size=128),
    dict(
        type='Flip',
        keys=['img', 'gt'],
        flip_ratio=0.5,
        direction='horizontal'),
    dict(
        type='Flip', keys=['img', 'gt'], flip_ratio=0.5, direction='vertical'),
    dict(type='RandomTransposeHW', keys=['img', 'gt'], transpose_ratio=0.5),
    dict(type='PackInputs')
]
# Validation pipeline: load + pack only, no augmentation.
val_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='img',
        color_type='color',
        channel_order='rgb',
        imdecode_backend='cv2'),
    dict(
        type='LoadImageFromFile',
        key='gt',
        color_type='color',
        channel_order='rgb',
        imdecode_backend='cv2'),
    dict(type='PackInputs')
]
# dataset settings
dataset_type = 'BasicImageDataset'
data_root = 'data'
# DIV2K training split.
train_dataloader = dict(
    num_workers=4,
    batch_size=16,
    persistent_workers=False,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        ann_file='meta_info_DIV2K800sub_GT.txt',
        metainfo=dict(dataset_type='div2k', task_name='sisr'),
        data_root=data_root + '/DIV2K',
        data_prefix=dict(
            img='DIV2K_train_LR_bicubic/X4_sub', gt='DIV2K_train_HR_sub'),
        filename_tmpl=dict(img='{}', gt='{}'),
        pipeline=train_pipeline))
# Set5 validation split.
val_dataloader = dict(
    num_workers=4,
    persistent_workers=False,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        metainfo=dict(dataset_type='set5', task_name='sisr'),
        data_root=data_root + '/Set5',
        data_prefix=dict(img='LRbicx4', gt='GTmod12'),
        pipeline=val_pipeline))
val_evaluator = [
    dict(type='MAE'),
    dict(type='PSNR', crop_border=scale),
    dict(type='SSIM', crop_border=scale),
]
# 1M-iteration schedule, validating every 5000 iterations.
train_cfg = dict(
    type='IterBasedTrainLoop', max_iters=1000000, val_interval=5000)
val_cfg = dict(type='ValLoop')
# optimizer
optim_wrapper = dict(
    constructor='DefaultOptimWrapperConstructor',
    type='OptimWrapper',
    optimizer=dict(type='Adam', lr=2e-4, betas=(0.9, 0.99)))
# learning policy
param_scheduler = dict(
    type='CosineRestartLR',
    by_epoch=False,
    periods=[250000, 250000, 250000, 250000],
    restart_weights=[1, 1, 1, 1],
    eta_min=1e-7)
# NOTE(review): this re-assignment replaces the earlier `default_hooks`
# (dropping save_best/rule/max_keep_ckpts) — confirm that is intended.
default_hooks = dict(
    checkpoint=dict(
        type='CheckpointHook',
        interval=5000,
        save_optimizer=True,
        by_epoch=False,
        out_dir=save_dir,
    ),
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=100),
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
)
# Deterministic test-time pipeline shared by every test dataloader below.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='img',
        color_type='color',
        channel_order='rgb',
        imdecode_backend='cv2'),
    dict(
        type='LoadImageFromFile',
        key='gt',
        color_type='color',
        channel_order='rgb',
        imdecode_backend='cv2'),
    dict(type='PackInputs')
]
# test config for Set5
set5_data_root = 'data/Set5'
set5_dataloader = dict(
    num_workers=4,
    persistent_workers=False,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='BasicImageDataset',
        metainfo=dict(dataset_type='set5', task_name='sisr'),
        data_root=set5_data_root,
        data_prefix=dict(img='imgs', gt='imgs'),
        pipeline=test_pipeline))
set5_evaluator = [
    dict(type='PSNR', crop_border=4, prefix='Set5'),
    dict(type='SSIM', crop_border=4, prefix='Set5'),
]
# test config for Set14
set14_data_root = 'data/Set14'
set14_dataloader = dict(
    num_workers=4,
    persistent_workers=False,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='BasicImageDataset',
        metainfo=dict(dataset_type='set14', task_name='sisr'),
        # BUG FIX: this previously pointed at set5_data_root (copy-paste);
        # set14_data_root was defined but never used.
        data_root=set14_data_root,
        data_prefix=dict(img='imgs', gt='imgs'),
        pipeline=test_pipeline))
set14_evaluator = [
    dict(type='PSNR', crop_border=4, prefix='Set14'),
    dict(type='SSIM', crop_border=4, prefix='Set14'),
]
# Tiny in-repo dataset exercised by the unit tests themselves.
ut_data_root = 'tests/test_codebase/test_mmagic/data'
ut_dataloader = dict(
    num_workers=4,
    persistent_workers=False,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='BasicImageDataset',
        metainfo=dict(dataset_type='set14', task_name='sisr'),
        data_root=ut_data_root,
        data_prefix=dict(img='imgs', gt='imgs'),
        pipeline=test_pipeline))
# test config
test_cfg = dict(type='MultiTestLoop')
test_dataloader = [ut_dataloader, ut_dataloader]
test_evaluator = [set5_evaluator, set14_evaluator]
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from typing import Dict, List, Optional
import mmengine
import onnx
import pytest
import torch
from mmdeploy.codebase import import_codebase
from mmdeploy.core import RewriterContext
from mmdeploy.utils import Backend, Codebase, get_onnx_config
try:
import_codebase(Codebase.MMAGIC)
except ImportError:
pytest.skip(
f'{Codebase.MMAGIC} is not installed.', allow_module_level=True)
# Shared fixtures for the mmagic model-rewrite tests.
img = torch.rand(1, 3, 4, 4)  # tiny input keeps the ONNX export fast
model_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
deploy_cfg = mmengine.Config(
    dict(
        codebase_config=dict(
            type='mmagic',
            task='SuperResolution',
        ),
        backend_config=dict(
            type='tensorrt',
            common_config=dict(fp16_mode=False, max_workspace_size=1 << 10),
            model_inputs=[
                dict(
                    input_shapes=dict(
                        input=dict(
                            min_shape=[1, 3, 4, 4],
                            opt_shape=[1, 3, 4, 4],
                            max_shape=[1, 3, 4, 4])))
            ]),
        ir_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            save_file=model_file,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
def test_base_edit_model_forward():
    """The BaseEditModel forward rewrite should pass inputs through unchanged."""
    from mmagic.models.base_models.base_edit_model import BaseEditModel
    from mmagic.structures import DataSample
    from mmdeploy.codebase.mmagic import models  # noqa

    class DummyBaseEditModel(BaseEditModel):

        def __init__(self, generator, pixel_loss):
            super().__init__(generator, pixel_loss)

        def forward(self,
                    inputs: torch.Tensor,
                    data_samples: Optional[List[DataSample]] = None,
                    mode: str = 'tensor',
                    **kwargs):
            # Identity forward lets the test check pure pass-through.
            return inputs

    generator = dict(
        type='SRCNNNet',
        channels=(3, 64, 32, 3),
        kernel_sizes=(9, 1, 5),
        upscale_factor=4)
    pixel_loss = dict(type='L1Loss', loss_weight=1.0, reduction='mean')
    model = DummyBaseEditModel(generator, pixel_loss).eval()

    # BUG FIX: the original passed the `input` *builtin* (no variable named
    # `input` exists in this module), so both assertions vacuously compared
    # the builtin with itself. Use a real tensor and compare by identity,
    # since the dummy forward returns its input object unchanged.
    data = torch.rand(1, 3, 4, 4)
    model_output = model(data, None, mode='predict')
    with RewriterContext({}):
        backend_output = model(data)
    assert model_output is data
    assert backend_output is data
def test_srcnn():
    """Exporting a rewritten SRCNNNet to ONNX should yield a valid model."""
    from mmagic.models.editors.srcnn import SRCNNNet
    net = SRCNNNet()
    model_inputs = {'x': img}
    export_path = tempfile.NamedTemporaryFile(suffix='.onnx').name

    onnx_cfg = get_onnx_config(deploy_cfg)
    input_names = [name for name in model_inputs if name != 'ctx']
    dynamic_axes = onnx_cfg.get('dynamic_axes', None)
    if dynamic_axes is not None and not isinstance(dynamic_axes, Dict):
        dynamic_axes = zip(input_names, dynamic_axes)

    with RewriterContext(
            cfg=deploy_cfg, backend=Backend.TENSORRT.value), torch.no_grad():
        torch.onnx.export(
            net,
            tuple(model_inputs.values()),
            export_path,
            export_params=True,
            input_names=input_names,
            output_names=None,
            opset_version=11,
            dynamic_axes=dynamic_axes,
            keep_initializers_as_inputs=False)

    # The result should be different due to the rewrite.
    # So we only check if the file exists
    assert osp.exists(export_path)
    exported = onnx.load(export_path)
    assert exported is not None
    try:
        onnx.checker.check_model(exported)
    except onnx.checker.ValidationError:
        assert False
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import numpy as np
import pytest
import torch
from mmengine import Config
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
import mmdeploy.apis.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.core.rewriters.rewriter_manager import RewriterContext
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper, WrapFunction
try:
import_codebase(Codebase.MMAGIC)
except ImportError:
pytest.skip(
f'{Codebase.MMAGIC} is not installed.', allow_module_level=True)
# Shared config for the SuperResolution task-processor tests.
model_cfg = 'tests/test_codebase/test_mmagic/data/model.py'
model_cfg = load_config(model_cfg)[0]
deploy_cfg = Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmagic', task='SuperResolution'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
input_img = np.random.rand(32, 32, 3)
img_shape = [32, 32]
# NOTE(review): `input` shadows the builtin; kept as-is because the tests
# below reference this module-level name.
input = {'img': input_img}
# Only the generated path is kept; no real ONNX file is written here.
onnx_file = NamedTemporaryFile(suffix='.onnx').name
# Filled in by the autouse fixture before each test runs.
task_processor = None
@pytest.fixture(autouse=True)
def init_task_processor():
    """Rebuild the module-level task processor before every test (autouse)."""
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
@pytest.fixture
def backend_model():
    """Yield a backend model whose ORT wrapper returns a fixed SR tensor."""
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    switch = SwitchBackendWrapper(ORTWrapper)
    switch.set(outputs={'output': torch.rand(1, 3, 50, 50)})
    yield task_processor.build_backend_model([''])
    switch.recover()
def test_build_test_runner():
    """The deploy test runner should run over a dummy model's fixed outputs."""
    # Prepare dummy model
    from mmagic.structures import DataSample
    img_meta = dict(ori_img_shape=(32, 32, 3))
    img = torch.rand(3, 32, 32)
    data_sample = DataSample(gt_img=img, metainfo=img_meta)
    data_sample.set_data(
        dict(output=DataSample(pred_img=img, metainfo=img_meta)))
    data_sample.set_data(dict(input=img))
    outputs = [data_sample]
    model = DummyModel(outputs=outputs)
    assert model is not None
    # Run test
    with TemporaryDirectory() as dir:
        runner = task_processor.build_test_runner(model, dir)
        wrapped_func = WrapFunction(runner.test)
        with RewriterContext({}):
            _ = wrapped_func()
def test_build_pytorch_model():
    """build_pytorch_model should construct an mmagic BaseEditModel."""
    from mmagic.models import BaseEditModel
    editor = task_processor.build_pytorch_model(None)
    assert isinstance(editor, BaseEditModel)
def test_build_backend_model(backend_model):
    """The backend model fixture should build successfully."""
    assert backend_model is not None
def test_create_input():
    """create_input should accept a raw ndarray plus target shape."""
    created = task_processor.create_input(input_img, input_shape=img_shape)
    assert created is not None
def test_visualize(backend_model):
    """visualize should write an image file for a backend prediction."""
    input_dict, _ = task_processor.create_input(input_img, img_shape)
    with torch.no_grad():
        results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as dir:
        # BUG FIX: `dir + 'tmp.jpg'` concatenated without a separator,
        # creating a sibling path *outside* the temporary directory (and
        # leaking the file after cleanup). Join the path properly.
        filename = os.path.join(dir, 'tmp.jpg')
        task_processor.visualize(input_img, results, filename, 'window')
        assert os.path.exists(filename)
def test_get_tensor_from_input():
    """The raw input dict should not be handed back as a plain dict."""
    extracted = task_processor.get_tensor_from_input(input)
    assert type(extracted) is not dict
def test_get_partition_cfg():
    """Partitioning is not implemented for super-resolution."""
    with pytest.raises(NotImplementedError):
        task_processor.get_partition_cfg(None)
def test_build_dataset_and_dataloader():
    """Dataset and dataloader should build from an in-repo image folder."""
    data = dict(
        type='BasicImageDataset',
        ann_file='test_ann.txt',
        metainfo=dict(dataset_type='div2k', task_name='sisr'),
        data_root='tests/test_codebase/test_mmagic/data',
        data_prefix=dict(img='imgs', gt='imgs'),
        pipeline=[
            dict(
                type='LoadImageFromFile',
                key='img',
                color_type='color',
                channel_order='rgb',
                imdecode_backend='cv2'),
        ])
    dataset = task_processor.build_dataset(dataset_cfg=data)
    assert isinstance(dataset, Dataset), 'Failed to build dataset'
    dataloader_cfg = dict(
        num_workers=4,
        persistent_workers=False,
        drop_last=False,
        sampler=dict(type='DefaultSampler', shuffle=False),
        dataset=data)
    dataloader = task_processor.build_dataloader(dataloader_cfg)
    assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine import Config
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
try:
import_codebase(Codebase.MMAGIC)
except ImportError:
pytest.skip(
f'{Codebase.MMAGIC} is not installed.', allow_module_level=True)
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """Tests for the mmagic super-resolution End2EndModel wrapper."""

    @pytest.fixture(scope='class')
    def end2end_model(self):
        # force add backend wrapper regardless of plugins
        # make sure ONNXRuntimeEditor can use ORTWrapper inside itself
        from mmdeploy.backend.onnxruntime import ORTWrapper
        from mmdeploy.codebase.mmagic.deploy.super_resolution_model import \
            End2EndModel

        # simplify backend inference
        with SwitchBackendWrapper(ORTWrapper) as wrapper:
            outputs = {
                'outputs': torch.rand(3, 64, 64),  # fake SR output image
            }
            wrapper.set(outputs=outputs)
            deploy_cfg = Config({'onnx_config': {'output_names': ['outputs']}})
            model_cfg = 'tests/test_codebase/test_mmagic/data/model.py'
            model_cfg = load_config(model_cfg)[0]
            model = End2EndModel(
                Backend.ONNXRUNTIME, [''],
                'cpu',
                model_cfg,
                deploy_cfg,
                data_preprocessor=model_cfg.model.data_preprocessor)
            yield model

    def test_forward(self, end2end_model):
        # A forward pass in predict mode should return some results.
        input_img = torch.rand(1, 3, 32, 32)
        from mmagic.structures import DataSample
        img_metas = DataSample(metainfo={'ori_img_shape': [(32, 32, 3)]})
        results = end2end_model.forward(input_img, img_metas)
        assert results is not None
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.