Commit 481f872d authored by limm's avatar limm
Browse files

add test_mmpretrain test_mmrotate and test_mmseg

parent a17c53b8
Pipeline #2820 canceled with stages
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
@pytest.fixture(autouse=True)
def init_test():
    """Register every mmpretrain module so the default scope resolves."""
    from mmpretrain.utils import register_all_modules

    register_all_modules(True)
# Copyright (c) OpenMMLab. All rights reserved.
# mmengine/mmrazor config: SPOS (Single Path One-Shot) NAS algorithm
# wrapping the classifier defined in the base config 'model.py'.
_base_ = 'model.py'

norm_cfg = dict(type='BN')

# One-shot mutator: each searchable placeholder ('all_blocks') picks one of
# four ShuffleNet-style candidate operations.
mutator = dict(
    type='OneShotMutator',
    placeholder_mapping=dict(
        all_blocks=dict(
            type='OneShotOP',
            choices=dict(
                shuffle_3x3=dict(
                    type='ShuffleBlock', kernel_size=3, norm_cfg=norm_cfg),
                shuffle_5x5=dict(
                    type='ShuffleBlock', kernel_size=5, norm_cfg=norm_cfg),
                shuffle_7x7=dict(
                    type='ShuffleBlock', kernel_size=7, norm_cfg=norm_cfg),
                shuffle_xception=dict(
                    type='ShuffleXception', norm_cfg=norm_cfg),
            ))))

algorithm = dict(
    type='SPOS',
    architecture=dict(
        type='MMPretrainArchitecture',
        # mmengine base-variable substitution, resolved at config parse time.
        model={{_base_.model}},
    ),
    mutator=mutator,
    distiller=None,
    # Bug fix: directory is 'test_mmpretrain', not 'test_pretrain' — every
    # other path in this test suite uses 'test_mmpretrain', and the fixed
    # subnet yaml lives next to this config file.
    mutable_cfg='tests/test_codebase/test_mmpretrain/data/'
    'mmrazor_mutable_cfg.yaml',
    retraining=True)
# Searched SPOS subnet: for each searchable supernet block
# (stage_<stage>_block_<block>), the candidate op that was chosen.
stage_0_block_0:
  chosen:
  - shuffle_7x7
stage_0_block_1:
  chosen:
  - shuffle_5x5
stage_0_block_2:
  chosen:
  - shuffle_3x3
stage_0_block_3:
  chosen:
  - shuffle_5x5
stage_1_block_0:
  chosen:
  - shuffle_7x7
stage_1_block_1:
  chosen:
  - shuffle_3x3
stage_1_block_2:
  chosen:
  - shuffle_7x7
stage_1_block_3:
  chosen:
  - shuffle_3x3
stage_2_block_0:
  chosen:
  - shuffle_7x7
stage_2_block_1:
  chosen:
  - shuffle_3x3
stage_2_block_2:
  chosen:
  - shuffle_7x7
stage_2_block_3:
  chosen:
  - shuffle_xception
stage_2_block_4:
  chosen:
  - shuffle_3x3
stage_2_block_5:
  chosen:
  - shuffle_3x3
stage_2_block_6:
  chosen:
  - shuffle_3x3
stage_2_block_7:
  chosen:
  - shuffle_3x3
stage_3_block_0:
  chosen:
  - shuffle_xception
stage_3_block_1:
  chosen:
  - shuffle_7x7
stage_3_block_2:
  chosen:
  - shuffle_xception
stage_3_block_3:
  chosen:
  - shuffle_xception
# Copyright (c) OpenMMLab. All rights reserved.
# mmengine config: ResNet-18 ImageNet classifier plus minimal dataloaders
# pointing at the tiny image fixtures shipped with the test suite.
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='ResNet',
        depth=18,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=512,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5)))
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # standard ImageNet channel statistics
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs')
]
# Tiny batch/worker counts: these loaders only need to run, not train.
train_dataloader = dict(
    batch_size=2,
    num_workers=1,
    dataset=dict(
        type='ImageNet',
        data_root='tests/test_codebase/test_mmpretrain/data/imgs',
        ann_file='ann.txt',
        data_prefix='train',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='RandomResizedCrop', scale=224),
            dict(type='RandomFlip', prob=0.5, direction='horizontal'),
            dict(type='PackInputs')
        ]),
    sampler=dict(type='DefaultSampler', shuffle=True))
val_dataloader = dict(
    batch_size=2,
    num_workers=1,
    dataset=dict(
        type='ImageNet',
        data_root='tests/test_codebase/test_mmpretrain/data/imgs',
        ann_file='ann.txt',
        data_prefix='val',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='ResizeEdge', scale=256, edge='short'),
            dict(type='CenterCrop', crop_size=224),
            dict(type='PackInputs')
        ]),
    sampler=dict(type='DefaultSampler', shuffle=False))
val_evaluator = dict(type='Accuracy', topk=(1, 5))
# test split reuses the 'val' prefix on purpose — only fixture images exist
test_dataloader = dict(
    batch_size=2,
    num_workers=1,
    dataset=dict(
        type='ImageNet',
        data_root='tests/test_codebase/test_mmpretrain/data/imgs',
        ann_file='ann.txt',
        data_prefix='val',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='ResizeEdge', scale=256, edge='short'),
            dict(type='CenterCrop', crop_size=224),
            dict(type='PackInputs')
        ]),
    sampler=dict(type='DefaultSampler', shuffle=False))
test_evaluator = dict(type='Accuracy', topk=(1, 5))
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001))
param_scheduler = dict(
    type='MultiStepLR', by_epoch=True, milestones=[30, 60, 90], gamma=0.1)
train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
val_cfg = dict()
test_cfg = dict()
auto_scale_lr = dict(base_batch_size=256)
default_scope = 'mmpretrain'
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=100),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='VisualizationHook', enable=False))
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'))
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='UniversalVisualizer', vis_backends=[dict(type='LocalVisBackend')])
log_level = 'INFO'
load_from = None
resume = False
randomness = dict(seed=None, deterministic=False)
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any
import numpy as np
import pytest
import torch
from mmengine import Config
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper
# Skip the whole module if mmpretrain is not installed.
try:
    import_codebase(Codebase.MMPRETRAIN)
except ImportError:
    pytest.skip(
        f'{Codebase.MMPRETRAIN} is not installed.', allow_module_level=True)

model_cfg_path = 'tests/test_codebase/test_mmpretrain/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
# Minimal ONNX Runtime deployment config for the classification task.
deploy_cfg = Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmpretrain', task='Classification'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name  # scratch export path
task_processor = None  # set by the autouse fixture below
img_shape = (64, 64)
num_classes = 1000
img = np.random.rand(*img_shape, 3)  # dummy HWC float image
@pytest.fixture(autouse=True)
def init_task_processor():
    """(Re)build the classification task processor before every test."""
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
@pytest.mark.parametrize('from_mmrazor', [True, False, '123', 0])
def test_build_pytorch_model(from_mmrazor: Any):
    """build_pytorch_model works with and without mmrazor, and non-boolean
    `from_mmrazor` values must raise TypeError."""
    from mmpretrain.models.classifiers.base import BaseClassifier
    if from_mmrazor is False:
        _task_processor = task_processor
    else:
        # Use the mmrazor SPOS config and mark the deploy config so the
        # task processor builds the model through mmrazor.
        _model_cfg_path = 'tests/test_codebase/test_mmpretrain/data/' \
            'mmrazor_model.py'
        _model_cfg = load_config(_model_cfg_path)[0]
        _model_cfg.algorithm.architecture.model.type = 'mmpretrain.' \
            'ImageClassifier'
        _model_cfg.algorithm.architecture.model.backbone = dict(
            type='SearchableShuffleNetV2', widen_factor=1.0)
        _deploy_cfg = copy.deepcopy(deploy_cfg)
        _deploy_cfg.codebase_config['from_mmrazor'] = from_mmrazor
        _task_processor = build_task_processor(_model_cfg, _deploy_cfg, 'cpu')

    if not isinstance(from_mmrazor, bool):
        # '123' and 0 are invalid: accessing the property must raise.
        with pytest.raises(
                TypeError,
                match='`from_mmrazor` attribute must be '
                'boolean type! '
                f'but got: {from_mmrazor}'):
            _ = _task_processor.from_mmrazor
        return
    assert from_mmrazor == _task_processor.from_mmrazor
    if from_mmrazor:
        pytest.importorskip('mmrazor', reason='mmrazor is not installed.')
    model = _task_processor.build_pytorch_model(None)
    assert isinstance(model, BaseClassifier)
@pytest.fixture
def backend_model():
    """Backend model built against a mocked ORTWrapper that returns random
    logits under the 'output' name."""
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(outputs={
        'output': torch.rand(1, num_classes),
    })

    yield task_processor.build_backend_model([''])

    # Restore the real backend wrapper after the test finishes.
    wrapper.recover()
def test_build_backend_model(backend_model):
    """The backend model must be a torch Module."""
    assert isinstance(backend_model, torch.nn.Module)
def test_create_input():
    """create_input should return a (packed data, tensor) pair."""
    inputs = task_processor.create_input(img, input_shape=img_shape)
    assert isinstance(inputs, tuple) and len(inputs) == 2
def test_visualize(backend_model):
    """visualize() should render the prediction and write it to disk."""
    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    # `tmp_dir` rather than `dir`: avoid shadowing the builtin; build the
    # path with os.path.join instead of string concatenation.
    with TemporaryDirectory() as tmp_dir:
        filename = os.path.join(tmp_dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'window')
        assert os.path.exists(filename)
def test_get_tensor_from_input():
    """get_tensor_from_input must return the 'inputs' entry unchanged."""
    expected = torch.ones(3, 4, 5)
    tensor = task_processor.get_tensor_from_input({'inputs': expected})
    assert torch.equal(tensor, expected)
def test_get_partition_cfg():
    """Partitioning is unsupported for classification.

    Bug fix: the old try/except-pass also passed silently when no exception
    was raised; assert the NotImplementedError explicitly.
    """
    with pytest.raises(NotImplementedError):
        _ = task_processor.get_partition_cfg(partition_type='')
def test_build_dataset_and_dataloader():
    """build_dataset/build_dataloader should yield torch Dataset/DataLoader
    objects from the test_dataloader section of the model config."""
    from torch.utils.data import DataLoader, Dataset
    dataset = task_processor.build_dataset(
        dataset_cfg=model_cfg.test_dataloader.dataset)
    assert isinstance(dataset, Dataset), 'Failed to build dataset'
    dataloader_cfg = task_processor.model_cfg.test_dataloader
    dataloader = task_processor.build_dataloader(dataloader_cfg)
    assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'
def test_build_test_runner():
    """build_test_runner should produce a runner whose .test() completes."""
    # Prepare a dummy model whose predictions are fixed DataSamples.
    from mmengine.structures import LabelData
    from mmpretrain.structures import DataSample
    label = LabelData(
        label=torch.tensor([0]),
        score=torch.rand(10),
        metainfo=dict(num_classes=10))
    outputs = [
        DataSample(
            pred_label=torch.tensor([0]),
            _pred_label=label,
            gt_label=torch.tensor([0]),
            _gt_label=label,
            metainfo=dict(
                img_shape=(224, 224),
                img_path='',
                ori_shape=(300, 400),
                scale_factor=(0.8525, 0.8533333333333334)))
    ]
    model = DummyModel(outputs=outputs)
    assert model is not None
    # Run the test loop in a scratch work directory
    # (`tmp_dir` rather than `dir`: avoid shadowing the builtin).
    with TemporaryDirectory() as tmp_dir:
        runner = task_processor.build_test_runner(model, tmp_dir)
        runner.test()
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import Config
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
IMAGE_SIZE = 64
NUM_CLASS = 1000
MODEL_CFG_PATH = 'tests/test_codebase/test_mmpretrain/data/model.py'

# Skip the whole module if mmpretrain is not installed.
try:
    import_codebase(Codebase.MMPRETRAIN)
except ImportError:
    pytest.skip(
        f'{Codebase.MMPRETRAIN} is not installed.', allow_module_level=True)
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """End2EndModel smoke tests against a mocked ONNX Runtime backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

        # simplify backend inference: the wrapper returns canned outputs
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'outputs': torch.rand(1, NUM_CLASS),
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = Config({'onnx_config': {'output_names': ['outputs']}})

        from mmdeploy.codebase.mmpretrain.deploy.classification_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''], device='cpu', deploy_cfg=deploy_cfg)

    @classmethod
    def teardown_class(cls):
        # restore the real backend wrapper
        cls.wrapper.recover()

    def test_forward(self):
        """forward(mode='predict') should return a non-None result."""
        imgs = torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)
        from mmpretrain.structures import DataSample
        data_sample = DataSample(
            metainfo=dict(
                scale_factor=(1, 1),
                ori_shape=(IMAGE_SIZE, IMAGE_SIZE),
                img_shape=(IMAGE_SIZE, IMAGE_SIZE)))
        results = self.end2end_model.forward(
            imgs, [data_sample], mode='predict')
        assert results is not None, 'failed to get output using '\
            'End2EndModel'
@backend_checker(Backend.RKNN)
class TestRKNNEnd2EndModel:
    """RKNNEnd2EndModel smoke tests against a mocked RKNN backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        import mmdeploy.backend.rknn as rknn_apis
        from mmdeploy.backend.rknn import RKNNWrapper
        rknn_apis.__dict__.update({'RKNNWrapper': RKNNWrapper})

        # simplify backend inference: canned list output
        cls.wrapper = SwitchBackendWrapper(RKNNWrapper)
        cls.outputs = [torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE)]
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = Config({
            'onnx_config': {
                'output_names': ['outputs']
            },
            'backend_config': {
                'common_config': {}
            }
        })

        from mmdeploy.codebase.mmpretrain.deploy.classification_model import \
            RKNNEnd2EndModel
        class_names = ['' for i in range(NUM_CLASS)]
        cls.end2end_model = RKNNEnd2EndModel(
            Backend.RKNN, [''],
            device='cpu',
            class_names=class_names,
            deploy_cfg=deploy_cfg)

    @classmethod
    def teardown_class(cls):
        # Bug fix: the wrapper was never recovered, leaking the mocked
        # backend into tests that run after this class.
        cls.wrapper.recover()

    def test_forward_test(self):
        """forward_test should return numpy arrays, one per image."""
        imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE)
        results = self.end2end_model.forward_test(imgs)
        assert isinstance(results[0], np.ndarray)
@backend_checker(Backend.ONNXRUNTIME)
def test_build_classification_model():
    """build_classification_model should assemble an End2EndModel."""
    model_cfg = Config.fromfile(MODEL_CFG_PATH)
    deploy_cfg = Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['outputs']),
            codebase_config=dict(type='mmpretrain')))

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmpretrain.deploy.classification_model import (
            End2EndModel, build_classification_model)
        classifier = build_classification_model([''], model_cfg, deploy_cfg,
                                                'cpu')
        assert isinstance(classifier, End2EndModel)
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import Config
from mmdeploy.codebase import import_codebase
from mmdeploy.core.rewriters.rewriter_manager import RewriterContext
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs
# torch.testing.assert_allclose was removed in favour of assert_close;
# support both older and newer torch versions under one name.
try:
    from torch.testing import assert_close as torch_assert_close
except Exception:
    from torch.testing import assert_allclose as torch_assert_close

# Skip the whole module if mmpretrain is not installed.
try:
    import_codebase(Codebase.MMPRETRAIN)
except ImportError:
    pytest.skip(
        f'{Codebase.MMPRETRAIN} is not installed.', allow_module_level=True)

input = torch.rand(1)  # NOTE(review): shadows builtin `input`; looks unused — verify before removing
def get_invertedresidual_model():
    """Build a frozen 16->16 ShuffleNetV2 InvertedResidual block."""
    from mmpretrain.models.backbones.shufflenet_v2 import InvertedResidual
    block = InvertedResidual(16, 16)
    block.requires_grad_(False)
    return block
def get_fcuup_model():
    """Build a frozen Conformer FCUUp module (16/16/16)."""
    from mmpretrain.models.backbones.conformer import FCUUp
    fcu_up = FCUUp(16, 16, 16)
    fcu_up.requires_grad_(False)
    return fcu_up
def test_baseclassifier_forward():
    """With the mmdeploy rewriter active, classifier forward should return
    softmax scores instead of going through the predict path."""
    from mmpretrain.models.classifiers import ImageClassifier

    from mmdeploy.codebase.mmpretrain import models  # noqa

    class DummyClassifier(ImageClassifier):
        # Identity head/predict so the classifier passes inputs straight
        # through; only the rewriter's behavior is under test.

        def __init__(self, backbone):
            super().__init__(backbone=backbone)
            self.head = lambda x: x
            self.predict = lambda x, data_samples: x

        def extract_feat(self, batch_inputs: torch.Tensor):
            return batch_inputs

    input = torch.rand(1, 1000)
    backbone_cfg = dict(
        type='ResNet',
        depth=18,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch')
    model = DummyClassifier(backbone_cfg).eval()

    model_output = model(input, None, mode='predict')
    with RewriterContext({}):
        backend_output = model(input)

    # Eager predict passes inputs through; the rewrite applies softmax.
    torch_assert_close(model_output, input)
    torch_assert_close(backend_output, torch.nn.functional.softmax(input, -1))
@pytest.mark.parametrize(
    'backend_type',
    [Backend.ONNXRUNTIME, Backend.TENSORRT, Backend.NCNN, Backend.OPENVINO])
def test_shufflenetv2_backbone__forward(backend_type: Backend):
    """Rewritten InvertedResidual forward should match the eager output."""
    check_backend(backend_type, True)
    model = get_invertedresidual_model()
    model.cpu().eval()
    if backend_type.value == 'tensorrt':
        # TensorRT requires explicit static input shapes.
        deploy_cfg = Config(
            dict(
                backend_config=dict(
                    type=backend_type.value,
                    model_inputs=[
                        dict(
                            input_shapes=dict(
                                input=dict(
                                    min_shape=[1, 16, 28, 28],
                                    opt_shape=[1, 16, 28, 28],
                                    max_shape=[1, 16, 28, 28])))
                    ]),
                onnx_config=dict(
                    input_shape=[28, 28], output_names=['output']),
                codebase_config=dict(type='mmpretrain',
                                     task='Classification')))
    else:
        deploy_cfg = Config(
            dict(
                backend_config=dict(type=backend_type.value),
                onnx_config=dict(input_shape=None, output_names=['output']),
                codebase_config=dict(type='mmpretrain',
                                     task='Classification')))

    imgs = torch.rand((1, 16, 28, 28))
    model_outputs = model.forward(imgs)
    wrapped_model = WrapModel(model, 'forward')
    rewrite_inputs = {'x': imgs}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    if isinstance(rewrite_outputs, dict):
        rewrite_outputs = rewrite_outputs['output']
    # Compare per-output tensors within loose floating-point tolerances.
    for model_output, rewrite_output in zip(model_outputs, rewrite_outputs):
        model_output = model_output.cpu().numpy()
        if isinstance(rewrite_output, torch.Tensor):
            rewrite_output = rewrite_output.cpu().numpy()
        assert np.allclose(
            model_output, rewrite_output, rtol=1e-03, atol=1e-05)
@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_vision_transformer_backbone__forward(backend_type: Backend):
    """Rewritten ViT backbone output should match the PyTorch output."""
    import_codebase(Codebase.MMPRETRAIN)
    check_backend(backend_type, True)
    from mmpretrain.models.backbones import VisionTransformer
    img_size = 224
    model = VisionTransformer(arch='small', img_size=img_size)
    model.eval()
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(input_shape=(img_size, img_size)),
            codebase_config=dict(type='mmpretrain', task='Classification')))

    imgs = torch.rand((1, 3, img_size, img_size))
    model_outputs = model.forward(imgs)[0]
    wrapped_model = WrapModel(model, 'forward')
    rewrite_inputs = {'x': imgs}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    # Bug fix: the torch.allclose result was previously discarded, so this
    # test could never fail; assert it instead.
    assert torch.allclose(model_outputs, rewrite_outputs[0])
@pytest.mark.parametrize(
    'backend_type',
    [Backend.ONNXRUNTIME, Backend.TENSORRT, Backend.NCNN, Backend.OPENVINO])
@pytest.mark.parametrize('inputs',
                         [torch.rand(1, 3, 5, 5), (torch.rand(1, 3, 7, 7), )])
def test_gap__forward(backend_type: Backend, inputs: list):
    """Rewritten GlobalAveragePooling output should match eager output.

    Bug fixes:
    * ``(torch.rand(...))`` is just a tensor, not a tuple — the non-tensor
      branch was dead; a trailing comma makes it a real one-element tuple.
    * After ``inputs`` is regenerated as a tensor of ``input_shape``,
      indexing ``inputs[0]`` would feed a lower-rank slice to the rewrite;
      feed the regenerated tensor directly so both paths see the same data.
    """
    check_backend(backend_type, False)
    from mmpretrain.models.necks import GlobalAveragePooling
    model = GlobalAveragePooling(dim=2)
    is_input_tensor = isinstance(inputs, torch.Tensor)
    if not is_input_tensor:
        assert len(inputs) == 1, 'only test one input'
    input_shape = inputs.shape if is_input_tensor else inputs[0].shape

    model.cpu().eval()
    if backend_type.value == 'tensorrt':
        # TensorRT requires explicit static input shapes.
        deploy_cfg = Config(
            dict(
                backend_config=dict(
                    type=backend_type.value,
                    model_inputs=[
                        dict(
                            input_shapes=dict(
                                input=dict(
                                    min_shape=input_shape,
                                    opt_shape=input_shape,
                                    max_shape=input_shape)))
                    ]),
                onnx_config=dict(output_names=['output']),
                codebase_config=dict(type='mmpretrain',
                                     task='Classification')))
    else:
        deploy_cfg = Config(
            dict(
                backend_config=dict(type=backend_type.value),
                onnx_config=dict(input_shape=None, output_names=['output']),
                codebase_config=dict(type='mmpretrain',
                                     task='Classification')))

    # Regenerate a fresh tensor of the requested shape for both paths.
    inputs = torch.rand(input_shape)
    model_outputs = model(inputs)
    wrapped_model = WrapModel(model, 'forward')
    rewrite_inputs = {'inputs': inputs}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    if isinstance(rewrite_outputs, dict):
        rewrite_outputs = rewrite_outputs['output']
    for model_output, rewrite_output in zip(model_outputs, rewrite_outputs):
        model_output = model_output.cpu().numpy()
        if isinstance(rewrite_output, torch.Tensor):
            rewrite_output = rewrite_output.cpu().numpy()
        assert np.allclose(
            model_output, rewrite_output, rtol=1e-03, atol=1e-05)
@pytest.mark.skipif(
    reason='Only support GPU test', condition=not torch.cuda.is_available())
@pytest.mark.parametrize('backend_type', [(Backend.TENSORRT)])
def test_shift_windows_msa_cls(backend_type: Backend):
    """Export-only smoke test for the ShiftWindowMSA rewrite on TensorRT."""
    check_backend(backend_type)
    from mmpretrain.models.utils import ShiftWindowMSA
    model = ShiftWindowMSA(96, 3, 7)
    model.cuda().eval()
    output_names = ['output']

    deploy_cfg = Config(
        dict(
            backend_config=dict(
                type=backend_type.value,
                model_inputs=[
                    dict(
                        input_shapes=dict(
                            query=dict(
                                min_shape=[1, 60800, 96],
                                opt_shape=[1, 60800, 96],
                                max_shape=[1, 60800, 96])))
                ]),
            onnx_config=dict(
                input_shape=None,
                input_names=['query'],
                output_names=output_names)))

    query = torch.randn([1, 60800, 96]).cuda()
    hw_shape = (torch.tensor(200), torch.tensor(304))

    wrapped_model = WrapModel(model, 'forward')
    rewrite_inputs = {'query': query, 'hw_shape': hw_shape}
    # run_with_backend=False: only verify tracing/export succeeds.
    _ = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
@pytest.fixture(autouse=True)
def init_test():
    """Register every mmrotate module so tests resolve the default scope."""
    # init default scope
    from mmrotate.utils import register_all_modules
    register_all_modules(True)
359.0 663.0 369.0 497.0 543.0 509.0 531.0 677.0 plane 0
540.0 884.0 363.0 862.0 392.0 674.0 570.0 695.0 plane 0
788.0 844.0 734.0 701.0 916.0 631.0 970.0 762.0 plane 0
720.0 726.0 668.0 583.0 852.0 494.0 913.0 636.0 plane 0
# Copyright (c) OpenMMLab. All rights reserved.
# mmengine config: RetinaNet with a rotated-box head on the DOTA sample
# shipped with the test suite (val/test loaders only; no training pipeline).
dataset_type = 'DOTADataset'
data_root = 'tests/test_codebase/test_mmrotate/data/'
ann_file = 'dota_sample/'
file_client_args = dict(backend='disk')
val_pipeline = [
    dict(
        type='mmdet.LoadImageFromFile', file_client_args=dict(backend='disk')),
    dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
    # DOTA annotations are quadrilaterals ('qbox'); convert GT to rotated
    # boxes ('rbox') for the rotated head.
    dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
    dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
test_pipeline = [
    dict(
        type='mmdet.LoadImageFromFile', file_client_args=dict(backend='disk')),
    dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='DOTADataset',
        data_root=data_root,
        ann_file=ann_file,
        data_prefix=dict(img_path='trainval/images/'),
        test_mode=True,
        pipeline=[
            dict(
                type='mmdet.LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
            dict(
                type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
            dict(
                type='ConvertBoxType',
                box_type_mapping=dict(gt_bboxes='rbox')),
            dict(
                type='mmdet.PackDetInputs',
                meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                           'scale_factor'))
        ]))
# test loader mirrors the val loader (same trainval sample data)
test_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='DOTADataset',
        data_root=data_root,
        ann_file=ann_file,
        data_prefix=dict(img_path='trainval/images/'),
        test_mode=True,
        pipeline=[
            dict(
                type='mmdet.LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
            dict(
                type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
            dict(
                type='ConvertBoxType',
                box_type_mapping=dict(gt_bboxes='rbox')),
            dict(
                type='mmdet.PackDetInputs',
                meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                           'scale_factor'))
        ]))
default_scope = 'mmrotate'
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='RotLocalVisualizer',
    vis_backends=[dict(type='LocalVisBackend')],
    name='visualizer')
model = dict(
    type='mmdet.RetinaNet',
    data_preprocessor=dict(
        type='mmdet.DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        boxtype2tensor=False),
    backbone=dict(
        type='mmdet.ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='mmdet.FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='mmdet.RetinaHead',
        num_classes=15,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # rotated anchors/coder: le135 angle convention throughout
        anchor_generator=dict(
            type='FakeRotatedAnchorGenerator',
            angle_version='le135',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[1.0, 0.5, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHTRBBoxCoder',
            angle_version='le135',
            norm_factor=1,
            edge_swap=False,
            proj_xy=True,
            target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
            target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
        loss_cls=dict(
            type='mmdet.FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='mmdet.L1Loss', loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='mmdet.MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1,
            iou_calculator=dict(type='RBboxOverlaps2D')),
        sampler=dict(type='mmdet.PseudoSampler'),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=2000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms_rotated', iou_threshold=0.1),
        max_per_img=2000))
{
"type": "RotatedRetinaNet",
"backbone": {
"type": "ResNet",
"depth": 50,
"num_stages": 4,
"out_indices": [
0,
1,
2,
3
],
"frozen_stages": 1,
"norm_cfg": {
"type": "BN",
"requires_grad": true
},
"norm_eval": true,
"style": "pytorch",
"init_cfg": {
"type": "Pretrained",
"checkpoint": "torchvision://resnet50"
}
},
"neck": {
"type": "FPN",
"in_channels": [
256,
512,
1024,
2048
],
"out_channels": 256,
"start_level": 1,
"add_extra_convs": "on_input",
"num_outs": 5
},
"bbox_head": {
"type": "RotatedRetinaHead",
"num_classes": 15,
"in_channels": 256,
"stacked_convs": 4,
"feat_channels": 256,
"anchor_generator": {
"type": "RotatedAnchorGenerator",
"octave_base_scale": 4,
"scales_per_octave": 3,
"ratios": [
0.5,
1.0,
2.0
],
"strides": [
8,
16,
32,
64,
128
]
},
"bbox_coder": {
"type": "DeltaXYWHAOBBoxCoder",
"target_means": [
0.0,
0.0,
0.0,
0.0,
0.0
],
"target_stds": [
1.0,
1.0,
1.0,
1.0,
1.0
]
},
"loss_cls": {
"type": "FocalLoss",
"use_sigmoid": true,
"gamma": 2.0,
"alpha": 0.25,
"loss_weight": 1.0
},
"loss_bbox": {
"type": "L1Loss",
"loss_weight": 1.0
}
},
"train_cfg": {
"assigner": {
"type": "MaxIoUAssigner",
"pos_iou_thr": 0.5,
"neg_iou_thr": 0.4,
"min_pos_iou": 0,
"ignore_iof_thr": -1,
"iou_calculator": {
"type": "RBboxOverlaps2D"
}
},
"allowed_border": -1,
"pos_weight": -1,
"debug": false
},
"test_cfg": {
"nms_pre": 2000,
"min_bbox_size": 0,
"score_thr": 0.05,
"nms": {
"type": "nms",
"iou_threshold": 0.1
},
"max_per_img": 2000
}
}
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random
from typing import Dict, List
import numpy as np
import pytest
import torch
from mmengine import Config
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
get_rewrite_outputs)
# Skip the whole module if mmrotate is not installed.
try:
    import_codebase(Codebase.MMROTATE)
except ImportError:
    pytest.skip(
        f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)
def seed_everything(seed=1029):
    """Make every RNG source deterministic for reproducible tests."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # seed the current device and, for multi-GPU runs, all devices
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # disable cuDNN autotuning / non-deterministic kernels entirely
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = False
def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List:
    """Convert output from a dictionary to a list.

    Keeps only the values whose keys appear in ``output_names``, preserving
    the dictionary's iteration order.
    """
    wanted = set(output_names)
    return [value for name, value in rewrite_output.items() if name in wanted]
def get_anchor_head_model():
    """RotatedAnchorHead with a minimal NMS test_cfg (4 classes, 1 chan)."""
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(iou_thr=0.1),
            max_per_img=2000))
    from mmrotate.models.dense_heads import RotatedAnchorHead
    model = RotatedAnchorHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
    model.requires_grad_(False)
    return model
def get_deploy_cfg(backend_type: Backend, ir_type: str):
    """Assemble a rotated-detection deploy config for ``backend_type``
    using ``ir_type`` as the intermediate representation."""
    post_processing = dict(
        score_threshold=0.05,
        iou_threshold=0.1,
        pre_top_k=2000,
        keep_top_k=2000,
    )
    return Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(
                type=ir_type,
                output_names=['dets', 'labels'],
                input_shape=None),
            codebase_config=dict(
                type='mmrotate',
                task='RotatedDetection',
                post_processing=post_processing)))
def get_single_roi_extractor():
    """RotatedSingleRoIExtractor over a RoIAlignRotated layer, in eval mode."""
    from mmrotate.models.roi_heads import RotatedSingleRoIExtractor
    roi_layer = dict(
        type='RoIAlignRotated', out_size=7, sample_num=2, clockwise=True)
    out_channels = 1
    featmap_strides = [4, 8, 16, 32]
    model = RotatedSingleRoIExtractor(roi_layer, out_channels,
                                      featmap_strides).eval()
    return model
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_rotated_single_roi_extractor(backend_type: Backend):
    """Rewritten RotatedSingleRoIExtractor should match eager outputs."""
    check_backend(backend_type, True)
    single_roi_extractor = get_single_roi_extractor()
    output_names = ['roi_feat']
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmrotate',
                task='RotatedDetection',
            )))

    # FPN-style pyramid matching featmap_strides 4/8/16/32.
    seed_everything(1234)
    out_channels = single_roi_extractor.out_channels
    feats = [
        torch.rand((1, out_channels, 200, 336)),
        torch.rand((1, out_channels, 100, 168)),
        torch.rand((1, out_channels, 50, 84)),
        torch.rand((1, out_channels, 25, 42)),
    ]
    seed_everything(5678)
    # one rotated RoI; presumably (batch_idx, cx, cy, w, h, angle) — the
    # format expected by RoIAlignRotated (TODO confirm)
    rois = torch.tensor(
        [[0.0000, 587.8285, 52.1405, 886.2484, 341.5644, 0.0000]])

    model_inputs = {
        'feats': feats,
        'rois': rois,
    }
    model_outputs = get_model_outputs(single_roi_extractor, 'forward',
                                      model_inputs)
    backend_outputs, _ = get_rewrite_outputs(
        wrapped_model=single_roi_extractor,
        model_inputs=model_inputs,
        deploy_cfg=deploy_cfg)
    if isinstance(backend_outputs, dict):
        backend_outputs = backend_outputs.values()
    for model_output, backend_output in zip(model_outputs[0], backend_outputs):
        model_output = model_output.squeeze().cpu().numpy()
        backend_output = backend_output.squeeze()
        assert np.allclose(
            model_output, backend_output, rtol=1e-03, atol=1e-05)
def get_oriented_rpn_head_model():
    """Oriented RPN Head with an mmdet anchor generator and the
    midpoint-offset bbox coder (le90 angle convention)."""
    test_cfg = Config(
        dict(
            nms_pre=2000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_thr=0.1),
            max_per_img=2000))
    from mmrotate.models.dense_heads import OrientedRPNHead
    model = OrientedRPNHead(
        in_channels=1,
        anchor_generator=dict(
            type='mmdet.AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64],
            use_box_type=True),
        bbox_coder=dict(type='MidpointOffsetCoder', angle_version='le90'),
        test_cfg=test_cfg,
        loss_cls=dict(
            type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(
            type='mmdet.SmoothL1Loss',
            beta=0.1111111111111111,
            loss_weight=1.0))
    model.requires_grad_(False)
    return model
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_oriented_rpn_head__predict_by_feat(backend_type: Backend):
    """Rewritten OrientedRPNHead.predict_by_feat should export and run."""
    check_backend(backend_type)
    head = get_oriented_rpn_head_model()
    head.cpu().eval()
    s = 128
    img_metas = [{
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 3)
    }]
    output_names = ['dets', 'labels']
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmrotate',
                task='RotatedDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.1,
                    pre_top_k=2000,
                    keep_top_k=2000))))
    # Per-level dummy inputs for 3 anchors per location:
    # cls_score channels = 3, bbox_pred channels = 18 (3 anchors x 6 deltas),
    # spatial sizes 32, 16, 8, 4, 2.
    seed_everything(1234)
    cls_score = [
        torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
    ]
    seed_everything(5678)
    bboxes = [torch.rand(1, 18, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]

    # to get outputs of onnx model after rewrite
    img_metas[0]['img_shape'] = torch.Tensor([s, s])
    wrapped_model = WrapModel(
        head, 'predict_by_feat', batch_img_metas=img_metas, with_nms=True)
    rewrite_inputs = {
        'cls_scores': cls_score,
        'bbox_preds': bboxes,
    }
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_gv_ratio_roi_head__predict_bbox(backend_type: Backend):
    """Rewrite test for ``GVRatioRoIHead.predict_bbox``.

    Builds a small gliding-vertex ratio RoI head, runs the rewritten
    ``predict_bbox`` on random features and proposals, and checks that
    the rewrite produces some output.
    """
    check_backend(backend_type, True)
    from mmrotate.models.roi_heads import GVRatioRoIHead
    output_names = ['dets', 'labels']
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmrotate',
                task='RotatedDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.1,
                    pre_top_k=2000,
                    keep_top_k=2000,
                    max_output_boxes_per_class=1000))))
    # rcnn test settings, forwarded to predict_bbox via rcnn_test_cfg below.
    test_cfg = Config(
        dict(
            rcnn=dict(
                nms_pre=2000,
                min_bbox_size=0,
                score_thr=0.05,
                nms=dict(type='nms_rotated', iou_threshold=0.1),
                max_per_img=2000)))
    head = GVRatioRoIHead(
        bbox_roi_extractor=dict(
            type='mmdet.SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=3,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='GVBBoxHead',
            num_shared_fcs=2,
            in_channels=3,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=15,
            ratio_thr=0.8,
            bbox_coder=dict(
                type='DeltaXYWHQBBoxCoder',
                target_means=(.0, .0, .0, .0),
                target_stds=(0.1, 0.1, 0.2, 0.2)),
            fix_coder=dict(type='GVFixCoder'),
            ratio_coder=dict(type='GVRatioCoder'),
            predict_box_type='rbox',
            reg_class_agnostic=True,
            loss_cls=dict(
                type='mmdet.CrossEntropyLoss',
                use_sigmoid=False,
                loss_weight=1.0),
            loss_bbox=dict(
                type='mmdet.SmoothL1Loss', beta=1.0, loss_weight=1.0),
            loss_fix=dict(
                type='mmdet.SmoothL1Loss', beta=1.0 / 3.0, loss_weight=1.0),
            loss_ratio=dict(
                type='mmdet.SmoothL1Loss', beta=1.0 / 3.0, loss_weight=16.0),
        ))
    head.cpu().eval()
    seed_everything(1234)
    # Four feature levels: (1, 3, 16, 16) down to (1, 3, 2, 2).
    x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)]
    # 100 random proposal boxes (x1, y1, x2, y2); the second corner is the
    # first plus positive offsets, so every box has non-negative extents.
    bboxes = torch.rand(1, 100, 2)
    bboxes = torch.cat(
        [bboxes, bboxes + torch.rand(1, 100, 2) + torch.rand(1, 100, 1)],
        dim=-1)
    # Integer tensor paired with the boxes -- presumably per-proposal
    # labels/scores; confirm against predict_bbox's rpn_results_list format.
    proposals = [bboxes, torch.randint(0, 10, (1, 100))]
    img_metas = [{'img_shape': torch.tensor([224, 224])}]
    wrapped_model = WrapModel(
        head,
        'predict_bbox',
        rpn_results_list=proposals,
        batch_img_metas=img_metas,
        rcnn_test_cfg=test_cfg['rcnn'])
    rewrite_inputs = {'x': x}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_gvfixcoder__decode(backend_type: Backend):
    """Rewrite test for ``GVFixCoder.decode``.

    The exported decode must return an output for random horizontal
    boxes and fix deltas; the backend is not actually executed.
    """
    check_backend(backend_type)
    deploy_cfg = Config(
        dict(
            onnx_config=dict(output_names=['output'], input_shape=None),
            backend_config=dict(type=backend_type.value),
            codebase_config=dict(type='mmrotate', task='RotatedDetection')))
    from mmrotate.models.task_modules.coders import GVFixCoder
    wrapped_model = WrapModel(GVFixCoder(), 'decode')
    decode_inputs = {
        'hboxes': torch.rand(1, 10, 4),
        'fix_deltas': torch.rand(1, 10, 4),
    }
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_model,
        model_inputs=decode_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    assert rewrite_outputs is not None
def get_rotated_rtmdet_head_model():
    """Build a minimal RTMDet-R head (``RotatedRTMDetHead``) for tests."""
    from mmrotate.models.dense_heads import RotatedRTMDetHead

    # NMS / score settings used at inference time.
    test_cfg = Config(
        dict(
            deploy_nms_pre=0,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms_rotated', iou_threshold=0.1),
            max_per_img=2000))
    head = RotatedRTMDetHead(
        num_classes=4,
        in_channels=1,
        anchor_generator=dict(
            type='mmdet.MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
        bbox_coder=dict(type='DistanceAnglePointCoder', angle_version='le90'),
        loss_cls=dict(
            type='mmdet.QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0),
        test_cfg=test_cfg)
    # Inference-only model: freeze every parameter.
    head.requires_grad_(False)
    return head
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_rotated_rtmdet_head_predict_by_feat(backend_type: Backend):
    """Test predict_by_feat rewrite of RTMDet-R.

    Runs the same random inputs through the native pytorch
    ``predict_by_feat`` and through the rewritten/exported version, then
    compares the top detections when backend outputs are available.
    """
    check_backend(backend_type, require_plugin=True)
    rtm_r_head = get_rotated_rtmdet_head_model()
    rtm_r_head.cpu().eval()
    s = 128
    batch_img_metas = [{
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 3)
    }]

    output_names = ['dets', 'labels']
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmrotate',
                task='RotatedDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.1,
                    pre_top_k=3000,
                    keep_top_k=2000,
                    max_output_boxes_per_class=2000))))
    # Three feature levels (16x16, 8x8, 4x4); fixed seeds make the random
    # predictions identical for the pytorch run and the rewrite run.
    seed_everything(1234)
    cls_scores = [
        torch.rand(1, rtm_r_head.num_classes, 2 * pow(2, i), 2 * pow(2, i))
        for i in range(3, 0, -1)
    ]
    seed_everything(5678)
    bbox_preds = [
        torch.rand(1, 4, 2 * pow(2, i), 2 * pow(2, i))
        for i in range(3, 0, -1)
    ]
    seed_everything(9101)
    angle_preds = [
        torch.rand(1, rtm_r_head.angle_coder.encode_size, 2 * pow(2, i),
                   2 * pow(2, i)) for i in range(3, 0, -1)
    ]

    # to get outputs of pytorch model
    model_inputs = {
        'cls_scores': cls_scores,
        'bbox_preds': bbox_preds,
        'angle_preds': angle_preds,
        'batch_img_metas': batch_img_metas,
        'with_nms': True
    }
    model_outputs = get_model_outputs(rtm_r_head, 'predict_by_feat',
                                      model_inputs)

    # to get outputs of onnx model after rewrite
    wrapped_model = WrapModel(
        rtm_r_head,
        'predict_by_feat',
        batch_img_metas=batch_img_metas,
        with_nms=True)
    rewrite_inputs = {
        'cls_scores': cls_scores,
        'bbox_preds': bbox_preds,
        'angle_preds': angle_preds,
    }
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)

    if is_backend_output:
        # hard code to make two tensors with the same shape
        # rewrite and original codes applied different nms strategy
        min_shape = min(model_outputs[0].bboxes.shape[0],
                        rewrite_outputs[0].shape[1], 5)
        for i in range(len(model_outputs)):
            # Compare box parameters (first 5 columns), scores (column 5)
            # and labels for the top ``min_shape`` detections only.
            assert np.allclose(
                model_outputs[i].bboxes.tensor[:min_shape],
                rewrite_outputs[0][i, :min_shape, :5],
                rtol=1e-03,
                atol=1e-05)
            assert np.allclose(
                model_outputs[i].scores[:min_shape],
                rewrite_outputs[0][i, :min_shape, 5],
                rtol=1e-03,
                atol=1e-05)
            assert np.allclose(
                model_outputs[i].labels[:min_shape],
                rewrite_outputs[1][i, :min_shape],
                rtol=1e-03,
                atol=1e-05)
    else:
        assert rewrite_outputs is not None
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import numpy as np
import pytest
import torch
from mmengine import Config
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper
# Skip the whole module when mmrotate is not installed.
try:
    import_codebase(Codebase.MMROTATE)
except ImportError:
    pytest.skip(
        f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)

# Shared fixtures for the task-processor tests below.
model_cfg_path = 'tests/test_codebase/test_mmrotate/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(
            type='mmrotate',
            task='RotatedDetection',
            post_processing=dict(
                score_threshold=0.05,
                iou_threshold=0.1,
                pre_top_k=2000,
                keep_top_k=2000)),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['dets', 'labels'])))
# Placeholder ONNX path; the backend is stubbed so it is never read.
onnx_file = NamedTemporaryFile(suffix='.onnx').name
# Rebuilt before each test by the autouse fixture init_task_processor.
task_processor = None
img_shape = (32, 32)
# Random HxWx3 test image shared by all tests in this module.
img = np.random.rand(*img_shape, 3)
@pytest.fixture(autouse=True)
def init_task_processor():
    """Rebuild the module-level task processor before every test."""
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
def test_build_pytorch_model():
    """build_pytorch_model should return an mmdet BaseDetector instance."""
    from mmdet.models import BaseDetector
    model = task_processor.build_pytorch_model(None)
    assert isinstance(model, BaseDetector)
@pytest.fixture
def backend_model():
    """Yield a backend model backed by a stubbed ORTWrapper.

    The wrapper is patched to return canned dets/labels tensors so no
    real ONNX file or onnxruntime session is needed; the real wrapper is
    restored after the test.
    """
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(outputs={
        'dets': torch.rand(1, 10, 6),
        'labels': torch.randint(1, 10, (1, 10))
    })

    yield task_processor.build_backend_model([''])

    wrapper.recover()
def test_build_backend_model(backend_model):
    """build_backend_model should produce an End2EndModel."""
    from mmdeploy.codebase.mmrotate.deploy.rotated_detection_model import \
        End2EndModel
    assert isinstance(backend_model, End2EndModel)
@pytest.mark.parametrize('device', ['cpu'])
def test_create_input(device):
    """create_input should return a (model_inputs, image_tensor) pair."""
    original_device = task_processor.device
    task_processor.device = device
    try:
        inputs = task_processor.create_input(img, input_shape=img_shape)
        assert len(inputs) == 2
    finally:
        # Restore even when the assertion fails, so a failure here does
        # not leak a modified device into the remaining tests.
        task_processor.device = original_device
def test_visualize(backend_model):
    """End-to-end visualize should write an image file to disk."""
    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as tmp_dir:
        # os.path.join keeps the output inside the temporary directory;
        # the previous `dir + 'tmp.jpg'` concatenation produced a sibling
        # path of the temp dir that was never cleaned up (and shadowed
        # the builtin `dir`).
        filename = os.path.join(tmp_dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'window')
        assert os.path.exists(filename)
def test_get_partition_cfg():
    """Partition deployment is unsupported for rotated detection."""
    with pytest.raises(NotImplementedError):
        _ = task_processor.get_partition_cfg(partition_type='')
def test_build_dataset_and_dataloader():
    """Dataset and dataloader should both build from the test configs."""
    built_dataset = task_processor.build_dataset(
        dataset_cfg=model_cfg.test_dataloader.dataset)
    assert isinstance(built_dataset, Dataset), 'Failed to build dataset'
    built_loader = task_processor.build_dataloader(
        task_processor.model_cfg.test_dataloader)
    assert isinstance(built_loader, DataLoader), 'Failed to build dataloader'
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine import Config
from mmengine.structures import BaseDataElement
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase, load_config
from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker
# Skip the whole module when mmrotate is not installed.
try:
    import_codebase(Codebase.MMROTATE)
except ImportError:
    pytest.skip(
        f'{Codebase.MMROTATE} is not installed.', allow_module_level=True)

# Square input size used by every test in this module.
IMAGE_SIZE = 32
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """Tests for the mmrotate End2EndModel with a stubbed ORT backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        # Canned backend outputs: 10 detections with 6 values each
        # (presumably rotated box + score -- confirm against
        # End2EndModel's postprocessing) plus per-detection labels.
        cls.outputs = {
            'dets': torch.rand(1, 10, 6),
            'labels': torch.rand(1, 10)
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = Config(
            {'onnx_config': {
                'output_names': ['dets', 'labels']
            }})

        from mmdeploy.codebase.mmrotate.deploy.rotated_detection_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''], device='cpu', deploy_cfg=deploy_cfg)

    @classmethod
    def teardown_class(cls):
        # Restore the real ORTWrapper once the class is done.
        cls.wrapper.recover()

    def test_forward(self):
        """forward() should return results for a random image batch."""
        imgs = torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)
        img_metas = [
            BaseDataElement(metainfo={
                'img_shape': [IMAGE_SIZE, IMAGE_SIZE],
                'scale_factor': [1, 1]
            })
        ]
        results = self.end2end_model.forward(imgs, img_metas)
        assert results is not None, 'failed to get output using End2EndModel'
@backend_checker(Backend.ONNXRUNTIME)
def test_build_rotated_detection_model():
    """build_rotated_detection_model should return an End2EndModel."""
    model_cfg = load_config(
        'tests/test_codebase/test_mmrotate/data/model.py')[0]
    deploy_cfg = Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            ir_config=dict(type='onnx', output_names=['dets', 'labels']),
            codebase_config=dict(type='mmrotate')))

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # Stub the backend so no real ONNX session is created.
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmrotate.deploy.rotated_detection_model import (
            End2EndModel, build_rotated_detection_model)
        detector = build_rotated_detection_model([''], deploy_cfg, 'cpu')
        assert isinstance(detector, End2EndModel)
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (generate_datasample, generate_mmseg_deploy_config,
                    generate_mmseg_task_processor)

# Helpers re-exported for the mmseg codebase tests.
__all__ = [
    'generate_datasample', 'generate_mmseg_deploy_config',
    'generate_mmseg_task_processor'
]
# Copyright (c) OpenMMLab. All rights reserved.
import pytest


@pytest.fixture(autouse=True)
def init_test():
    """Register all mmseg modules into the default scope before each test."""
    # init default scope
    from mmseg.utils import register_all_modules
    register_all_modules(True)
# Copyright (c) OpenMMLab. All rights reserved.
# mmrazor distillation config: `{{_base_.model}}` is mmengine base-config
# substitution resolved at load time, so this file is not plain Python.
_base_ = 'model.py'

# algorithm setting
# The base segmentor serves as both the student architecture and the
# frozen teacher; distillation is channel-wise divergence on the logits.
algorithm = dict(
    type='GeneralDistill',
    architecture=dict(
        type='MMSegArchitecture',
        model={{_base_.model}},
    ),
    distiller=dict(
        type='SingleTeacherDistiller',
        teacher={{_base_.model}},
        teacher_trainable=False,
        components=[
            dict(
                # Distill on the decode head's final segmentation logits.
                student_module='decode_head.conv_seg',
                teacher_module='decode_head.conv_seg',
                losses=[
                    dict(
                        type='ChannelWiseDivergence',
                        name='loss_cwd_logits',
                        tau=1,
                        loss_weight=5,
                    )
                ])
        ]),
)
# Copyright (c) OpenMMLab. All rights reserved.
# Minimal Fast-SCNN / Cityscapes config used by the mmseg deploy tests.

# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'tests/test_codebase/test_mmseg/data'
crop_size = (128, 128)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=crop_size, keep_ratio=False),
    # add loading annotation after ``Resize`` because ground truth
    # does not need to do resize data transform
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='PackSegInputs')
]
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        lazy_init=True,
        serialize_data=False,
        data_prefix=dict(img_path='', seg_map_path=''),
        pipeline=test_pipeline))
# Tests reuse the validation loader/metric for the test split.
test_dataloader = val_dataloader
val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
data_preprocessor = dict(
    type='SegDataPreProcessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    bgr_to_rgb=True,
    pad_val=0,
    seg_pad_val=255)
model = dict(
    type='EncoderDecoder',
    data_preprocessor=data_preprocessor,
    backbone=dict(
        type='FastSCNN',
        downsample_dw_channels=(32, 48),
        global_in_channels=64,
        global_block_channels=(64, 96, 128),
        global_block_strides=(2, 2, 1),
        global_out_channels=128,
        higher_in_channels=64,
        lower_in_channels=128,
        fusion_out_channels=128,
        out_indices=(0, 1, 2),
        norm_cfg=norm_cfg,
        align_corners=False),
    decode_head=dict(
        type='DepthwiseSeparableFCNHead',
        in_channels=128,
        channels=128,
        concat_input=False,
        num_classes=19,
        in_index=-1,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

# from default_runtime
default_scope = 'mmseg'
env_cfg = dict(
    cudnn_benchmark=True,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
log_level = 'INFO'
load_from = None
resume = False
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer')

# from schedules
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000),
    sampler_seed=dict(type='DistSamplerSeedHook'),
)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment