Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
mmdeploy
Commits
a17c53b8
Commit
a17c53b8
authored
Jun 25, 2025
by
limm
Browse files
add test_mmdet test_mmocr and test_mmpose
parent
a72d0dfa
Pipeline
#2819
canceled with stages
Changes
33
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
13 changed files
with
1699 additions
and
0 deletions
+1699
-0
tests/test_codebase/test_mmocr/test_mmocr_models.py
tests/test_codebase/test_mmocr/test_mmocr_models.py
+655
-0
tests/test_codebase/test_mmocr/test_text_detection.py
tests/test_codebase/test_mmocr/test_text_detection.py
+97
-0
tests/test_codebase/test_mmocr/test_text_detection_models.py
tests/test_codebase/test_mmocr/test_text_detection_models.py
+94
-0
tests/test_codebase/test_mmocr/test_text_recognition.py
tests/test_codebase/test_mmocr/test_text_recognition.py
+98
-0
tests/test_codebase/test_mmocr/test_text_recognition_models.py
.../test_codebase/test_mmocr/test_text_recognition_models.py
+93
-0
tests/test_codebase/test_mmpose/__init__.py
tests/test_codebase/test_mmpose/__init__.py
+8
-0
tests/test_codebase/test_mmpose/conftest.py
tests/test_codebase/test_mmpose/conftest.py
+10
-0
tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json
...est_mmpose/data/annotations/person_keypoints_val2017.json
+1
-0
tests/test_codebase/test_mmpose/data/model.py
tests/test_codebase/test_mmpose/data/model.py
+67
-0
tests/test_codebase/test_mmpose/test_mmpose_models.py
tests/test_codebase/test_mmpose/test_mmpose_models.py
+314
-0
tests/test_codebase/test_mmpose/test_pose_detection.py
tests/test_codebase/test_mmpose/test_pose_detection.py
+117
-0
tests/test_codebase/test_mmpose/test_pose_detection_model.py
tests/test_codebase/test_mmpose/test_pose_detection_model.py
+78
-0
tests/test_codebase/test_mmpose/utils.py
tests/test_codebase/test_mmpose/utils.py
+67
-0
No files found.
tests/test_codebase/test_mmocr/test_mmocr_models.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
tempfile
import
mmengine
import
numpy
as
np
import
pytest
import
torch
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.core
import
RewriterContext
,
patch_model
from
mmdeploy.utils
import
Backend
,
Codebase
from
mmdeploy.utils.config_utils
import
load_config
from
mmdeploy.utils.test
import
(
WrapModel
,
check_backend
,
get_backend_outputs
,
get_model_outputs
,
get_onnx_model
,
get_rewrite_outputs
)
# Skip the whole module when the mmocr codebase is not importable.
try:
    import_codebase(Codebase.MMOCR)
except ImportError:
    pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)

from mmocr.models.textdet.necks import FPNC

# Character-dictionary config shared by the decoder/recognizer tests below.
dictionary = dict(
    type='Dictionary',
    dict_file='tests/test_codebase/test_mmocr/data/lower_english_digits.txt',
    with_padding=True,
    with_end=True)
class FPNCNeckModel(FPNC):
    """FPNC wrapper that accepts a single feature map instead of a list.

    The incoming tensor is tiled along the channel axis once per entry of
    ``in_channels`` to fabricate the multi-scale input list that the inner
    FPNC neck expects.
    """

    def __init__(self, in_channels, init_cfg=None):
        super().__init__(in_channels, init_cfg=init_cfg)
        self.in_channels = in_channels
        # Inner neck that performs the real FPNC forward computation.
        self.neck = FPNC(in_channels, init_cfg=init_cfg)

    def forward(self, inputs):
        """Tile ``inputs`` per configured channel count and run the neck."""
        tiled_inputs = []
        for num_channels in self.in_channels:
            tiled_inputs.append(inputs.repeat([1, num_channels, 1, 1]))
        return self.neck.forward(tiled_inputs)
def get_bidirectionallstm_model():
    """Build a small frozen BidirectionalLSTM for the rewrite tests."""
    from mmocr.models.textrecog.layers.lstm_layer import BidirectionalLSTM
    bilstm = BidirectionalLSTM(32, 16, 16)
    # Gradients are irrelevant for export/rewrite comparison.
    bilstm.requires_grad_(False)
    return bilstm
def get_single_stage_text_detector_model():
    """Build a small frozen DBNet-style SingleStageTextDetector.

    Uses a ResNet-18 backbone, an FPNC neck and a DBHead, mirroring a
    minimal DBNet config so the detector can be traced in tests.
    """
    from mmocr.models.textdet import SingleStageTextDetector
    backbone = dict(
        type='mmdet.ResNet',
        depth=18,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        norm_cfg=dict(type='BN', requires_grad=True),
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18'),
        norm_eval=False,
        style='caffe')
    neck = dict(
        type='FPNC', in_channels=[64, 128, 256, 512], lateral_channels=256)
    det_head = dict(
        type='DBHead',
        in_channels=256,
        module_loss=dict(type='DBModuleLoss'),
        postprocessor=dict(type='DBPostprocessor', text_repr_type='quad'))
    model = SingleStageTextDetector(backbone, det_head, neck)
    # Freeze parameters; the tests only compare forward outputs.
    model.requires_grad_(False)
    return model
def get_crnn_decoder_model(rnn_flag):
    """Build a frozen CRNNDecoder.

    Args:
        rnn_flag: passed through to ``CRNNDecoder`` to toggle its RNN branch.
    """
    from mmocr.models.textrecog.decoders import CRNNDecoder
    decoder = CRNNDecoder(32, dictionary, rnn_flag=rnn_flag)
    decoder.requires_grad_(False)
    return decoder
def get_fpnc_neck_model():
    """Build a frozen FPNCNeckModel with a tiny channel spec."""
    neck = FPNCNeckModel([2, 4, 8, 16])
    neck.requires_grad_(False)
    return neck
def get_base_recognizer_model():
    """Build a small frozen CRNN recognizer (MiniVGG backbone, CTC decoder)."""
    from mmocr.models.textrecog.recognizers import CRNN
    cfg = dict(
        preprocessor=None,
        backbone=dict(type='MiniVGG', leaky_relu=False, input_channels=1),
        encoder=None,
        decoder=dict(
            type='CRNNDecoder',
            in_channels=512,
            rnn_flag=True,
            module_loss=dict(type='CTCModuleLoss', letter_case='lower'),
            postprocessor=dict(type='CTCPostProcessor'),
            dictionary=dictionary),
        data_preprocessor=dict(
            type='mmocr.TextRecogDataPreprocessor', mean=[127], std=[127]))
    model = CRNN(
        backbone=cfg['backbone'],
        encoder=None,
        decoder=cfg['decoder'],
        data_preprocessor=cfg['data_preprocessor'])
    # Freeze parameters; only forward outputs are compared in the tests.
    model.requires_grad_(False)
    return model
@pytest.mark.parametrize('backend', [Backend.NCNN])
def test_bidirectionallstm(backend: Backend):
    """Test forward rewrite of bidirectionallstm.

    Compares the PyTorch forward output against the rewritten/backend
    output; skips when the backend is unavailable.
    """
    check_backend(backend)
    bilstm = get_bidirectionallstm_model()
    bilstm.cpu().eval()
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(output_names=['output'], input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextRecognition',
            )))
    input = torch.rand(1, 1, 32)
    # to get outputs of pytorch model
    model_inputs = {
        'input': input,
    }
    model_outputs = get_model_outputs(bilstm, 'forward', model_inputs)
    # to get outputs of onnx model after rewrite
    wrapped_model = WrapModel(bilstm, 'forward')
    rewrite_inputs = {'input': input}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=True)
    if is_backend_output:
        # Backend actually ran: compare values numerically.
        model_output = model_outputs.cpu().numpy()
        rewrite_output = rewrite_outputs[0].cpu().numpy()
        assert np.allclose(model_output, rewrite_output, rtol=1e-3, atol=1e-4)
    else:
        # Backend unavailable: at least the rewrite must produce something.
        assert rewrite_outputs is not None
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
def test_nrtr_decoder__get_source_mask(backend: Backend):
    """Test the exported ``NRTRDecoder._get_source_mask`` is batch-invariant.

    Exports the mask computation to ONNX, runs it on a batch built by
    expanding one sample, and checks every batch element yields the same
    mask.
    """
    # Skip cleanly when the backend is not installed (was missing; every
    # sibling test guards with check_backend).
    check_backend(backend)
    from mmocr.models.textrecog import NRTRDecoder
    deploy_cfg = mmengine.Config(
        dict(
            onnx_config=dict(
                input_names=['input'],
                output_names=['output'],
                input_shape=None,
                dynamic_axes={
                    'input': {
                        0: 'batch',
                    },
                    'output': {
                        0: 'batch',
                    }
                }),
            backend_config=dict(type=backend.value, model_inputs=None),
            codebase_config=dict(type='mmocr', task='TextRecognition')))
    src_seq = torch.rand(1, 200, 256)
    # Same sample replicated 3x: outputs per batch element must match.
    batch_src_seq = src_seq.expand(3, 200, 256)
    decoder = NRTRDecoder(
        dictionary=dict(
            type='Dictionary',
            dict_file='tests/test_codebase/test_mmocr/'
            'data/lower_english_digits.txt',
            with_start=True,
            with_end=True,
            same_start_end=True,
            with_padding=True,
            with_unknown=True))
    wrapped_model = WrapModel(decoder, '_get_source_mask')
    model_inputs = {'src_seq': src_seq, 'valid_ratios': torch.Tensor([1.0])}
    batch_model_inputs = {'input': batch_src_seq}
    ir_file_path = get_onnx_model(wrapped_model, model_inputs, deploy_cfg)
    backend_outputs = get_backend_outputs(ir_file_path, batch_model_inputs,
                                          deploy_cfg)[0].numpy()
    num_elements = np.prod(backend_outputs.shape[1:])
    # batch results should be same
    assert np.sum(backend_outputs[0] == backend_outputs[1]) == num_elements \
        and np.sum(backend_outputs[1] == backend_outputs[2]) == num_elements
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
def test_satrn_encoder__get_source_mask(backend: Backend):
    """Test the exported SATRNEncoder forward is batch-invariant.

    Exports the encoder to ONNX, feeds a batch built by expanding one
    feature map, and checks all batch elements produce identical outputs.
    """
    # Skip cleanly when the backend is not installed (was missing; every
    # sibling test guards with check_backend).
    check_backend(backend)
    from mmocr.models.textrecog import SATRNEncoder
    deploy_cfg = mmengine.Config(
        dict(
            onnx_config=dict(
                input_names=['input'],
                output_names=['output'],
                input_shape=None,
                dynamic_axes={
                    'input': {
                        0: 'batch',
                    },
                    'output': {
                        0: 'batch',
                    }
                }),
            backend_config=dict(type=backend.value, model_inputs=None),
            codebase_config=dict(type='mmocr', task='TextRecognition')))
    encoder = SATRNEncoder(d_k=4, d_v=4, d_model=32, d_inner=32 * 4)
    feat = torch.randn(1, 32, 32, 32)
    # Same sample replicated 3x: outputs per batch element must match.
    batch_feat = feat.expand(3, 32, 32, 32)
    wrapped_model = WrapModel(encoder, 'forward')
    model_inputs = {'feat': feat}
    batch_model_inputs = {'input': batch_feat}
    ir_file_path = get_onnx_model(wrapped_model, model_inputs, deploy_cfg)
    backend_outputs = get_backend_outputs(ir_file_path, batch_model_inputs,
                                          deploy_cfg)[0].numpy()
    num_elements = np.prod(backend_outputs.shape[1:])
    # batch results should be same
    assert np.sum(backend_outputs[0] == backend_outputs[1]) == num_elements \
        and np.sum(backend_outputs[1] == backend_outputs[2]) == num_elements
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
def test_simple_test_of_single_stage_text_detector(backend: Backend):
    """Test simple_test single_stage_text_detector.

    Compares the detector's ``_forward`` output against the rewritten
    backend output within numeric tolerance.
    """
    check_backend(backend)
    single_stage_text_detector = get_single_stage_text_detector_model()
    single_stage_text_detector.eval()
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextDetection',
            )))
    input = torch.rand(1, 3, 64, 64)
    # Reference output from the unmodified PyTorch model.
    model_outputs = single_stage_text_detector._forward(input)
    wrapped_model = WrapModel(single_stage_text_detector, '_forward')
    rewrite_inputs = {'inputs': input}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=True)
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0]
    model_outputs = model_outputs.cpu().numpy()
    rewrite_outputs = rewrite_outputs.cpu().numpy()
    assert np.allclose(
        model_outputs, rewrite_outputs, rtol=1e-03, atol=1e-05)
@pytest.mark.parametrize('backend', [Backend.NCNN])
@pytest.mark.parametrize('rnn_flag', [True, False])
def test_crnndecoder(backend: Backend, rnn_flag: bool):
    """Test forward rewrite of crnndecoder.

    Runs ``forward_train`` on the PyTorch decoder and on the rewritten
    model and compares the (last) outputs numerically.
    """
    check_backend(backend)
    crnn_decoder = get_crnn_decoder_model(rnn_flag)
    crnn_decoder.cpu().eval()
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextRecognition',
            )))
    input = torch.rand(1, 32, 1, 64)
    out_enc = None
    data_samples = None
    # to get outputs of pytorch model
    model_inputs = {
        'feat': input,
        'out_enc': out_enc,
        'data_samples': data_samples
    }
    model_outputs = get_model_outputs(crnn_decoder, 'forward_train',
                                      model_inputs)
    # to get outputs of onnx model after rewrite
    wrapped_model = WrapModel(
        crnn_decoder, 'forward_train', out_enc=out_enc,
        data_samples=data_samples)
    rewrite_inputs = {'feat': input}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=True)
    # Only the final output is compared against the PyTorch reference.
    rewrite_outputs = [rewrite_outputs[-1]]
    if is_backend_output:
        for model_output, rewrite_output in zip(model_outputs,
                                                rewrite_outputs):
            model_output = model_output.squeeze().cpu().numpy()
            rewrite_output = rewrite_output.squeeze()
            # NOTE: a leftover debug print of the raw outputs was removed
            # here; the assertion below carries the actual check.
            assert np.allclose(
                model_output, rewrite_output, rtol=1e-03, atol=1e-04)
    else:
        assert rewrite_outputs is not None
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('data_samples', [[[{}]],
                                          [[{
                                              'resize_shape': [32, 32],
                                              'valid_ratio': 1.0
                                          }]]])
@pytest.mark.parametrize('is_dynamic', [True, False])
def test_forward_of_encoder_decoder_recognizer(data_samples, is_dynamic,
                                               backend):
    """Test forward base_recognizer.

    Exercises both the static-shape and dynamic-axes deploy configs and
    compares PyTorch forward output with the rewritten output.
    """
    check_backend(backend)
    base_recognizer = get_base_recognizer_model()
    base_recognizer.eval()
    if not is_dynamic:
        # Static-shape export config.
        deploy_cfg = mmengine.Config(
            dict(
                backend_config=dict(type=backend.value),
                onnx_config=dict(input_shape=None),
                codebase_config=dict(
                    type='mmocr',
                    task='TextRecognition',
                )))
    else:
        # Dynamic batch/height/width export config.
        deploy_cfg = mmengine.Config(
            dict(
                backend_config=dict(type=backend.value),
                onnx_config=dict(
                    input_shape=None,
                    dynamic_axes={
                        'input': {
                            0: 'batch',
                            2: 'height',
                            3: 'width'
                        },
                        'output': {
                            0: 'batch',
                            2: 'height',
                            3: 'width'
                        }
                    }),
                codebase_config=dict(
                    type='mmocr',
                    task='TextRecognition',
                )))
    input = torch.rand(1, 1, 32, 32)
    model_outputs = base_recognizer.forward(input)
    wrapped_model = WrapModel(
        base_recognizer, 'forward', data_samples=data_samples[0])
    rewrite_inputs = {
        'batch_inputs': input,
    }
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0]
    model_outputs = model_outputs.cpu().numpy()
    rewrite_outputs = rewrite_outputs.cpu().numpy()
    assert np.allclose(
        model_outputs, rewrite_outputs, rtol=1e-03, atol=1e-05)
@pytest.mark.parametrize('backend', [Backend.TENSORRT])
def test_forward_of_fpnc(backend: Backend):
    """Test forward rewrite of fpnc.

    Runs the FPNC neck on CUDA (TensorRT requires it) and compares the
    PyTorch output with the rewritten output.
    """
    check_backend(backend)
    fpnc = get_fpnc_neck_model().cuda()
    fpnc.eval()
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(
                type=backend.value,
                common_config=dict(max_workspace_size=1 << 20),
                model_inputs=[
                    # Fixed 1x1x64x64 shape for min/opt/max profiles.
                    dict(
                        input_shapes=dict(
                            inputs=dict(
                                min_shape=[1, 1, 64, 64],
                                opt_shape=[1, 1, 64, 64],
                                max_shape=[1, 1, 64, 64])))
                ]),
            onnx_config=dict(
                input_shape=None,
                input_names=['inputs'],
                output_names=['output']),
            codebase_config=dict(type='mmocr', task='TextDetection')))
    input = torch.rand(1, 1, 64, 64).cuda()
    model_inputs = {
        'inputs': input,
    }
    model_outputs = get_model_outputs(fpnc, 'forward', model_inputs)
    wrapped_model = WrapModel(fpnc, 'forward')
    rewrite_inputs = {
        'inputs': input,
    }
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        run_with_backend=False,
        deploy_cfg=deploy_cfg)
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0]
    model_outputs = model_outputs.cpu().numpy()
    rewrite_outputs = rewrite_outputs.cpu().numpy()
    assert np.allclose(
        model_outputs, rewrite_outputs, rtol=1e-03, atol=1e-05)
def get_sar_model_cfg(decoder_type: str):
    """Build a minimal SARNet model config.

    Args:
        decoder_type: decoder class name, interpolated into
            ``'mmocr.<decoder_type>'`` (e.g. ``'ParallelSARDecoder'``).

    Returns:
        mmengine.Config: config with a single ``model`` entry.
    """
    model = dict(
        type='SARNet',
        data_preprocessor=dict(
            type='mmocr.TextRecogDataPreprocessor',
            mean=[127, 127, 127],
            std=[127, 127, 127]),
        backbone=dict(type='ResNet31OCR'),
        encoder=dict(
            type='mmocr.SAREncoder',
            enc_bi_rnn=False,
            enc_do_rnn=0.1,
            enc_gru=False),
        decoder=dict(
            type=f'mmocr.{decoder_type}',
            enc_bi_rnn=False,
            dec_bi_rnn=False,
            dec_do_rnn=0,
            dec_gru=False,
            pred_dropout=0.1,
            d_k=512,
            pred_concat=True,
            postprocessor=dict(type='AttentionPostprocessor'),
            module_loss=dict(
                type='CEModuleLoss', ignore_first_char=True,
                reduction='mean'),
            dictionary=dict(
                type='Dictionary',
                dict_file='tests/test_codebase/test_mmocr/'
                'data/lower_english_digits.txt',
                with_start=True,
                with_end=True,
                same_start_end=True,
                with_padding=True,
                with_unknown=True),
            max_seq_len=30))
    return mmengine.Config(dict(model=model))
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('decoder_type',
                         ['SequentialSARDecoder', 'ParallelSARDecoder'])
def test_sar_model(backend: Backend, decoder_type):
    """Export a SARNet model to ONNX under the rewriter and validate it.

    The rewritten output differs from the PyTorch output by design, so the
    test only checks that the export succeeds and the ONNX model is valid.
    """
    check_backend(backend)
    import os.path as osp

    import onnx
    from mmocr.models.textrecog import SARNet
    sar_cfg = get_sar_model_cfg(decoder_type)
    # 'type' is a registry key, not a constructor argument.
    sar_cfg.model.pop('type')
    pytorch_model = SARNet(**(sar_cfg.model))
    model_inputs = {
        'inputs': torch.rand(1, 3, 48, 160),
        'data_samples': None
    }
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextRecognition',
            )))
    # patch model
    pytorch_model.cfg = sar_cfg
    patched_model = patch_model(
        pytorch_model, cfg=deploy_cfg, backend=backend.value)
    onnx_file_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
    input_names = [k for k, v in model_inputs.items() if k != 'ctx']
    with RewriterContext(
            cfg=deploy_cfg, backend=backend.value), torch.no_grad():
        torch.onnx.export(
            patched_model,
            tuple([v for k, v in model_inputs.items()]),
            onnx_file_path,
            export_params=True,
            input_names=input_names,
            output_names=None,
            opset_version=11,
            dynamic_axes=None,
            keep_initializers_as_inputs=False)
    # The result should be different due to the rewrite.
    # So we only check if the file exists
    assert osp.exists(onnx_file_path)
    model = onnx.load(onnx_file_path)
    assert model is not None
    # Let ValidationError propagate directly: a raised exception fails the
    # test with a useful message, unlike the old `except: assert False`
    # which hid the validation details.
    onnx.checker.check_model(model)
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
def test_mmdet_wrapper__forward(backend):
    """Test the rewritten forward of MMDetWrapper (mmdet detector in mmocr).

    Only checks that the rewrite produces an output; values are not
    compared.
    """
    check_backend(backend)
    from mmdet.structures import DetDataSample
    from mmengine.structures import InstanceData
    from mmocr.models.textdet import MMDetWrapper
    cfg, = load_config('tests/test_codebase/test_mmocr/data/mrcnn.py')
    model = MMDetWrapper(cfg.model.cfg)
    model.eval()
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(
                type=backend.value,
                common_config=dict(max_workspace_size=1 << 20)),
            onnx_config=dict(
                input_shape=None,
                input_names=['inputs'],
                output_names=['output']),
            codebase_config=dict(
                type='mmocr',
                task='TextDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    confidence_threshold=0.005,
                    iou_threshold=0.5,
                    max_output_boxes_per_class=200,
                    pre_top_k=5000,
                    keep_top_k=100,
                    background_label_id=-1,
                    export_postprocess_mask=False))))
    input = torch.rand(1, 3, 64, 64)
    # Minimal metainfo required by the data sample.
    img_meta = {
        'ori_shape': [64, 64],
        'img_shape': [64, 64],
        'scale_factor': [1., 1.],
        'img_path': ''
    }
    pred_instances = InstanceData(metainfo=img_meta)
    data_sample = DetDataSample(pred_instances=pred_instances)
    data_sample.set_metainfo(img_meta)
    wrapped_model = WrapModel(model, 'forward', data_samples=[data_sample])
    rewrite_inputs = {'inputs': input}
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    assert rewrite_outputs is not None
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
def test_abi_language_decoder___get_length(backend):
    """Test the rewrite of ``ABILanguageDecoder._get_length``.

    Compares PyTorch and rewritten outputs within numeric tolerance.
    """
    check_backend(backend)
    from mmocr.models.textrecog.decoders import ABILanguageDecoder
    model = ABILanguageDecoder(dictionary=dictionary)
    input = torch.randn(1, 26, 37)
    model_inputs = {'logit': input}
    model_outputs = get_model_outputs(model, '_get_length', model_inputs)
    wrapped_model = WrapModel(model, '_get_length')
    rewrite_inputs = {'logit': input}
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextRecognition',
            )))
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0]
    model_outputs = model_outputs.float().cpu().numpy()
    rewrite_outputs = rewrite_outputs.cpu().numpy()
    # NOTE: a leftover debug print of both outputs was removed; the
    # assertion below is the actual check.
    assert np.allclose(
        model_outputs, rewrite_outputs, rtol=1e-03, atol=1e-05)
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
def test__positional_encoding(backend):
    """Test the rewrite of mmocr ``PositionalEncoding.forward``.

    Compares PyTorch and rewritten outputs within numeric tolerance.
    """
    check_backend(backend)
    from mmocr.models.common.modules import PositionalEncoding
    pytorch_model = PositionalEncoding(64, 20)
    input = torch.rand(1, 20, 64)
    model_inputs = {'x': input}
    model_outputs = get_model_outputs(pytorch_model, 'forward', model_inputs)
    wrapped_model = WrapModel(pytorch_model, 'forward')
    rewrite_inputs = {'x': input}
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextRecognition',
            )))
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0]
    model_outputs = model_outputs.float().cpu().numpy()
    rewrite_outputs = rewrite_outputs.cpu().numpy()
    # NOTE: a leftover debug print of both outputs was removed; the
    # assertion below is the actual check.
    assert np.allclose(
        model_outputs, rewrite_outputs, rtol=1e-03, atol=1e-05)
tests/test_codebase/test_mmocr/test_text_detection.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
os
from
tempfile
import
NamedTemporaryFile
,
TemporaryDirectory
import
mmengine
import
numpy
as
np
import
pytest
import
torch
import
mmdeploy.backend.onnxruntime
as
ort_apis
from
mmdeploy.apis
import
build_task_processor
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Codebase
,
load_config
from
mmdeploy.utils.test
import
SwitchBackendWrapper
# Model/deploy configs shared by all tests in this module.
model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmengine.Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmocr', task='TextDetection'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
# Scratch ONNX path; only the generated file name is used.
onnx_file = NamedTemporaryFile(suffix='.onnx').name
# Populated by the autouse fixture before each test runs.
task_processor = None
img_shape = (32, 32)
# Random uint8 HxWx3 test image.
img = np.random.rand(*img_shape, 3).astype(np.uint8)
@pytest.fixture(autouse=True)
def init_task_processor():
    """Build the TextDetection task processor before every test.

    Skips the test when mmocr is not importable.
    """
    try:
        import_codebase(Codebase.MMOCR)
    except ImportError:
        # `allow_module_level` was dropped: that flag only applies to
        # module-level pytest.skip() calls, not to skips inside a fixture.
        pytest.skip(f'{Codebase.MMOCR} is not installed.')
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
def test_build_pytorch_model():
    """The task processor should build a SingleStageTextDetector."""
    from mmocr.utils.setup_env import register_all_modules
    # Registries must be populated before the model can be built.
    register_all_modules()
    from mmocr.models.textdet.detectors.single_stage_text_detector import \
        SingleStageTextDetector
    model = task_processor.build_pytorch_model(None)
    assert isinstance(model, SingleStageTextDetector)
@pytest.fixture
def backend_model():
    """Yield a backend model whose ORT wrapper returns canned outputs."""
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # Replace real backend inference with fixed random outputs.
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(outputs={
        'output': torch.rand(1, *img_shape),
    })

    yield task_processor.build_backend_model([''])

    # Restore the real wrapper after the test.
    wrapper.recover()
def
test_build_backend_model
(
backend_model
):
assert
isinstance
(
backend_model
,
torch
.
nn
.
Module
)
def test_create_input():
    """create_input should return a (data, tensor) pair."""
    created = task_processor.create_input(img, input_shape=img_shape)
    assert isinstance(created, tuple)
    assert len(created) == 2
def test_visualize(backend_model):
    """visualize() should write an image file for the predicted results."""
    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as tmp_dir:
        # Fix: the original used `dir + 'tmp.jpg'` which, lacking a path
        # separator, produced a SIBLING of the temp directory that leaked
        # past cleanup (and shadowed the builtin `dir`).
        filename = os.path.join(tmp_dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'tmp')
        assert os.path.exists(filename)
def test_get_tensor_from_input():
    """get_tensor_from_input should extract the 'inputs' tensor unchanged."""
    expected = torch.ones(3, 4, 5)
    extracted = task_processor.get_tensor_from_input(
        {'inputs': torch.ones(3, 4, 5)})
    assert torch.equal(extracted, expected)
def test_get_partition_cfg():
    """Partitioned export is unsupported for text detection."""
    with pytest.raises(NotImplementedError):
        task_processor.get_partition_cfg(partition_type='')
tests/test_codebase/test_mmocr/test_text_detection_models.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
mmengine
import
pytest
import
torch
import
mmdeploy.backend.onnxruntime
as
ort_apis
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Backend
,
Codebase
,
load_config
from
mmdeploy.utils.test
import
SwitchBackendWrapper
,
backend_checker
# Skip the whole module when the mmocr codebase is not importable.
try:
    import_codebase(Codebase.MMOCR)
except ImportError:
    pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)

# Side length used for the synthetic test images.
IMAGE_SIZE = 32
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """Tests for the text-detection End2EndModel with a mocked ORT backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'outputs': torch.rand(1, IMAGE_SIZE, IMAGE_SIZE),
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmengine.Config(
            {'onnx_config': {
                'output_names': ['outputs']
            }})
        model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
        model_cfg = load_config(model_cfg_path)[0]

        from mmdeploy.codebase.mmocr.deploy.text_detection_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        # Restore the real backend wrapper.
        cls.wrapper.recover()

    @pytest.mark.parametrize(
        'ori_shape',
        [[IMAGE_SIZE, IMAGE_SIZE], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE]])
    def test_forward(self, ori_shape):
        """Forward should succeed for same-size and up-scaled originals."""
        imgs = torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)
        img_meta = {
            'ori_shape': ori_shape,
            'img_shape': [IMAGE_SIZE, IMAGE_SIZE],
            'scale_factor': [1., 1.],
            'img_path': ''
        }
        from mmengine.structures import InstanceData
        from mmocr.structures import TextDetDataSample
        pred_instances = InstanceData(metainfo=img_meta)
        data_sample = TextDetDataSample(pred_instances=pred_instances)
        data_sample.set_metainfo(img_meta)
        results = self.end2end_model.forward(imgs, [data_sample])
        assert results is not None, 'failed to get output using ' \
            'End2EndModel'
@backend_checker(Backend.ONNXRUNTIME)
def test_build_text_detection_model():
    """build_text_detection_model should return an End2EndModel."""
    model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
    model_cfg = load_config(model_cfg_path)[0]
    deploy_cfg = mmengine.Config(
        dict(
            backend_config=dict(type='onnxruntime'),
            onnx_config=dict(output_names=['outputs']),
            codebase_config=dict(type='mmocr')))

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmocr.deploy.text_detection_model import (
            End2EndModel, build_text_detection_model)
        segmentor = build_text_detection_model([''], model_cfg, deploy_cfg,
                                               'cpu')
        assert isinstance(segmentor, End2EndModel)
tests/test_codebase/test_mmocr/test_text_recognition.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
os
from
tempfile
import
NamedTemporaryFile
,
TemporaryDirectory
import
mmengine
import
numpy
as
np
import
pytest
import
torch
import
mmdeploy.backend.onnxruntime
as
ort_apis
from
mmdeploy.apis
import
build_task_processor
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Codebase
,
load_config
from
mmdeploy.utils.test
import
SwitchBackendWrapper
# Model/deploy configs shared by all tests in this module.
model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = mmengine.Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(type='mmocr', task='TextRecognition'),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['output'])))
# Scratch ONNX path; only the generated file name is used.
onnx_file = NamedTemporaryFile(suffix='.onnx').name
# Populated by the autouse fixture before each test runs.
task_processor = None
img_shape = (32, 32)
# Random uint8 HxWx3 test image.
img = np.random.rand(*img_shape, 3).astype(np.uint8)
@pytest.fixture(autouse=True)
def init_task_processor():
    """Build the TextRecognition task processor before every test.

    Skips the test when mmocr is not importable.
    """
    try:
        import_codebase(Codebase.MMOCR)
    except ImportError:
        # `allow_module_level` was dropped: that flag only applies to
        # module-level pytest.skip() calls, not to skips inside a fixture.
        pytest.skip(f'{Codebase.MMOCR} is not installed.')
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
def test_build_pytorch_model():
    """The task processor should build a BaseRecognizer instance."""
    from mmocr.utils.setup_env import register_all_modules

    # Registries must be populated before the model can be built.
    register_all_modules()
    from mmocr.models.textrecog.recognizers import BaseRecognizer
    built_model = task_processor.build_pytorch_model(None)
    assert isinstance(built_model, BaseRecognizer)
@pytest.fixture
def backend_model():
    """Yield a backend model whose ORT wrapper returns canned outputs."""
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    # Replace real backend inference with fixed random outputs.
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(outputs={
        'output': torch.rand(1, 9, 37),
    })

    yield task_processor.build_backend_model([''])

    # Restore the real wrapper after the test.
    wrapper.recover()
def
test_build_backend_model
(
backend_model
):
assert
isinstance
(
backend_model
,
torch
.
nn
.
Module
)
def test_create_input():
    """create_input should return a (data, tensor) pair."""
    created = task_processor.create_input(img, input_shape=img_shape)
    assert isinstance(created, tuple)
    assert len(created) == 2
def test_visualize(backend_model):
    """visualize() should write an image file for the predicted results."""
    input_dict, _ = task_processor.create_input(img, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as tmp_dir:
        # Fix: the original used `dir + 'tmp.jpg'` which, lacking a path
        # separator, produced a SIBLING of the temp directory that leaked
        # past cleanup (and shadowed the builtin `dir`).
        filename = os.path.join(tmp_dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'tmp')
        assert os.path.exists(filename)
def test_get_tensor_from_input():
    """get_tensor_from_input should extract the 'inputs' tensor unchanged."""
    expected = torch.ones(3, 4, 5)
    extracted = task_processor.get_tensor_from_input(
        {'inputs': torch.ones(3, 4, 5)})
    assert torch.equal(extracted, expected)
def test_get_partition_cfg():
    """Partitioned export is unsupported for text recognition.

    Fix: the original ``try/except NotImplementedError: pass`` also passed
    silently when NO exception was raised, asserting nothing. Use
    ``pytest.raises`` (matching the text-detection twin of this test) so a
    missing NotImplementedError fails the test.
    """
    with pytest.raises(NotImplementedError):
        task_processor.get_partition_cfg(partition_type='')
tests/test_codebase/test_mmocr/test_text_recognition_models.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
mmengine
import
pytest
import
torch
import
mmdeploy.backend.onnxruntime
as
ort_apis
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Backend
,
Codebase
,
load_config
from
mmdeploy.utils.test
import
SwitchBackendWrapper
,
backend_checker
# Skip the whole module when the mmocr codebase is not importable.
try:
    import_codebase(Codebase.MMOCR)
except ImportError:
    pytest.skip(f'{Codebase.MMOCR} is not installed.', allow_module_level=True)

# Side length used for the synthetic test images.
IMAGE_SIZE = 32
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """Tests for the text-recognition End2EndModel with a mocked ORT backend."""

    @classmethod
    def setup_class(cls):
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        cls.outputs = {
            'output': torch.rand(1, 9, 37),
        }
        cls.wrapper.set(outputs=cls.outputs)
        deploy_cfg = mmengine.Config(
            {'onnx_config': {
                'output_names': ['output']
            }})
        model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
        model_cfg = load_config(model_cfg_path)[0]

        from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \
            End2EndModel
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        # Restore the real backend wrapper.
        cls.wrapper.recover()

    @pytest.mark.parametrize(
        'ori_shape',
        [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]])
    def test_forward(self, ori_shape):
        """Forward should succeed for same-size and up-scaled originals."""
        imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)]
        img_meta = {
            'ori_shape': ori_shape,
            'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3],
            'scale_factor': [1., 1.]
        }
        from mmengine.structures import InstanceData
        from mmocr.structures import TextRecogDataSample
        pred_instances = InstanceData(metainfo=img_meta)
        data_sample = TextRecogDataSample(pred_instances=pred_instances)
        data_sample.set_metainfo(img_meta)
        results = self.end2end_model.forward(imgs, [data_sample])
        assert results is not None, 'failed to get output using ' \
            'End2EndModel'
@backend_checker(Backend.ONNXRUNTIME)
def test_build_text_recognition_model():
    """``build_text_recognition_model`` should construct an End2EndModel."""
    model_cfg = load_config(
        'tests/test_codebase/test_mmocr/data/crnn.py')[0]
    deploy_dict = dict(
        backend_config=dict(type='onnxruntime'),
        onnx_config=dict(output_names=['outputs']),
        codebase_config=dict(type='mmocr'))
    deploy_cfg = mmengine.Config(deploy_dict)

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmocr.deploy.text_recognition_model import (
            End2EndModel, build_text_recognition_model)
        recognizer = build_text_recognition_model([''], model_cfg, deploy_cfg,
                                                  'cpu')
        assert isinstance(recognizer, End2EndModel)
tests/test_codebase/test_mmpose/__init__.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (generate_datasample, generate_mmpose_deploy_config,
                    generate_mmpose_task_processor)

# Helpers shared by the mmpose codebase tests, re-exported for convenience.
__all__ = [
    'generate_datasample', 'generate_mmpose_deploy_config',
    'generate_mmpose_task_processor'
]
tests/test_codebase/test_mmpose/conftest.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
pytest
@pytest.fixture(autouse=True)
def init_test():
    """Register all mmpose modules before every test in this directory."""
    # init default scope
    from mmpose.utils import register_all_modules
    register_all_modules(True)
tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json
0 → 100644
View file @
a17c53b8
{
"info"
:
{
"description"
:
"COCO 2017 Dataset"
,
"url"
:
"http://cocodataset.org"
,
"version"
:
"1.0"
,
"year"
:
2017
,
"contributor"
:
"COCO Consortium"
,
"date_created"
:
"2017/09/01"
},
"licenses"
:
[{
"url"
:
"http://creativecommons.org/licenses/by-nc-sa/2.0/"
,
"id"
:
1
,
"name"
:
"Attribution-NonCommercial-ShareAlike License"
},{
"url"
:
"http://creativecommons.org/licenses/by-nc/2.0/"
,
"id"
:
2
,
"name"
:
"Attribution-NonCommercial License"
},{
"url"
:
"http://creativecommons.org/licenses/by-nc-nd/2.0/"
,
"id"
:
3
,
"name"
:
"Attribution-NonCommercial-NoDerivs License"
},{
"url"
:
"http://creativecommons.org/licenses/by/2.0/"
,
"id"
:
4
,
"name"
:
"Attribution License"
},{
"url"
:
"http://creativecommons.org/licenses/by-sa/2.0/"
,
"id"
:
5
,
"name"
:
"Attribution-ShareAlike License"
},{
"url"
:
"http://creativecommons.org/licenses/by-nd/2.0/"
,
"id"
:
6
,
"name"
:
"Attribution-NoDerivs License"
},{
"url"
:
"http://flickr.com/commons/usage/"
,
"id"
:
7
,
"name"
:
"No known copyright restrictions"
},{
"url"
:
"http://www.usa.gov/copyright.shtml"
,
"id"
:
8
,
"name"
:
"United States Government Work"
}],
"images"
:
[{
"license"
:
4
,
"file_name"
:
"000000397133.jpg"
,
"coco_url"
:
"http://images.cocodataset.org/val2017/000000397133.jpg"
,
"height"
:
427
,
"width"
:
640
,
"date_captured"
:
"2013-11-14 17:02:52"
,
"flickr_url"
:
"http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg"
,
"id"
:
397133
}],
"annotations"
:
[{
"segmentation"
:
[[
125.12
,
539.69
,
140.94
,
522.43
,
100.67
,
496.54
,
84.85
,
469.21
,
73.35
,
450.52
,
104.99
,
342.65
,
168.27
,
290.88
,
179.78
,
288
,
189.84
,
286.56
,
191.28
,
260.67
,
202.79
,
240.54
,
221.48
,
237.66
,
248.81
,
243.42
,
257.44
,
256.36
,
253.12
,
262.11
,
253.12
,
275.06
,
299.15
,
233.35
,
329.35
,
207.46
,
355.24
,
206.02
,
363.87
,
206.02
,
365.3
,
210.34
,
373.93
,
221.84
,
363.87
,
226.16
,
363.87
,
237.66
,
350.92
,
237.66
,
332.22
,
234.79
,
314.97
,
249.17
,
271.82
,
313.89
,
253.12
,
326.83
,
227.24
,
352.72
,
214.29
,
357.03
,
212.85
,
372.85
,
208.54
,
395.87
,
228.67
,
414.56
,
245.93
,
421.75
,
266.07
,
424.63
,
276.13
,
437.57
,
266.07
,
450.52
,
284.76
,
464.9
,
286.2
,
479.28
,
291.96
,
489.35
,
310.65
,
512.36
,
284.76
,
549.75
,
244.49
,
522.43
,
215.73
,
546.88
,
199.91
,
558.38
,
204.22
,
565.57
,
189.84
,
568.45
,
184.09
,
575.64
,
172.58
,
578.52
,
145.26
,
567.01
,
117.93
,
551.19
,
133.75
,
532.49
]],
"num_keypoints"
:
10
,
"area"
:
47803.27955
,
"iscrowd"
:
0
,
"keypoints"
:
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
142
,
309
,
1
,
177
,
320
,
2
,
191
,
398
,
2
,
237
,
317
,
2
,
233
,
426
,
2
,
306
,
233
,
2
,
92
,
452
,
2
,
123
,
468
,
2
,
0
,
0
,
0
,
251
,
469
,
2
,
0
,
0
,
0
,
162
,
551
,
2
],
"image_id"
:
425226
,
"bbox"
:
[
73.35
,
206.02
,
300.58
,
372.5
],
"category_id"
:
1
,
"id"
:
183126
}],
"categories"
:
[{
"supercategory"
:
"person"
,
"id"
:
1
,
"name"
:
"person"
,
"keypoints"
:
[
"nose"
,
"left_eye"
,
"right_eye"
,
"left_ear"
,
"right_ear"
,
"left_shoulder"
,
"right_shoulder"
,
"left_elbow"
,
"right_elbow"
,
"left_wrist"
,
"right_wrist"
,
"left_hip"
,
"right_hip"
,
"left_knee"
,
"right_knee"
,
"left_ankle"
,
"right_ankle"
],
"skeleton"
:
[[
16
,
14
],[
14
,
12
],[
17
,
15
],[
15
,
13
],[
12
,
13
],[
6
,
12
],[
7
,
13
],[
6
,
7
],[
6
,
8
],[
7
,
9
],[
8
,
10
],[
9
,
11
],[
2
,
3
],[
1
,
2
],[
1
,
3
],[
2
,
4
],[
3
,
5
],[
4
,
6
],[
5
,
7
]]}]}
tests/test_codebase/test_mmpose/data/model.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
# model settings
# Heatmap codec: 192x256 input decoded from 48x64 heatmaps.
codec = dict(
    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
test_cfg = dict(
    flip_test=False,
    flip_mode='heatmap',
    shift_heatmap=True,
)
# Minimal top-down estimator (ResNet-18 backbone) used as a test fixture.
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(type='ResNet', depth=18),
    head=dict(
        type='HeatmapHead',
        in_channels=512,
        out_channels=17,
        deconv_out_channels=None,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),
        decoder=codec),
    test_cfg=test_cfg)
# dataset settings
dataset_type = 'CocoDataset'
data_mode = 'topdown'
data_root = 'tests/test_codebase/test_mmpose/data/'
file_client_args = dict(backend='disk')
test_pipeline = [
    dict(type='LoadImage', file_client_args=file_client_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]
# Single-sample dataloader over the bundled one-image COCO keypoint subset.
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/person_keypoints_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        lazy_init=True,
        serialize_data=False,
        pipeline=test_pipeline,
    ))
test_dataloader = val_dataloader
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/person_keypoints_val2017.json')
test_evaluator = val_evaluator
# default_runtime
default_scope = 'mmpose'
default_hooks = dict()
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer')
tests/test_codebase/test_mmpose/test_mmpose_models.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
mmengine
import
pytest
import
torch
from
mmengine.config
import
ConfigDict
from
mmengine.structures
import
InstanceData
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Backend
,
Codebase
from
mmdeploy.utils.test
import
WrapModel
,
check_backend
,
get_rewrite_outputs
# torch.testing.assert_close is the modern API; fall back to the deprecated
# assert_allclose on older torch versions.
try:
    from torch.testing import assert_close as torch_assert_close
except Exception:
    from torch.testing import assert_allclose as torch_assert_close

# Skip the whole module when mmpose is not importable.
try:
    import_codebase(Codebase.MMPOSE)
except ImportError:
    pytest.skip(f'{Codebase.MMPOSE} is not installed.', allow_module_level=True)

from .utils import generate_mmpose_deploy_config  # noqa: E402
from .utils import generate_mmpose_task_processor  # noqa: E402
def get_heatmap_head():
    """Build a small, gradient-frozen ``HeatmapHead`` for rewrite tests."""
    from mmpose.models.heads import HeatmapHead

    head = HeatmapHead(
        2,
        4,
        deconv_out_channels=(16, 16, 16),
        loss=dict(type='KeypointMSELoss', use_target_weight=False))
    head.requires_grad_(False)
    return head
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_heatmaphead_forward(backend_type: Backend):
    """Rewritten HeatmapHead.forward should still return a tensor."""
    check_backend(backend_type, True)
    head = get_heatmap_head()
    head.cpu().eval()
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    features = [torch.rand(1, 2, 32, 48)]
    wrapper = WrapModel(head, 'forward')
    outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapper,
        model_inputs={'feats': features},
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    assert isinstance(outputs, torch.Tensor)
def get_msmu_head():
    """Build a single-stage, gradient-frozen ``MSPNHead`` for rewrite tests."""
    from mmpose.models.heads import MSPNHead

    head = MSPNHead(
        num_stages=1,
        num_units=1,
        out_shape=(32, 48),
        unit_channels=16,
        level_indices=[1])
    head.requires_grad_(False)
    return head
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_msmuhead_forward(backend_type: Backend):
    """Rewritten MSPNHead.forward should still return a tensor."""
    check_backend(backend_type, True)
    head = get_msmu_head()
    head.cpu().eval()
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    # One stage with one unit -> nested list of feature maps.
    features = [[torch.rand(1, 16, 32, 48)]]
    wrapper = WrapModel(head, 'forward')
    outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapper,
        model_inputs={'feats': features},
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    assert isinstance(outputs, torch.Tensor)
def get_cross_resolution_weighting_model():
    """Wrap LiteHRNet's ``CrossResolutionWeighting`` as a single-input module.

    Returns a gradient-frozen module that feeds the same tensor at both
    branch resolutions, so rewrite tests can drive it with one input.
    """
    from mmpose.models.backbones.litehrnet import CrossResolutionWeighting

    class DummyModel(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.model = CrossResolutionWeighting([16, 16], ratio=8)

        def forward(self, x):
            assert isinstance(x, torch.Tensor)
            # Duplicate the input so both 16-channel branches are exercised.
            return self.model([x, x])

    model = DummyModel()
    model.requires_grad_(False)
    return model
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_estimator_forward(backend_type: Backend):
    """Rewritten pose estimator forward should still return a tensor."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    processor = generate_mmpose_task_processor(deploy_cfg=deploy_cfg)
    estimator = processor.build_pytorch_model()
    estimator.requires_grad_(False)
    estimator.cpu().eval()
    wrapper = WrapModel(estimator, 'forward', data_samples=None)
    outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapper,
        model_inputs={'inputs': torch.rand(1, 3, 256, 192)},
        run_with_backend=False,
        deploy_cfg=deploy_cfg)
    assert isinstance(outputs, torch.Tensor)
def get_scale_norm_model():
    """Build a gradient-frozen ``ScaleNorm(48)`` layer for rewrite tests."""
    from mmpose.models.utils.rtmcc_block import ScaleNorm
    layer = ScaleNorm(48)
    layer.requires_grad_(False)
    return layer
@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_scale_norm_forward(backend_type: Backend):
    """Rewritten ScaleNorm must match the eager PyTorch output."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    layer = get_scale_norm_model()
    sample = torch.rand(1, 17, 48)
    wrapper = WrapModel(layer, 'forward')
    expected = layer.forward(sample)
    actual, _ = get_rewrite_outputs(
        wrapped_model=wrapper,
        model_inputs={'x': sample},
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    torch_assert_close(actual, expected)
def get_rtmcc_block_model():
    """Build a gradient-frozen ``RTMCCBlock(48, 48, 48)`` for rewrite tests."""
    from mmpose.models.utils.rtmcc_block import RTMCCBlock
    block = RTMCCBlock(48, 48, 48)
    block.requires_grad_(False)
    return block
@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_rtmcc_block_forward(backend_type: Backend):
    """Rewritten RTMCCBlock._forward must match the eager output."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    block = get_rtmcc_block_model()
    sample = torch.rand(1, 17, 48)
    wrapper = WrapModel(block, '_forward')
    expected = block._forward(sample)
    actual, _ = get_rewrite_outputs(
        wrapped_model=wrapper,
        model_inputs={'inputs': sample},
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    torch_assert_close(actual, expected)
def get_scale_model():
    """Build a gradient-frozen ``Scale(48)`` layer for rewrite tests."""
    from mmpose.models.utils.rtmcc_block import Scale
    layer = Scale(48)
    layer.requires_grad_(False)
    return layer
@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_scale_forward(backend_type: Backend):
    """Rewritten Scale.forward must match the eager PyTorch output."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    layer = get_scale_model()
    sample = torch.rand(1, 17, 48)
    wrapper = WrapModel(layer, 'forward')
    expected = layer.forward(sample)
    actual, _ = get_rewrite_outputs(
        wrapped_model=wrapper,
        model_inputs={'x': sample},
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    torch_assert_close(actual, expected)
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_yolox_pose_head(backend_type: Backend):
    """Compare the rewritten YOLOXPoseHead backend output against PyTorch.

    Requires the optional mmpose/projects/yolox-pose extension; skipped
    when it (or mmyolo) is not installed.
    """
    try:
        from mmyolo.utils.setup_env import register_all_modules
        from models.yolox_pose_head import YOLOXPoseHead  # noqa: F401,F403
        register_all_modules(True)
    except ImportError:
        pytest.skip(
            'mmpose/projects/yolox-pose is not installed.',
            allow_module_level=True)
    deploy_cfg = mmengine.Config.fromfile(
        'configs/mmpose/pose-detection_yolox-pose_onnxruntime_dynamic.py')
    check_backend(backend_type, True)
    # Full head config mirroring the yolox-pose project defaults.
    head = YOLOXPoseHead(
        head_module=dict(
            type='YOLOXPoseHeadModule',
            num_classes=1,
            in_channels=256,
            feat_channels=256,
            widen_factor=0.5,
            stacked_convs=2,
            num_keypoints=17,
            featmap_strides=(8, 16, 32),
            use_depthwise=False,
            norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
            act_cfg=dict(type='SiLU', inplace=True),
        ),
        loss_cls=dict(
            type='mmdet.CrossEntropyLoss',
            use_sigmoid=True,
            reduction='sum',
            loss_weight=1.0),
        loss_bbox=dict(
            type='mmdet.IoULoss',
            mode='square',
            eps=1e-16,
            reduction='sum',
            loss_weight=5.0),
        loss_obj=dict(
            type='mmdet.CrossEntropyLoss',
            use_sigmoid=True,
            reduction='sum',
            loss_weight=1.0),
        loss_pose=dict(
            type='OksLoss',
            metainfo='configs/_base_/datasets/coco.py',
            loss_weight=30.0),
        loss_bbox_aux=dict(
            type='mmdet.L1Loss', reduction='sum', loss_weight=1.0),
        train_cfg=ConfigDict(
            assigner=dict(
                type='PoseSimOTAAssigner',
                center_radius=2.5,
                iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
                oks_calculator=dict(
                    type='OksLoss',
                    metainfo='configs/_base_/datasets/coco.py'))),
        test_cfg=ConfigDict(
            yolox_style=True,
            multi_label=False,
            score_thr=0.001,
            max_per_img=300,
            nms=dict(type='nms', iou_threshold=0.65)))

    # Adapter so the head's predict() can be traced with three plain tensor
    # inputs instead of a feature list + data samples.
    class TestYOLOXPoseHeadModel(torch.nn.Module):

        def __init__(self, yolox_pose_head):
            super(TestYOLOXPoseHeadModel, self).__init__()
            self.yolox_pose_head = yolox_pose_head

        def forward(self, x1, x2, x3):
            inputs = [x1, x2, x3]
            data_sample = InstanceData()
            data_sample.set_metainfo(
                dict(ori_shape=(640, 640), scale_factor=(1.0, 1.0)))
            return self.yolox_pose_head.predict(
                inputs, batch_data_samples=[data_sample])

    model = TestYOLOXPoseHeadModel(head)
    model.cpu().eval()
    # Three pyramid levels (strides 8/16/32) for a tiny 64x64-equivalent map.
    model_inputs = [
        torch.randn(1, 128, 8, 8),
        torch.randn(1, 128, 4, 4),
        torch.randn(1, 128, 2, 2)
    ]
    with torch.no_grad():
        pytorch_output = model(*model_inputs)[0]
    # Repack the InstanceData fields into the two tensors the backend emits:
    # [bboxes|scores] and [keypoints|keypoint_scores].
    pred_bboxes = torch.from_numpy(pytorch_output.bboxes).unsqueeze(0)
    pred_bboxes_scores = torch.from_numpy(pytorch_output.scores).reshape(
        1, -1, 1)
    pred_kpts = torch.from_numpy(pytorch_output.keypoints).unsqueeze(0)
    pred_kpts_scores = torch.from_numpy(
        pytorch_output.keypoint_scores).unsqueeze(0).unsqueeze(-1)
    pytorch_output = [
        torch.cat([pred_bboxes, pred_bboxes_scores], dim=-1),
        torch.cat([pred_kpts, pred_kpts_scores], dim=-1)
    ]
    wrapped_model = WrapModel(model, 'forward')
    rewrite_inputs = {
        'x1': model_inputs[0],
        'x2': model_inputs[1],
        'x3': model_inputs[2]
    }
    deploy_cfg.onnx_config.input_names = ['x1', 'x2', 'x3']
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        run_with_backend=True,
        deploy_cfg=deploy_cfg)
    # keep bbox coord >= 0
    rewrite_outputs[0] = rewrite_outputs[0].clamp(min=0)
    torch_assert_close(rewrite_outputs, pytorch_output)
tests/test_codebase/test_mmpose/test_pose_detection.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
from
tempfile
import
NamedTemporaryFile
,
TemporaryDirectory
import
numpy
as
np
import
pytest
import
torch
import
mmdeploy.backend.onnxruntime
as
ort_apis
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Codebase
,
load_config
from
mmdeploy.utils.test
import
SwitchBackendWrapper
try
:
import_codebase
(
Codebase
.
MMPOSE
)
except
ImportError
:
pytest
.
skip
(
f
'
{
Codebase
.
MMPOSE
.
value
}
is not installed.'
,
allow_module_level
=
True
)
from
.utils
import
(
generate_datasample
,
generate_mmpose_deploy_config
,
generate_mmpose_task_processor
)
# Shared fixtures for the mmpose pose-detection task-processor tests.
model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
deploy_cfg = generate_mmpose_deploy_config()
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = generate_mmpose_task_processor()
# (height, width) of the test image and of the stubbed output heatmaps.
img_shape = (192, 256)
heatmap_shape = (48, 64)
# mmpose.apis.inference.LoadImage uses opencv, needs float32 in
# cv2.cvtColor.
img = np.random.rand(*img_shape, 3).astype(np.float32)
img_path = 'tests/data/tiger.jpeg'
# COCO body keypoints -> 17 output channels.
num_output_channels = 17
@pytest.mark.parametrize('imgs', [img, img_path])
def test_create_input(imgs):
    """``create_input`` accepts both an array and a file path."""
    created = task_processor.create_input(imgs, input_shape=img_shape)
    assert isinstance(created, tuple)
    assert len(created) == 2
def test_build_pytorch_model():
    """``build_pytorch_model`` should return a pose estimator instance."""
    from mmpose.models.pose_estimators.base import BasePoseEstimator
    estimator = task_processor.build_pytorch_model(None)
    assert isinstance(estimator, BasePoseEstimator)
@pytest.fixture
def backend_model():
    """Yield a backend pose model backed by a stubbed ORT wrapper.

    The stub always returns a random heatmap tensor shaped
    ``(1, num_output_channels, *heatmap_shape)``.
    """
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

    switcher = SwitchBackendWrapper(ORTWrapper)
    fake_outputs = {
        'output': torch.rand(1, num_output_channels, *heatmap_shape)
    }
    switcher.set(outputs=fake_outputs)
    yield task_processor.build_backend_model([''])
    switcher.recover()
def test_build_backend_model(backend_model):
    """The ``backend_model`` fixture must yield a ``torch.nn.Module``."""
    is_torch_module = isinstance(backend_model, torch.nn.Module)
    assert is_torch_module
def test_visualize():
    """``visualize`` should accept a generated data sample without error."""
    sample = generate_datasample(img.shape[:2])
    out_path = NamedTemporaryFile(suffix='.jpg').name
    task_processor.visualize(
        img, sample, out_path, show_result=False, window_name='test')
def test_get_tensor_from_input():
    """``get_tensor_from_input`` extracts the 'inputs' tensor unchanged."""
    tensor = torch.ones(3, 4, 5)
    extracted = task_processor.get_tensor_from_input({'inputs': tensor})
    assert torch.equal(extracted, tensor)
def test_get_partition_cfg():
    """Partitioning is unsupported for pose detection.

    The task processor is expected to raise ``NotImplementedError``. The
    previous ``try/except: pass`` form passed even when nothing was raised,
    so it asserted nothing; ``pytest.raises`` pins the expected behavior.
    """
    with pytest.raises(NotImplementedError):
        task_processor.get_partition_cfg(partition_type='')
def test_get_model_name():
    """``get_model_name`` returns a string identifier."""
    name = task_processor.get_model_name()
    assert name is not None
    assert isinstance(name, str)
def test_build_dataset_and_dataloader():
    """Dataset and dataloader can both be built from the val config."""
    from torch.utils.data import DataLoader, Dataset
    loader_cfg = model_cfg['val_dataloader']
    built_dataset = task_processor.build_dataset(
        dataset_cfg=loader_cfg['dataset'])
    assert isinstance(built_dataset, Dataset), 'Failed to build dataset'
    built_loader = task_processor.build_dataloader(loader_cfg)
    assert isinstance(built_loader, DataLoader), 'Failed to build dataloader'
def test_build_test_runner(backend_model):
    """``build_test_runner`` wraps the backend model in a DeployTestRunner."""
    from mmdeploy.codebase.base.runner import DeployTestRunner
    # NOTE(review): ``TemporaryDirectory().name`` leaves the object
    # unreferenced, so the directory may be removed as soon as it is
    # garbage-collected; the runner receives a path that may not exist —
    # confirm the runner (re)creates its work dir.
    temp_dir = TemporaryDirectory().name
    runner = task_processor.build_test_runner(backend_model, temp_dir)
    assert isinstance(runner, DeployTestRunner)
def test_get_preprocess():
    """``get_preprocess`` returns a non-None description."""
    preprocess = task_processor.get_preprocess()
    assert preprocess is not None
def test_get_postprocess():
    """``get_postprocess`` returns a dict description."""
    postprocess = task_processor.get_postprocess()
    assert isinstance(postprocess, dict)
tests/test_codebase/test_mmpose/test_pose_detection_model.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
pytest
import
torch
import
mmdeploy.backend.onnxruntime
as
ort_apis
from
mmdeploy.codebase
import
import_codebase
from
mmdeploy.utils
import
Backend
,
Codebase
,
load_config
from
mmdeploy.utils.test
import
SwitchBackendWrapper
,
backend_checker
# Input resolution (height, width) matching the stubbed backend outputs.
IMAGE_H = 192
IMAGE_W = 256

# Skip the whole module when mmpose is not importable.
try:
    import_codebase(Codebase.MMPOSE)
except ImportError:
    pytest.skip(f'{Codebase.MMPOSE} is not installed.', allow_module_level=True)

from .utils import generate_datasample  # noqa: E402
from .utils import generate_mmpose_deploy_config  # noqa: E402
@backend_checker(Backend.ONNXRUNTIME)
class TestEnd2EndModel:
    """End-to-end tests for the mmpose pose-detection ``End2EndModel``.

    Backend inference is stubbed via ``SwitchBackendWrapper`` so no real
    ONNXRuntime session is created.
    """

    @classmethod
    def setup_class(cls):
        """Install the stub ORT wrapper and build one shared End2EndModel."""
        # force add backend wrapper regardless of plugins
        from mmdeploy.backend.onnxruntime import ORTWrapper
        ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
        # simplify backend inference
        cls.wrapper = SwitchBackendWrapper(ORTWrapper)
        # Canned backend output: a single-channel map at input resolution.
        cls.outputs = {
            'output': torch.rand(1, 1, IMAGE_H, IMAGE_W),
        }
        cls.wrapper.set(outputs=cls.outputs)

        from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \
            End2EndModel
        model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'
        model_cfg = load_config(model_cfg_path)[0]
        deploy_cfg = generate_mmpose_deploy_config()
        cls.end2end_model = End2EndModel(
            Backend.ONNXRUNTIME, [''],
            device='cpu',
            deploy_cfg=deploy_cfg,
            model_cfg=model_cfg)

    @classmethod
    def teardown_class(cls):
        """Restore the original ORT wrapper."""
        cls.wrapper.recover()

    def test_forward(self):
        """Forward a random image and check some output is produced."""
        img = torch.rand(1, 3, IMAGE_H, IMAGE_W)
        data_samples = [generate_datasample((IMAGE_H, IMAGE_W))]
        results = self.end2end_model.forward(img, data_samples)
        assert results is not None, 'failed to get output using ' \
            'End2EndModel'
@backend_checker(Backend.ONNXRUNTIME)
def test_build_pose_detection_model():
    """``build_pose_detection_model`` should construct an End2EndModel."""
    model_cfg = load_config(
        'tests/test_codebase/test_mmpose/data/model.py')[0]
    deploy_cfg = generate_mmpose_deploy_config()

    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})

    # simplify backend inference
    with SwitchBackendWrapper(ORTWrapper) as wrapper:
        wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
        from mmdeploy.codebase.mmpose.deploy.pose_detection_model import (
            End2EndModel, build_pose_detection_model)
        detector = build_pose_detection_model([''], model_cfg, deploy_cfg,
                                              'cpu')
        assert isinstance(detector, End2EndModel)
tests/test_codebase/test_mmpose/utils.py
0 → 100644
View file @
a17c53b8
# Copyright (c) OpenMMLab. All rights reserved.
import
mmengine
import
numpy
import
torch
from
mmengine.structures
import
InstanceData
,
PixelData
from
mmdeploy.apis
import
build_task_processor
from
mmdeploy.utils
import
IR
,
Backend
,
Codebase
,
Task
,
load_config
def generate_datasample(img_size, heatmap_size=(64, 48)):
    """Create a ``PoseDataSample`` with random predictions for tests.

    Args:
        img_size: (height, width, ...) of the test image; only the first two
            entries are used.
        heatmap_size: value stored in the sample's metainfo.

    Returns:
        A ``PoseDataSample`` whose ``pred_instances`` and ``gt_instances``
        share the same randomly filled ``InstanceData``.
    """
    from mmpose.structures import PoseDataSample
    h, w = img_size[:2]
    # NOTE(review): centers/scales are built as (h, w)-ordered pairs —
    # confirm downstream consumers expect that ordering rather than (w, h).
    metainfo = dict(
        img_shape=(h, w, 3),
        crop_size=(h, w),
        input_size=(h, w),
        input_center=numpy.asarray((h / 2, w / 2)),
        input_scale=numpy.asarray((h, w)),
        heatmap_size=heatmap_size)
    pred_instances = InstanceData()
    pred_instances.bboxes = numpy.array([[0.0, 0.0, 1.0, 1.0]])
    pred_instances.bbox_scales = torch.ones(1, 2).numpy()
    pred_instances.bbox_scores = torch.ones(1).numpy()
    pred_instances.bbox_centers = torch.ones(1, 2).numpy()
    # 17 keypoints (COCO body layout) with random locations/visibility.
    pred_instances.keypoints = torch.rand((1, 17, 2))
    pred_instances.keypoints_visible = torch.rand((1, 17, 1))
    gt_fields = PixelData()
    # NOTE(review): gt heatmaps are hard-coded to (17, 64, 48) and ignore the
    # ``heatmap_size`` argument — confirm this is intentional.
    gt_fields.heatmaps = torch.rand((17, 64, 48))
    data_sample = PoseDataSample(metainfo=metainfo)
    data_sample.pred_instances = pred_instances
    # Ground truth deliberately aliases the predictions for test purposes.
    data_sample.gt_instances = pred_instances
    data_sample.gt_fields = gt_fields
    return data_sample
def generate_mmpose_deploy_config(backend=Backend.ONNXRUNTIME.value,
                                  cfg_options=None):
    """Build a minimal mmpose deploy config for the given backend.

    Args:
        backend: backend type string (defaults to onnxruntime).
        cfg_options: optional dict merged into the config before returning.

    Returns:
        An ``mmengine.Config`` with backend/codebase/onnx sections.
    """
    backend_section = dict(type=backend)
    codebase_section = dict(
        type=Codebase.MMPOSE.value, task=Task.POSE_DETECTION.value)
    onnx_section = dict(
        type=IR.ONNX.value,
        export_params=True,
        keep_initializers_as_inputs=False,
        opset_version=11,
        input_shape=None,
        input_names=['input'],
        output_names=['output'])
    config = mmengine.Config(
        dict(
            backend_config=backend_section,
            codebase_config=codebase_section,
            onnx_config=onnx_section))
    if cfg_options is not None:
        config.update(cfg_options)
    return config
def generate_mmpose_task_processor(model_cfg=None, deploy_cfg=None):
    """Build a CPU task processor from the default test configs.

    Args:
        model_cfg: model config path/object; defaults to the bundled fixture.
        deploy_cfg: deploy config; defaults to the onnxruntime test config.
    """
    chosen_model_cfg = (
        model_cfg if model_cfg is not None else
        'tests/test_codebase/test_mmpose/data/model.py')
    chosen_deploy_cfg = (
        deploy_cfg if deploy_cfg is not None else
        generate_mmpose_deploy_config())
    chosen_model_cfg, chosen_deploy_cfg = load_config(chosen_model_cfg,
                                                      chosen_deploy_cfg)
    return build_task_processor(chosen_model_cfg, chosen_deploy_cfg, 'cpu')
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment