Commit 68661967 authored by limm's avatar limm
Browse files

add config module

parent 4353fa59
Pipeline #2808 canceled with stages
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/onnxruntime.py'
]
onnx_config = dict(input_shape=None)
_base_ = ['./text-recognition_dynamic.py', '../../_base_/backends/pplnn.py']
onnx_config = dict(input_shape=None)
backend_config = dict(model_inputs=dict(opt_shape=[1, 1, 32, 32]))
_base_ = ['./text-recognition_dynamic.py', '../../_base_/backends/sdk.py']
codebase_config = dict(model_type='sdk')
backend_config = dict(pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadOCRAnnotations', with_text=True),
dict(
type='PackTextRecogInputs',
meta_keys=('img_path', 'ori_shape', 'img_shape', 'valid_ratio'))
])
_base_ = ['../../_base_/onnx_config.py']
codebase_config = dict(type='mmocr', task='TextRecognition')
# 1 channel input for CRNN models
_base_ = [
'./text-recognition_dynamic.py', '../../_base_/backends/tensorrt-fp16.py'
]
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 1, 32, 32],
opt_shape=[1, 1, 32, 64],
max_shape=[1, 1, 32, 640])))
])
# 3 channel and 32 height input for SATRN models
_base_ = [
'./text-recognition_dynamic.py', '../../_base_/backends/tensorrt-fp16.py'
]
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 32],
opt_shape=[1, 3, 32, 64],
max_shape=[1, 3, 32, 640])))
])
# 3 channel and 48 height for SAR models
_base_ = [
'./text-recognition_dynamic.py', '../../_base_/backends/tensorrt-fp16.py'
]
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 48, 64],
opt_shape=[1, 3, 48, 64],
max_shape=[1, 3, 48, 640])))
])
# 1 channel input for CRNN models
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/tensorrt-fp16.py'
]
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 1, 32, 32],
opt_shape=[1, 1, 32, 32],
max_shape=[1, 1, 32, 32])))
])
# ABINet models use static input 32x128
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/tensorrt-fp16.py'
]
onnx_config = dict(input_shape=[128, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 128],
opt_shape=[1, 3, 32, 128],
max_shape=[1, 3, 32, 128])))
])
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/tensorrt-fp16.py'
]
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 32],
opt_shape=[1, 3, 32, 32],
max_shape=[1, 3, 32, 32])))
])
# 1 channel input for CRNN models
_base_ = [
'./text-recognition_dynamic.py', '../../_base_/backends/tensorrt-int8.py'
]
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 1, 32, 32],
opt_shape=[1, 1, 32, 64],
max_shape=[1, 1, 32, 640])))
])
# 3 channel and 32 height input for SATRN models
_base_ = [
'./text-recognition_dynamic.py', '../../_base_/backends/tensorrt-int8.py'
]
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 32],
opt_shape=[1, 3, 32, 64],
max_shape=[1, 3, 32, 640])))
])
# 3 channel and 48 height for SAR models
_base_ = [
'./text-recognition_dynamic.py', '../../_base_/backends/tensorrt-int8.py'
]
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 48, 64],
opt_shape=[1, 3, 48, 64],
max_shape=[1, 3, 48, 640])))
])
# 1 channel input for CRNN models
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/tensorrt-int8.py'
]
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 1, 32, 32],
opt_shape=[1, 1, 32, 32],
max_shape=[1, 1, 32, 32])))
])
# ABINet models use static input 32x128
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/tensorrt-int8.py'
]
onnx_config = dict(input_shape=[128, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 128],
opt_shape=[1, 3, 32, 128],
max_shape=[1, 3, 32, 128])))
])
_base_ = [
'./text-recognition_static.py', '../../_base_/backends/tensorrt-int8.py'
]
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 32],
opt_shape=[1, 3, 32, 32],
max_shape=[1, 3, 32, 32])))
])
# 1 channel input for CRNN models
_base_ = ['./text-recognition_dynamic.py', '../../_base_/backends/tensorrt.py']
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 1, 32, 32],
opt_shape=[1, 1, 32, 64],
max_shape=[1, 1, 32, 640])))
])
# 3 channel and 32 height input for SATRN models
_base_ = ['./text-recognition_dynamic.py', '../../_base_/backends/tensorrt.py']
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 32, 32],
opt_shape=[1, 3, 32, 64],
max_shape=[1, 3, 32, 640])))
])
# 3 channel and 48 height for SAR models
_base_ = ['./text-recognition_dynamic.py', '../../_base_/backends/tensorrt.py']
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 48, 64],
opt_shape=[1, 3, 48, 64],
max_shape=[1, 3, 48, 640])))
])
# 1 channel input for CRNN models
_base_ = ['./text-recognition_static.py', '../../_base_/backends/tensorrt.py']
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 1, 32, 32],
opt_shape=[1, 1, 32, 32],
max_shape=[1, 1, 32, 32])))
])
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment