Commit 68661967 authored by limm's avatar limm
Browse files

add config module

parent 4353fa59
Pipeline #2808 canceled with stages
# TensorRT INT8 deployment config for classification with a dynamic batch.
_base_ = ['./classification_dynamic.py', '../_base_/backends/tensorrt-int8.py']

# Spatial size used for the ONNX export.
onnx_config = {'input_shape': [224, 224]}

# TensorRT optimization profile: batch ranges 1 (min) to 8 (max),
# engine tuned for batch 4 (opt).
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},  # 1 GiB builder workspace
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 224, 224],
                'opt_shape': [4, 3, 224, 224],
                'max_shape': [8, 3, 224, 224],
            }
        }
    }],
}
# TensorRT INT8 deployment config for classification with a fixed batch of 1.
_base_ = ['./classification_static.py', '../_base_/backends/tensorrt-int8.py']

# Spatial size used for the ONNX export.
onnx_config = {'input_shape': [224, 224]}

# Static profile: min == opt == max, so the engine accepts exactly 1x3x224x224.
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},  # 1 GiB builder workspace
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 224, 224],
                'opt_shape': [1, 3, 224, 224],
                'max_shape': [1, 3, 224, 224],
            }
        }
    }],
}
# TensorRT (fp32) deployment config for classification with a dynamic batch.
_base_ = ['./classification_dynamic.py', '../_base_/backends/tensorrt.py']

# Spatial size used for the ONNX export.
onnx_config = {'input_shape': [224, 224]}

# Optimization profile: batch 1..8, tuned for batch 4.
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},  # 1 GiB builder workspace
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 224, 224],
                'opt_shape': [4, 3, 224, 224],
                'max_shape': [8, 3, 224, 224],
            }
        }
    }],
}
# TensorRT (fp32) deployment config for classification with a fixed batch of 1.
_base_ = ['./classification_static.py', '../_base_/backends/tensorrt.py']

# Spatial size used for the ONNX export.
onnx_config = {'input_shape': [224, 224]}

# Static profile: the engine accepts exactly 1x3x224x224.
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},  # 1 GiB builder workspace
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 224, 224],
                'opt_shape': [1, 3, 224, 224],
                'max_shape': [1, 3, 224, 224],
            }
        }
    }],
}
# TorchScript deployment config for mmpretrain classification models.
_base_ = [
    '../_base_/torchscript_config.py', '../_base_/backends/torchscript.py'
]

# No fixed input resolution for the traced IR.
ir_config = {'input_shape': None}
codebase_config = {'type': 'mmpretrain', 'task': 'Classification'}
# TVM deployment config tuned with the auto-scheduler, static 224x224 input.
_base_ = ['./classification_static.py', '../_base_/backends/tvm.py']

onnx_config = {'input_shape': [224, 224]}

backend_config = {
    'model_inputs': [{
        'shape': {'input': [1, 3, 224, 224]},
        'dtype': {'input': 'float32'},
        # Auto-schedule tuning; results are written to the log file.
        'tuner': {
            'type': 'AutoScheduleTuner',
            'log_file': 'tvm_tune_log.log',
            'num_measure_trials': 2000,
        },
    }]
}
# INT8 variant of the TVM AutoTVM static classification config.
_base_ = ['./classification_tvm-autotvm_static-224x224.py']

# Generate an HDF5 calibration dataset used for quantization.
calib_config = {'create_calib': True, 'calib_file': 'calib_data.h5'}

backend_config = {
    'model_inputs': [{
        'shape': {'input': [1, 3, 224, 224]},
        'dtype': {'input': 'float32'},
        'tuner': {
            'type': 'AutoTVMTuner',
            'log_file': 'tvm_tune_log.log',
            'n_trial': 1000,
            # AutoTVM delegates its search strategy to an inner tuner.
            'tuner': {'type': 'XGBTuner'},
        },
        # KL-divergence calibration for activations, max scaling for weights.
        'qconfig': {'calibrate_mode': 'kl_divergence', 'weight_scale': 'max'},
    }]
}
# TVM deployment config tuned with AutoTVM, static 224x224 input.
_base_ = ['./classification_static.py', '../_base_/backends/tvm.py']

onnx_config = {'input_shape': [224, 224]}

backend_config = {
    'model_inputs': [{
        'shape': {'input': [1, 3, 224, 224]},
        'dtype': {'input': 'float32'},
        'tuner': {
            'type': 'AutoTVMTuner',
            'log_file': 'tvm_tune_log.log',
            'n_trial': 1000,
            # AutoTVM delegates its search strategy to an inner tuner.
            'tuner': {'type': 'XGBTuner'},
        },
    }]
}
# VACC fp16 deployment config for classification.
_base_ = ['./classification_static.py', '../_base_/backends/vacc.py']

# VDSP on-device preprocessing parameters: resize short edge to 256,
# then center-crop to 224.
# NOTE(review): mean/std values (e.g. 22459) appear to be encoded constants
# expected by the VDSP preprocessor rather than plain pixel means — confirm
# against the VACC toolchain documentation.
backend_config = {
    'common_config': {
        'vdsp_params_info': {
            'vdsp_op_type': 300,
            'iimage_format': 5000,
            'iimage_width': 256,
            'iimage_height': 256,
            'iimage_width_pitch': 256,
            'iimage_height_pitch': 256,
            'short_edge_threshold': 256,
            'resize_type': 1,
            'color_cvt_code': 2,
            'color_space': 0,
            'crop_size': 224,
            'meanr': 22459,
            'meang': 22340,
            'meanb': 22136,
            'stdr': 21325,
            'stdg': 21284,
            'stdb': 21292,
            'norma_type': 3,
        }
    },
    'model_inputs': [
        {'shape': {'input': [1, 3, 224, 224]}, 'qconfig': {'dtype': 'fp16'}}
    ],
}
# VACC INT8 deployment config for classification, static 224x224 input.
_base_ = ['./classification_static.py', '../_base_/backends/vacc.py']

backend_config = {
    'model_inputs': [
        {'shape': {'input': [1, 3, 224, 224]}, 'qconfig': {'dtype': 'int8'}}
    ]
}
# ONNX Runtime fp16 deployment config for rotated detection, dynamic shapes.
_base_ = [
    './rotated-detection_static.py', '../_base_/backends/onnxruntime-fp16.py'
]

# Export with named outputs and mark batch/height/width (and the per-image
# detection count) as dynamic ONNX axes.
onnx_config = {
    'output_names': ['dets', 'labels'],
    'input_shape': [1024, 1024],
    'dynamic_axes': {
        'input': {0: 'batch', 2: 'height', 3: 'width'},
        'dets': {0: 'batch', 1: 'num_dets'},
        'labels': {0: 'batch', 1: 'num_dets'},
    },
}

# Keep these ops in fp32 when converting the model to fp16.
backend_config = {
    'common_config': {'op_block_list': ['NMSRotated', 'Resize']}
}
# Dynamic-shape variant of the static ONNX Runtime rotated-detection config:
# mark batch/height/width (and per-image detection count) as dynamic axes.
_base_ = ['./rotated-detection_onnxruntime_static.py']

onnx_config = {
    'dynamic_axes': {
        'input': {0: 'batch', 2: 'height', 3: 'width'},
        'dets': {0: 'batch', 1: 'num_dets'},
        'labels': {0: 'batch', 1: 'num_dets'},
    }
}
# ONNX Runtime deployment config for rotated detection, static 1024x1024 input.
_base_ = ['./rotated-detection_static.py', '../_base_/backends/onnxruntime.py']

onnx_config = {'output_names': ['dets', 'labels'], 'input_shape': [1024, 1024]}
# SDK deployment config for rotated detection.
_base_ = ['./rotated-detection_static.py', '../_base_/backends/sdk.py']

codebase_config = {'model_type': 'sdk'}

# Minimal SDK data pipeline: load the image and collect it with meta info.
backend_config = {
    'pipeline': [
        {'type': 'LoadImageFromFile'},
        {
            'type': 'Collect',
            'keys': ['img'],
            'meta_keys': ['filename', 'ori_shape'],
        },
    ]
}
# Base deployment config for mmrotate rotated detection.
_base_ = ['../_base_/onnx_config.py']

# Post-processing thresholds applied to exported rotated-detection models.
codebase_config = {
    'type': 'mmrotate',
    'task': 'RotatedDetection',
    'post_processing': {
        'score_threshold': 0.05,
        'iou_threshold': 0.1,
        'pre_top_k': 3000,
        'keep_top_k': 2000,
        'max_output_boxes_per_class': 2000,
    },
}
# fp16 variant of the dynamic TensorRT rotated-detection config.
_base_ = ['./rotated-detection_tensorrt_dynamic-320x320-1024x1024.py']

# Enable half-precision engine building.
backend_config = {'common_config': {'fp16_mode': True}}
# TensorRT fp16 deployment config for rotated detection, static 1024x1024 input.
_base_ = [
    './rotated-detection_static.py', '../_base_/backends/tensorrt-fp16.py'
]

# Fix: use a list for input_shape — every other config in this module uses a
# list, and mixing tuple/list here was an inconsistency.
onnx_config = dict(output_names=['dets', 'labels'], input_shape=[1024, 1024])

# Static profile: the engine accepts exactly 1x3x1024x1024.
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),  # 1 GiB builder workspace
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 1024, 1024],
                    opt_shape=[1, 3, 1024, 1024],
                    max_shape=[1, 3, 1024, 1024])))
    ])
# TensorRT deployment config for rotated detection with dynamic input sizes.
_base_ = ['./rotated-detection_static.py', '../_base_/backends/tensorrt.py']

# Export with dynamic batch/height/width and per-image detection count.
onnx_config = {
    'output_names': ['dets', 'labels'],
    'input_shape': None,
    'dynamic_axes': {
        'input': {0: 'batch', 2: 'height', 3: 'width'},
        'dets': {0: 'batch', 1: 'num_dets'},
        'labels': {0: 'batch', 1: 'num_dets'},
    },
}

# Optimization profile: spatial size 320x320 (min) up to 1024x1024 (max),
# tuned for 1024x1024 (opt); batch is fixed at 1.
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},  # 1 GiB builder workspace
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 320, 320],
                'opt_shape': [1, 3, 1024, 1024],
                'max_shape': [1, 3, 1024, 1024],
            }
        }
    }],
}
# Ascend deployment config for segmentation, static 2048x1024 input.
# onnx input_shape is [W, H]; the Ascend model input is NCHW.
_base_ = ['./segmentation_static.py', '../_base_/backends/ascend.py']

onnx_config = {'input_shape': [2048, 1024]}
backend_config = {
    'model_inputs': [{'input_shapes': {'input': [1, 3, 1024, 2048]}}]
}
# Ascend deployment config for segmentation, static 1024x512 input.
# onnx input_shape is [W, H]; the Ascend model input is NCHW.
_base_ = ['./segmentation_static.py', '../_base_/backends/ascend.py']

onnx_config = {'input_shape': [1024, 512]}
backend_config = {
    'model_inputs': [{'input_shapes': {'input': [1, 3, 512, 1024]}}]
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment