Commit 68661967 authored by limm's avatar limm
Browse files

add config module

parent 4353fa59
Pipeline #2808 canceled with stages
# ncnn backend: 8-bit integer quantized inference on CPU (Vulkan GPU path off).
backend_config = {'type': 'ncnn', 'precision': 'INT8', 'use_vulkan': False}
# ncnn backend: full 32-bit float inference on CPU (Vulkan GPU path off).
backend_config = {'type': 'ncnn', 'precision': 'FP32', 'use_vulkan': False}
# ONNX Runtime backend exporting the model at half (fp16) precision.
backend_config = {
    'type': 'onnxruntime',
    'precision': 'fp16',
    # Options forwarded to the fp32 -> fp16 conversion pass
    # (names match onnxconverter-common's convert_float_to_float16 — verify).
    'common_config': {
        'min_positive_val': 1e-7,      # clamp floor for tiny positive values
        'max_finite_val': 1e4,         # clamp ceiling before fp16 overflow
        'keep_io_types': False,        # graph inputs/outputs also become fp16
        'disable_shape_infer': False,  # shape inference stays enabled
        'op_block_list': None,         # no op types excluded from conversion
        'node_block_list': None,       # no named nodes excluded
    },
}
# ONNX Runtime backend with all settings left at their defaults.
backend_config = {'type': 'onnxruntime'}
# OpenVINO backend with all settings left at their defaults.
backend_config = {'type': 'openvino'}
# PPL.NN backend with all settings left at their defaults.
backend_config = {'type': 'pplnn'}
# RKNN backend targeting the RV1126 SoC, with quantization turned on.
backend_config = {
    'type': 'rknn',
    'common_config': {
        'target_platform': 'rv1126',  # alternative target: 'rk3588'
        'optimization_level': 1,
    },
    'quantization_config': {
        'do_quantization': True,
        'dataset': None,            # no calibration dataset configured here
        'pre_compile': False,
        'rknn_batch_size': -1,      # -1 presumably keeps the model's batch — verify
    },
}
# SDK backend with all settings left at their defaults.
backend_config = {'type': 'sdk'}
# SNPE backend with all settings left at their defaults.
backend_config = {'type': 'snpe'}
# TensorRT backend building half-precision engines; workspace size 0 leaves
# the limit to the backend's own default.
backend_config = {
    'type': 'tensorrt',
    'common_config': {'fp16_mode': True, 'max_workspace_size': 0},
}
# Extends the base TensorRT config: keep fp16 and additionally enable int8
# quantization, generating a calibration table into an HDF5 file.
_base_ = ['./tensorrt.py']
backend_config = {'common_config': {'fp16_mode': True, 'int8_mode': True}}
calib_config = {'create_calib': True, 'calib_file': 'calib_data.h5'}
# TensorRT backend building full-precision (fp32) engines; workspace size 0
# leaves the limit to the backend's own default.
backend_config = {
    'type': 'tensorrt',
    'common_config': {'fp16_mode': False, 'max_workspace_size': 0},
}
# TorchScript backend with all settings left at their defaults.
backend_config = {'type': 'torchscript'}
# TVM backend with all settings left at their defaults.
backend_config = {'type': 'tvm'}
# VACC backend: one fixed 1x3x224x224 input, quantized to fp16 using
# percentile calibration over 1000 samples.
backend_config = {
    'type': 'vacc',
    'common_config': {'name': 'end2end'},
    'model_inputs': [{
        'shape': {'input': [1, 3, 224, 224]},
        'qconfig': {
            'dtype': 'fp16',
            'calibrate_mode': 'percentile',
            'weight_scale': 'max',
            'data_transmode': 1,
            'per_channel': False,     # per-tensor (not per-channel) scaling
            'cluster_mode': 0,
            'skip_conv_layers': [],   # quantize every conv layer
            'calib_num': 1000,        # number of calibration samples
        },
    }],
}
# Base ONNX export settings: opset 11, one named input and output, graph
# optimization enabled, no fixed input shape.
onnx_config = {
    'type': 'onnx',
    'export_params': True,                  # bake weights into the graph
    'keep_initializers_as_inputs': False,
    'opset_version': 11,
    'save_file': 'end2end.onnx',
    'input_names': ['input'],
    'output_names': ['output'],
    'input_shape': None,                    # shape resolved elsewhere
    'optimize': True,
}
# TorchScript intermediate-representation export settings.
ir_config = {
    'type': 'torchscript',
    'save_file': 'end2end.pt',
    'input_names': ['input'],
    'output_names': ['output'],
    'input_shape': None,   # shape resolved elsewhere
}
# Dynamic-shape variant of the static video-recognition export: batch,
# clip count, and spatial dims of the input are left symbolic.
_base_ = ['./video-recognition_static.py']
onnx_config = {
    'dynamic_axes': {
        'input': {
            0: 'batch',
            1: 'num_crops * num_segs',
            3: 'height',
            4: 'width',
        },
        'output': {0: 'batch'},
    },
    'input_shape': None,
}
# Static-shape TensorRT deployment for video recognition: a single fixed
# 1x250x3x224x224 input (min == opt == max), 1 GiB workspace.
_base_ = ['./video-recognition_static.py', '../../_base_/backends/tensorrt.py']
onnx_config = {'input_shape': [224, 224]}
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},  # 1 GiB
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 250, 3, 224, 224],
                'opt_shape': [1, 250, 3, 224, 224],
                'max_shape': [1, 250, 3, 224, 224],
            },
        },
    }],
}
# Dynamic-shape variant for 3D (temporal) video-recognition input: batch,
# clip count, time, and spatial dims are left symbolic.
_base_ = ['./video-recognition_static.py']
onnx_config = {
    'dynamic_axes': {
        'input': {
            0: 'batch',
            1: 'num_crops * num_segs',
            3: 'time',
            4: 'height',
            5: 'width',
        },
        'output': {0: 'batch'},
    },
    'input_shape': None,
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment