Commit 68661967 authored by limm's avatar limm
Browse files

add config module

parent 4353fa59
Pipeline #2808 canceled with stages
_base_ = ['./voxel-detection_static.py']
onnx_config = dict(
dynamic_axes={
'voxels': {
0: 'voxels_num',
},
'num_points': {
0: 'voxels_num',
},
'coors': {
0: 'voxels_num',
}
},
input_shape=None)
_base_ = [
'./voxel-detection_dynamic.py', '../../_base_/backends/onnxruntime-fp16.py'
]
_base_ = [
'./voxel-detection_dynamic.py', '../../_base_/backends/onnxruntime.py'
]
# OpenVINO deployment; optimal shapes sized for 5000 voxels with up to
# 32 points per voxel and 4 features per point.
_base_ = ['./voxel-detection_dynamic.py', '../../_base_/backends/openvino.py']
onnx_config = {'input_shape': None}
backend_config = {
    'model_inputs': [{
        'opt_shapes': {
            'voxels': [5000, 32, 4],
            'num_points': [5000],
            'coors': [5000, 4],
        }
    }]
}
# OpenVINO deployment; optimal shapes sized for 20000 voxels with up to
# 20 points per voxel and 5 features per point.
_base_ = ['./voxel-detection_dynamic.py', '../../_base_/backends/openvino.py']
onnx_config = {'input_shape': None}
backend_config = {
    'model_inputs': [{
        'opt_shapes': {
            'voxels': [20000, 20, 5],
            'num_points': [20000],
            'coors': [20000, 4],
        }
    }]
}
# OpenVINO deployment; optimal shapes sized for 20000 voxels with up to
# 64 points per voxel and 4 features per point.
_base_ = ['./voxel-detection_dynamic.py', '../../_base_/backends/openvino.py']
onnx_config = {'input_shape': None}
backend_config = {
    'model_inputs': [{
        'opt_shapes': {
            'voxels': [20000, 64, 4],
            'num_points': [20000],
            'coors': [20000, 4],
        }
    }]
}
# Base config for exporting an end-to-end mmdet3d voxel-detection model.
_base_ = ['../../_base_/onnx_config.py']
codebase_config = {
    'type': 'mmdet3d',
    'task': 'VoxelDetection',
    'model_type': 'end2end',
}
onnx_config = dict(
    input_names=['voxels', 'num_points', 'coors'],
    # NOTE: heads emitting multi-level features require additional output
    # names beyond this single-level set.
    output_names=['cls_score0', 'bbox_pred0', 'dir_cls_pred0'])
# TensorRT deployment (32 points/voxel, 4 features). Engine workspace is
# capped at 1 GiB; the voxel count may range from 2000 to 9000.
_base_ = ['./voxel-detection_dynamic.py', '../../_base_/backends/tensorrt.py']
backend_config = {
    'common_config': {'max_workspace_size': 1 << 30},
    'model_inputs': [{
        'input_shapes': {
            'voxels': {
                'min_shape': [2000, 32, 4],
                'opt_shape': [5000, 32, 4],
                'max_shape': [9000, 32, 4],
            },
            'num_points': {
                'min_shape': [2000],
                'opt_shape': [5000],
                'max_shape': [9000],
            },
            'coors': {
                'min_shape': [2000, 4],
                'opt_shape': [5000, 4],
                'max_shape': [9000, 4],
            },
        }
    }]
}
# TensorRT deployment (20 points/voxel, 5 features). Engine workspace is
# capped at 4 GiB; the voxel count may range from 5000 to 30000.
_base_ = ['./voxel-detection_dynamic.py', '../../_base_/backends/tensorrt.py']
backend_config = {
    'common_config': {'max_workspace_size': 1 << 32},
    'model_inputs': [{
        'input_shapes': {
            'voxels': {
                'min_shape': [5000, 20, 5],
                'opt_shape': [20000, 20, 5],
                'max_shape': [30000, 20, 5],
            },
            'num_points': {
                'min_shape': [5000],
                'opt_shape': [20000],
                'max_shape': [30000],
            },
            'coors': {
                'min_shape': [5000, 4],
                'opt_shape': [20000, 4],
                'max_shape': [30000, 4],
            },
        }
    }]
}
# TensorRT deployment (64 points/voxel, 4 features). Engine workspace is
# capped at 4 GiB; the voxel count may range from 5000 to 30000.
_base_ = ['./voxel-detection_dynamic.py', '../../_base_/backends/tensorrt.py']
backend_config = {
    'common_config': {'max_workspace_size': 1 << 32},
    'model_inputs': [{
        'input_shapes': {
            'voxels': {
                'min_shape': [5000, 64, 4],
                'opt_shape': [20000, 64, 4],
                'max_shape': [30000, 64, 4],
            },
            'num_points': {
                'min_shape': [5000],
                'opt_shape': [20000],
                'max_shape': [30000],
            },
            'coors': {
                'min_shape': [5000, 4],
                'opt_shape': [20000, 4],
                'max_shape': [30000, 4],
            },
        }
    }]
}
# Ascend deployment of dynamic-shape text detection. Runtime shapes are
# restricted to the enumerated (batch, height, width) combinations.
_base_ = ['./text-detection_dynamic.py', '../../_base_/backends/ascend.py']
onnx_config = {'input_shape': None}
backend_config = {
    'model_inputs': [{
        # -1 marks a dimension whose value is chosen from dynamic_dims.
        'input_shapes': {'input': [-1, 3, -1, -1]},
        'dynamic_dims': [(1, 640, 640), (4, 640, 640), (1, 1280, 1280)],
    }]
}
# Ascend deployment of static-shape text detection at a fixed 640x640 input.
_base_ = ['./text-detection_static.py', '../../_base_/backends/ascend.py']
onnx_config = {'input_shape': [640, 640]}
backend_config = {
    'model_inputs': [{'input_shapes': {'input': [1, 3, 640, 640]}}]
}
# Core ML deployment via TorchScript with a single fixed 1x3x512x512 input
# (min == max == default, i.e. no shape flexibility).
_base_ = [
    './text-detection_static.py',
    '../../_base_/torchscript_config.py',
    '../../_base_/backends/coreml.py',
]
ir_config = {'input_shape': [512, 512]}
backend_config = {
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 512, 512],
                'max_shape': [1, 3, 512, 512],
                'default_shape': [1, 3, 512, 512],
            }
        }
    }]
}
_base_ = ['./text-detection_static.py']
onnx_config = dict(
dynamic_axes={
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'output': {
0: 'batch',
2: 'height',
3: 'width'
}
}, )
# ONNX Runtime deployment of a text detector that also emits instance masks.
# Batch, spatial and per-image detection-count axes are all dynamic.
_base_ = ['./text-detection_static.py', '../../_base_/backends/onnxruntime.py']

onnx_config = dict(
    output_names=['dets', 'labels', 'masks'],
    dynamic_axes={
        'input': {0: 'batch', 2: 'height', 3: 'width'},
        'dets': {0: 'batch', 1: 'num_dets'},
        'labels': {0: 'batch', 1: 'num_dets'},
        'masks': {0: 'batch', 1: 'num_dets', 2: 'height', 3: 'width'},
    })

# Post-processing (NMS) parameters applied during export.
codebase_config = {
    'post_processing': {
        'score_threshold': 0.05,
        'confidence_threshold': 0.005,
        'iou_threshold': 0.5,
        'max_output_boxes_per_class': 200,
        'pre_top_k': 5000,
        'keep_top_k': 100,
        'background_label_id': -1,
        'export_postprocess_mask': False,
    }
}
_base_ = ['./text-detection_mrcnn_tensorrt_dynamic-320x320-2240x2240.py']
backend_config = dict(common_config=dict(fp16_mode=True))
# INT8 variant: fp16 fallback stays on, and calibration data is written
# to an HDF5 file during the calibration pass.
_base_ = ['./text-detection_mrcnn_tensorrt_dynamic-320x320-2240x2240.py']
backend_config = {'common_config': {'fp16_mode': True, 'int8_mode': True}}
calib_config = {'create_calib': True, 'calib_file': 'calib_data.h5'}
# TensorRT deployment of a text detector that also emits instance masks.
# Input resolution may vary between 320x320 and 2240x2240 (opt 600x800).
_base_ = ['./text-detection_static.py', '../../_base_/backends/tensorrt.py']

onnx_config = dict(
    output_names=['dets', 'labels', 'masks'],
    dynamic_axes={
        'input': {0: 'batch', 2: 'height', 3: 'width'},
        'dets': {0: 'batch', 1: 'num_dets'},
        'labels': {0: 'batch', 1: 'num_dets'},
        'masks': {0: 'batch', 1: 'num_dets', 2: 'height', 3: 'width'},
    })

backend_config = {
    # 1 GiB engine-build workspace.
    'common_config': {'max_workspace_size': 1 << 30},
    'model_inputs': [{
        'input_shapes': {
            'input': {
                'min_shape': [1, 3, 320, 320],
                'opt_shape': [1, 3, 600, 800],
                'max_shape': [1, 3, 2240, 2240],
            }
        }
    }]
}

# Post-processing (NMS) parameters applied during export.
codebase_config = {
    'post_processing': {
        'score_threshold': 0.05,
        'confidence_threshold': 0.005,
        'iou_threshold': 0.5,
        'max_output_boxes_per_class': 200,
        'pre_top_k': 5000,
        'keep_top_k': 100,
        'background_label_id': -1,
        'export_postprocess_mask': False,
    }
}
# TorchScript deployment of mmocr text detection with mask outputs and no
# fixed input resolution.
_base_ = [
    '../../_base_/torchscript_config.py',
    '../../_base_/backends/torchscript.py',
]
ir_config = {
    'input_shape': None,
    'output_names': ['dets', 'labels', 'masks'],
}
codebase_config = dict(
    type='mmocr',
    task='TextDetection',
    # Post-processing (NMS) parameters applied during export.
    post_processing={
        'score_threshold': 0.05,
        'confidence_threshold': 0.005,
        'iou_threshold': 0.5,
        'max_output_boxes_per_class': 200,
        'pre_top_k': 5000,
        'keep_top_k': 100,
        'background_label_id': -1,
        'export_postprocess_mask': False,
    })
_base_ = ['./text-detection_static.py', '../../_base_/backends/ncnn-int8.py']
onnx_config = dict(input_shape=None)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment