onnx_to_mxr.py 600 Bytes
Newer Older
sunzhq2's avatar
sunzhq2 committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
import migraphx

# migraphx-driver compile optimized_bert_best.onnx --fp16 --binary --output new_modle_1.mxr --input-dim @input 64 256

# def AllocateOutputMemory(model):
#     outputData={}
#     for key in model.get_outputs().keys():
#         outputData[key] = migraphx.allocate_gpu(s=model.get_outputs()[key])
#     return outputData


# Convert a YOLOv5m ONNX model to a compiled MIGraphX program (.mxr),
# quantized to FP16 and targeting the GPU.
#
# Pipeline: parse ONNX -> quantize to fp16 -> compile for GPU -> serialize.

ONNX_PATH = "./yolov5m.onnx"
MXR_PATH = "yolov5m_fp16.mxr"

# Fix the dynamic input dimensions at parse time: batch=24, 3-channel
# 640x640 images for the "images" input tensor.
input_dims = {"images": [24, 3, 640, 640]}

program = migraphx.parse_onnx(ONNX_PATH, map_input_dims=input_dims)

# In-place FP16 quantization of the parsed program.
migraphx.quantize_fp16(program)

# Compile for GPU device 0. offload_copy=False means the caller is
# responsible for host<->device buffer transfers at run time.
program.compile(t=migraphx.get_target("gpu"), offload_copy=False, device_id=0)

# Persist the compiled program so it can be reloaded without recompiling.
migraphx.save(program, MXR_PATH)