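# migraphx-infer.py
#
# Benchmark MIGraphX inference throughput (QPS) for a family of static-batch
# ONNX models. For each batch size the script either compiles the ONNX model
# and saves it as a serialized MIGraphX program (.mxr), or loads the saved
# .mxr and runs a timed inference loop against pre-allocated GPU buffers.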
import csv
import json
import time

import migraphx
import numpy as np

# Map dtype strings from the CSV metadata to NumPy dtypes.
DTYPE = {
    "float32": np.float32,
    "float64": np.float64,
    "int32": np.int32,
    "int64": np.int64,
    "uint8": np.uint8,
    "uint16": np.uint16,
    "uint32": np.uint32,
    "uint64": np.uint64,
    "int8": np.int8,
    "int16": np.int16,
}

def read_csv_data(file_path):
    """Read tensor metadata from a CSV file and return {tensor_name: dtype_string}.

    Each row holds the tensor name in column 1 and its dtype string in the
    last column (column 2 holds the shape, e.g. "[1,3,224,224]", which this
    script does not use).
    """
    with open(file_path, "r") as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        rows = list(reader)
    return {row[1]: row[-1] for row in rows}

def load_datasets(datasets_path):
    """Load a JSON file mapping input tensor names to nested value lists."""
    with open(datasets_path, "r") as f:
        return json.load(f)

def allocate_output_memory(model):
    """Pre-allocate a GPU buffer for every model output.

    With offload_copy=False, output buffers must live on the device and be
    passed in the parameter map alongside the inputs.
    """
    output_shapes = model.get_outputs()
    output_data = {}
    for key in output_shapes.keys():
        output_data[key] = migraphx.allocate_gpu(s=output_shapes[key])
    return output_data

if __name__ == "__main__":
    input_names_dtype = read_csv_data("./new_models/model_1/input_tensors.csv")
    compile_model = False  # True: compile ONNX -> .mxr; False: load .mxr and benchmark
    for batch_size in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]:
        dataset_path = f"./new_models/model_1/dataset/input_tensor_datas_{batch_size}.json"
        model_path = f"./new_models/model_1/onnx/model-static-batch-size-{batch_size}.onnx"
        input_datasets = load_datasets(dataset_path)
        input_datas = {
            key: np.array(value).astype(DTYPE[input_names_dtype[key]])
            for key, value in input_datasets.items()
        }
        if compile_model:
            model = migraphx.parse_onnx(model_path)
            print(f"compile {model_path}")
            model.compile(migraphx.get_target("gpu"), offload_copy=False, device_id=0)
            mxr_path = f"./new_models/model_1/mxr/model-static-batch-size-{batch_size}.mxr"
            print(f"save {mxr_path}")
            migraphx.save(model, mxr_path)
        else:
            mxr_path = f"./new_models/model_1/mxr/model-static-batch-size-{batch_size}.mxr"
            print(f"load {mxr_path}")
            model = migraphx.load(mxr_path)
            model_data = allocate_output_memory(model)
            # Copy host inputs to the GPU; with offload_copy=False the parameter
            # map must contain device buffers for both inputs and outputs.
            for key, value in input_datas.items():
                model_data[key] = migraphx.to_gpu(migraphx.argument(value))
            # 100 warm-up runs, then time the next 1000 runs for the QPS estimate.
            for i in range(1100):
                if i == 100:
                    start = time.time()
                model.run(model_data)
            qps = 1000 / (time.time() - start) * batch_size
            print(f"*******batch_size: {batch_size} *******QPS: {qps}")