"tests/vscode:/vscode.git/clone" did not exist on "f586c415eb04c02ea6729f92df7b096688b468ca"
Commit 89f99655 authored by liucong

Update the project examples

parent a5f006e7
@@ -125,7 +125,21 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<std::size_
 }
 ```
-The MIGraphX inference result inferenceResults of YOLOV5 is of type std::vector<migraphx::argument>. The YOLOV5 ONNX model has a single output, so result equals inferenceResults[0]. result has three dimensions: outputShape.lens()[0]=1 is the batch dimension, outputShape.lens()[1]=22743 is the number of generated anchors, and outputShape.lens()[2]=85 is the prediction vector of each anchor. The 85 values can be split as 4+1+80: the first 4 are the regression parameters of each feature point, which are adjusted to obtain the predicted box; the 5th indicates whether the feature point contains an object; and the last 80 indicate the class of the object the feature point contains. After obtaining this information, anchor filtering is performed in two steps:
+The MIGraphX inference result inferenceResults of YOLOV5 is of type std::vector<migraphx::argument>. The YOLOV5 ONNX model has a single output, so result equals inferenceResults[0]. result has three dimensions: outputShape.lens()[0]=1 is the batch dimension, outputShape.lens()[1]=22743 is the number of generated anchors, and outputShape.lens()[2]=85 is the prediction vector of each anchor. The 85 values can be split as 4+1+80: the first 4 are the regression parameters of each feature point, which are adjusted to obtain the predicted box; the 5th indicates whether the feature point contains an object; and the last 80 indicate the class of the object the feature point contains.
+In addition, to select specific output nodes, pass the outputNames parameter to the eval() method:
+```
+...
+// inference
+std::vector<std::string> outputNames = {"output0"};
+std::vector<migraphx::argument> inferenceResults = net.eval(inputData, outputNames);
+...
+```
+If the outputNames parameter is not specified, all output nodes are returned by default, in the same order as the outputs of the ONNX model; the output order of an ONNX file can be inspected with netron.
+After obtaining this information, anchor filtering is performed in two steps:
 - Step 1: filter by the objectThreshold threshold; an anchor whose objectness score is above the threshold is considered to contain an object, otherwise it is considered empty.
 - Step 2: filter by the confidenceThreshold threshold; for an anchor that passed step 1, if its maximum class confidence score maxClassScore is above the threshold, the anchor's box coordinates and predicted class are extracted, otherwise the anchor is discarded.
...
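To make the two-step filtering described in the hunk above concrete, here is a minimal editorial sketch (not part of this commit). It assumes the single output has already been copied from inferenceResults[0] into a contiguous float buffer of 22743 x 85 values; the Detection struct and the FilterAnchors name are hypothetical, while objectThreshold and confidenceThreshold are the thresholds mentioned in the documentation. The first 4 box values are carried through unchanged; decoding them into final boxes and running NMS are omitted.
```
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical record for one detection kept after filtering.
struct Detection { float cx, cy, w, h, score; int classId; };

// data points to numAnchors * 85 floats laid out per anchor as
// [cx, cy, w, h, objectness, 80 class scores].
std::vector<Detection> FilterAnchors(const float *data, std::size_t numAnchors,
                                     float objectThreshold, float confidenceThreshold)
{
    std::vector<Detection> kept;
    for (std::size_t i = 0; i < numAnchors; ++i)
    {
        const float *p = data + i * 85;
        // Step 1: keep only anchors whose objectness exceeds objectThreshold.
        if (p[4] < objectThreshold)
            continue;
        // Step 2: find the best of the 80 class scores (maxClassScore).
        const float *cls = p + 5;
        const float *best = std::max_element(cls, cls + 80);
        float maxClassScore = *best;  // some implementations also multiply by p[4]
        if (maxClassScore < confidenceThreshold)
            continue;
        kept.push_back({p[0], p[1], p[2], p[3], maxClassScore,
                        static_cast<int>(best - cls)});
    }
    return kept;  // a non-maximum suppression pass would normally follow
}
```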
@@ -53,18 +53,28 @@ class YOLOv5:
             maxInput={"images":[1,3,800,800]}
             self.model = migraphx.parse_onnx(path, map_input_dims=maxInput)
-            self.inputName = self.model.get_parameter_names()[0]
-            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
-            print("inputName:{0} \ninputMaxShape:{1}".format(self.inputName, inputShape))
+            # Get the model input/output node information
+            print("inputs:")
+            inputs = self.model.get_inputs()
+            for key, value in inputs.items():
+                print("{}:{}".format(key, value))
+            print("outputs:")
+            outputs = self.model.get_outputs()
+            for key, value in outputs.items():
+                print("{}:{}".format(key, value))
+            # Get the model input name
+            self.inputName = "images"
+            # Get the model input shape
+            inputShape = inputs[self.inputName].lens()
+            self.inputHeight = int(inputShape[2])
+            self.inputWidth = int(inputShape[3])
+            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
         else:
             self.model = migraphx.parse_onnx(path)
-            self.inputName = self.model.get_parameter_names()[0]
-            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
-            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
-            # Static inference size
-            self.inputWidth = inputShape[3]
-            self.inputHeight = inputShape[2]
+            ...
         # Compile the model
         self.model.compile(t=migraphx.get_target("gpu"), device_id=0) # device_id: set the GPU device, device 0 by default
@@ -85,7 +95,7 @@ def detect(self, image, input_shape=None):
         # Run inference
         start = time.time()
-        result = self.model.run({self.model.get_parameter_names()[0]: input_img})
+        result = self.model.run({self.inputName: input_img})
         print('net forward time: {:.4f}'.format(time.time() - start))
         # Post-process the model output
         boxes, scores, class_ids = self.process_output(result)
...
@@ -20,19 +20,47 @@ class YOLOv5:
         if self.isDynamic:
             maxInput={"images":[1,3,800,800]}
             self.model = migraphx.parse_onnx(path, map_input_dims=maxInput)
-            self.inputName = self.model.get_parameter_names()[0]
-            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
-            print("inputName:{0} \ninputMaxShape:{1}".format(self.inputName, inputShape))
+            # Get the model input/output node information
+            print("inputs:")
+            inputs = self.model.get_inputs()
+            for key, value in inputs.items():
+                print("{}:{}".format(key, value))
+            print("outputs:")
+            outputs = self.model.get_outputs()
+            for key, value in outputs.items():
+                print("{}:{}".format(key, value))
+            # Get the model input name
+            self.inputName = "images"
+            # Get the model input shape
+            inputShape = inputs[self.inputName].lens()
+            self.inputHeight = int(inputShape[2])
+            self.inputWidth = int(inputShape[3])
+            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
         else:
             self.model = migraphx.parse_onnx(path)
-            self.inputName = self.model.get_parameter_names()[0]
-            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
-            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
-            # Static inference size
-            self.inputWidth = inputShape[3]
-            self.inputHeight = inputShape[2]
+            # Get the model input/output node information
+            print("inputs:")
+            inputs = self.model.get_inputs()
+            for key, value in inputs.items():
+                print("{}:{}".format(key, value))
+            print("outputs:")
+            outputs = self.model.get_outputs()
+            for key, value in outputs.items():
+                print("{}:{}".format(key, value))
+            # Get the model input name
+            self.inputName = "images"
+            # Get the model input shape
+            inputShape = inputs[self.inputName].lens()
+            self.inputHeight = int(inputShape[2])
+            self.inputWidth = int(inputShape[3])
+            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
         # Compile the model
         self.model.compile(t=migraphx.get_target("gpu"), device_id=0) # device_id: set the GPU device, device 0 by default
@@ -47,7 +75,7 @@ class YOLOv5:
         # Run inference
         start = time.time()
-        result = self.model.run({self.model.get_parameter_names()[0]: input_img})
+        result = self.model.run({self.inputName: input_img})
         print('net forward time: {:.4f}'.format(time.time() - start))
         # Post-process the model output
         boxes, scores, class_ids = self.process_output(result)
...
@@ -66,10 +66,6 @@ python YoloV5_infer_migraphx.py --staticInfer
 2. Dynamic inference
 ```
-# Enable the environment variable
-export MIGRAPHX_DYNAMIC_SHAPE=1
-# Run the example
 python YoloV5_infer_migraphx.py --dynamicInfer
 ```
@@ -137,10 +133,6 @@ cd build/
 2. Dynamic inference
 ```
-# Enable the environment variable
-export MIGRAPHX_DYNAMIC_SHAPE=1
-# Run the dynamic inference example program
 ./YOLOV5 1
 ```
...
@@ -68,10 +68,21 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
     net = migraphx::parse_onnx(modelPath, onnx_options);
     LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
-    // Get the model input attributes
-    std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
-    inputName=inputMap.begin()->first;
-    inputShape=inputMap.begin()->second;
+    // Get the model input/output node information
+    std::cout<<"inputs:"<<std::endl;
+    std::unordered_map<std::string, migraphx::shape> inputs=net.get_inputs();
+    for(auto i:inputs)
+    {
+        std::cout<<i.first<<":"<<i.second<<std::endl;
+    }
+    std::cout<<"outputs:"<<std::endl;
+    std::unordered_map<std::string, migraphx::shape> outputs=net.get_outputs();
+    for(auto i:outputs)
+    {
+        std::cout<<i.first<<":"<<i.second<<std::endl;
+    }
+    inputName=inputs.begin()->first;
+    inputShape=inputs.begin()->second;
     int N=inputShape.lens()[0];
     int C=inputShape.lens()[1];
     int H=inputShape.lens()[2];
@@ -92,10 +103,21 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
     net = migraphx::parse_onnx(modelPath);
     LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
-    // Get the model input attributes
-    std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
-    inputName=inputMap.begin()->first;
-    inputShape=inputMap.begin()->second;
+    // Get the model input/output node information
+    std::cout<<"inputs:"<<std::endl;
+    std::unordered_map<std::string, migraphx::shape> inputs=net.get_inputs();
+    for(auto i:inputs)
+    {
+        std::cout<<i.first<<":"<<i.second<<std::endl;
+    }
+    std::cout<<"outputs:"<<std::endl;
+    std::unordered_map<std::string, migraphx::shape> outputs=net.get_outputs();
+    for(auto i:outputs)
+    {
+        std::cout<<i.first<<":"<<i.second<<std::endl;
+    }
+    inputName=inputs.begin()->first;
+    inputShape=inputs.begin()->second;
     int N=inputShape.lens()[0];
     int C=inputShape.lens()[1];
     int H=inputShape.lens()[2];
...