Commit 6a0a3439 authored by liucong's avatar liucong
Browse files

更新示例工程

parent 77ad90bb
...@@ -21,10 +21,21 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ ...@@ -21,10 +21,21 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ
net = migraphx::parse_onnx(modelPath); net = migraphx::parse_onnx(modelPath);
LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str()); LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// 获取模型输入属性 // 获取模型输入/输出节点信息
std::pair<std::string, migraphx::shape> inputMap=net.get_parameter_shapes(); std::cout<<"inputs:"<<std::endl;
inputName=inputAttribute.first; std::unordered_map<std::string, migraphx::shape> inputs=net.get_inputs();
inputShape=inputAttribute.second; for(auto i:inputs)
{
std::cout<<i.first<<":"<<i.second<<std::endl;
}
std::cout<<"outputs:"<<std::endl;
std::unordered_map<std::string, migraphx::shape> outputs=net.get_outputs();
for(auto i:outputs)
{
std::cout<<i.first<<":"<<i.second<<std::endl;
}
inputName=inputs.begin()->first;
inputShape=inputs.begin()->second;
int N=inputShape.lens()[0]; int N=inputShape.lens()[0];
int C=inputShape.lens()[1]; int C=inputShape.lens()[1];
int H=inputShape.lens()[2]; int H=inputShape.lens()[2];
...@@ -110,6 +121,18 @@ ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe ...@@ -110,6 +121,18 @@ ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
} }
``` ```
另外,如果想要指定输出节点,可以在eval()方法中通过提供outputNames参数来实现:
```
...
// 推理
std::vector<std::string> outputNames = {"output"};
std::vector<migraphx::argument> inferenceResults = net.eval(inputData, outputNames);
...
```
如果没有指定outputNames参数,则默认输出所有输出节点,此时输出节点的顺序与ONNX中输出节点顺序保持一致,可以通过netron查看ONNX文件的输出节点的顺序。
获取MIGraphX推理结果之后需要进一步处理才可以得到YOLOV7的检测结果,输出结果result的第一个维度outputShape.lens()[0]的数值表示YOLOV7模型在当前待检测图像上生成的anchor数量。后处理过程包含两次anchor筛选过程,首先根据阈值objectThreshold判断anchor内部是否包含物体,小于该阈值的anchor则去除,然后获取第一次筛选后保留的anchor内部预测物体类别概率的最高得分,并与boxScores相乘得到anchor的置信度得分,最后根据置信度阈值confidenceThreshold进行第二次anchor筛选,大于该置信度阈值的anchor则保留,并获取最终保留下来anchor的坐标信息和物体类别预测信息,同时还需将预测坐标信息根据图像预处理缩放的比例ratioh、ratiow映射到原图。 获取MIGraphX推理结果之后需要进一步处理才可以得到YOLOV7的检测结果,输出结果result的第一个维度outputShape.lens()[0]的数值表示YOLOV7模型在当前待检测图像上生成的anchor数量。后处理过程包含两次anchor筛选过程,首先根据阈值objectThreshold判断anchor内部是否包含物体,小于该阈值的anchor则去除,然后获取第一次筛选后保留的anchor内部预测物体类别概率的最高得分,并与boxScores相乘得到anchor的置信度得分,最后根据置信度阈值confidenceThreshold进行第二次anchor筛选,大于该置信度阈值的anchor则保留,并获取最终保留下来anchor的坐标信息和物体类别预测信息,同时还需将预测坐标信息根据图像预处理缩放的比例ratioh、ratiow映射到原图。
``` ```
......
...@@ -48,11 +48,22 @@ class YOLOv7: ...@@ -48,11 +48,22 @@ class YOLOv7:
# 解析推理模型 # 解析推理模型
self.model = migraphx.parse_onnx(path) self.model = migraphx.parse_onnx(path)
# 获取模型输入/输出节点信息
print("inputs:")
inputs = self.model.get_inputs()
for key,value in inputs.items():
print("{}:{}".format(key,value))
print("outputs:")
outputs = self.model.get_outputs()
for key,value in outputs.items():
print("{}:{}".format(key,value))
# 获取模型的输入name # 获取模型的输入name
self.inputName = self.model.get_parameter_names()[0] self.inputName = "images"
# 获取模型的输入尺寸 # 获取模型的输入尺寸
inputShape = self.model.get_parameter_shapes()[self.inputName].lens() inputShape = inputShape=inputs[self.inputName].lens()
self.inputHeight = int(inputShape[2]) self.inputHeight = int(inputShape[2])
self.inputWidth = int(inputShape[3]) self.inputWidth = int(inputShape[3])
``` ```
...@@ -72,7 +83,7 @@ def detect(self, image): ...@@ -72,7 +83,7 @@ def detect(self, image):
# 执行推理 # 执行推理
print("Start to inference") print("Start to inference")
start = time.time() start = time.time()
result = self.model.run({self.model.get_parameter_names()[0]: input_img}) result = self.model.run({self.inputName: input_img})
print('net forward time: {:.4f}'.format(time.time() - start)) print('net forward time: {:.4f}'.format(time.time() - start))
# 模型输出结果后处理 # 模型输出结果后处理
boxes, scores, class_ids = self.process_output(result) boxes, scores, class_ids = self.process_output(result)
......
...@@ -18,14 +18,25 @@ class YOLOv7: ...@@ -18,14 +18,25 @@ class YOLOv7:
# 解析推理模型 # 解析推理模型
self.model = migraphx.parse_onnx(path) self.model = migraphx.parse_onnx(path)
# 获取模型输入/输出节点信息
print("inputs:")
inputs = self.model.get_inputs()
for key,value in inputs.items():
print("{}:{}".format(key,value))
print("outputs:")
outputs = self.model.get_outputs()
for key,value in outputs.items():
print("{}:{}".format(key,value))
# 获取模型的输入name # 获取模型的输入name
self.inputName = self.model.get_parameter_names()[0] self.inputName = "images"
# 获取模型的输入尺寸 # 获取模型的输入尺寸
inputShape = self.model.get_parameter_shapes()[self.inputName].lens() inputShape = inputShape=inputs[self.inputName].lens()
self.inputHeight = int(inputShape[2]) self.inputHeight = int(inputShape[2])
self.inputWidth = int(inputShape[3]) self.inputWidth = int(inputShape[3])
print("inputName:{0} \ninputShape:{1}".format(self.inputName,inputShape)) print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
def detect(self, image): def detect(self, image):
# 输入图片预处理 # 输入图片预处理
...@@ -37,7 +48,7 @@ class YOLOv7: ...@@ -37,7 +48,7 @@ class YOLOv7:
# 执行推理 # 执行推理
print("Start to inference") print("Start to inference")
start = time.time() start = time.time()
result = self.model.run({self.model.get_parameter_names()[0]: input_img}) result = self.model.run({self.inputName: input_img})
print('net forward time: {:.4f}'.format(time.time() - start)) print('net forward time: {:.4f}'.format(time.time() - start))
# 模型输出结果后处理 # 模型输出结果后处理
boxes, scores, class_ids = self.process_output(result) boxes, scores, class_ids = self.process_output(result)
......
...@@ -55,10 +55,21 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ ...@@ -55,10 +55,21 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ
net = migraphx::parse_onnx(modelPath); net = migraphx::parse_onnx(modelPath);
LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str()); LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// 获取模型输入属性 // 获取模型输入/输出节点信息
std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes(); std::cout<<"inputs:"<<std::endl;
inputName=inputMap.begin()->first; std::unordered_map<std::string, migraphx::shape> inputs=net.get_inputs();
inputShape=inputMap.begin()->second; for(auto i:inputs)
{
std::cout<<i.first<<":"<<i.second<<std::endl;
}
std::cout<<"outputs:"<<std::endl;
std::unordered_map<std::string, migraphx::shape> outputs=net.get_outputs();
for(auto i:outputs)
{
std::cout<<i.first<<":"<<i.second<<std::endl;
}
inputName=inputs.begin()->first;
inputShape=inputs.begin()->second;
int N=inputShape.lens()[0]; int N=inputShape.lens()[0];
int C=inputShape.lens()[1]; int C=inputShape.lens()[1];
int H=inputShape.lens()[2]; int H=inputShape.lens()[2];
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment