Commit e93d7587 authored by Your Name

Add the YOLOV7 C++ inference example

parent 52bca813
# This file is used to ignore files which are generated
# ----------------------------------------------------------------------------
build/*
depend/*
.vscode/*
core.*
# qtcreator generated files
*.pro.user*
# VS
*.sdf
*.opensdf
*.ilk
*.pdb
*.exp
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
# Executables
*.exe
*.out
*.app
# xemacs temporary files
*.flc
# Vim temporary files
.*.swp
# others
*.avi
*.pyc
*.egg
#! /bin/sh
############### Ubuntu ###############
# Reference: https://docs.opencv.org/3.4.11/d7/d9f/tutorial_linux_install.html
# apt-get install build-essential -y
# apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev -y
# apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev -y # packages needed for image processing, optional
############### CentOS ###############
yum install gcc gcc-c++ gtk2-devel gimp-devel gimp-devel-tools gimp-help-browser zlib-devel libtiff-devel libjpeg-devel libpng-devel gstreamer-devel libavc1394-devel libraw1394-devel libdc1394-devel jasper-devel jasper-utils swig python libtool nasm -y
############################ Install dependencies online ###############################
#cd ./3rdParty
#pip install rbuild-master.tar.gz
############################ Install dependencies offline ###############################
# Install dependencies
cd ./3rdParty/rbuild_depend
pip install click-6.6-py2.py3-none-any.whl
pip install six-1.15.0-py2.py3-none-any.whl
pip install subprocess32-3.5.4.tar.gz
pip install cget-0.1.9.tar.gz
# Install rbuild
cd ../
pip install rbuild-master.tar.gz
# Minimum required CMake version
cmake_minimum_required(VERSION 3.5)
# Project name
project(MIGraphX_Samples)
# Compiler settings
set(CMAKE_CXX_COMPILER g++)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") # C++17 is required for MIGraphX 2.2 and above
set(CMAKE_BUILD_TYPE Release)
# Header include paths
set(INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Src/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Detector/
$ENV{DTKROOT}/include/
${CMAKE_CURRENT_SOURCE_DIR}/depend/include/)
include_directories(${INCLUDE_PATH})
# Dependency library paths
set(LIBRARY_PATH ${CMAKE_CURRENT_SOURCE_DIR}/depend/lib64/
$ENV{DTKROOT}/lib/)
link_directories(${LIBRARY_PATH})
# Libraries to link
set(LIBRARY opencv_core
opencv_imgproc
opencv_imgcodecs
opencv_dnn
migraphx_ref
migraphx
migraphx_c
migraphx_device
migraphx_gpu
migraphx_onnx)
link_libraries(${LIBRARY})
# Source files
set(SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/Src/main.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Sample.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Detector/DetectorYOLOV7.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/CommonUtility.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/Filesystem.cpp)
# Executable target
add_executable(MIGraphX_Samples ${SOURCE_FILES})
@@ -13,7 +13,7 @@ class YOLOv7:
         self.nmsThreshold = iou_thres
         # load the class names detected by the model
-        self.classNames = list(map(lambda x: x.strip(), open('./weights/coco.names', 'r').readlines()))
+        self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/Detector/YOLOV7/coco.names', 'r').readlines()))
         # parse the inference model
         self.model = migraphx.parse_onnx(path)
@@ -110,15 +110,15 @@ class YOLOv7:
             # draw the detection bounding box
             cv2.rectangle(image, (cx, cy), (cx + w, cy + h), (0, 255, 255), thickness=2)
             label = self.classNames[class_id]
-            label = f'{label} {int(score * 100)}%'
+            label = f'{label} {score:.2f}'
             labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
             cv2.putText(image, label, (cx, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), thickness=2)
         return image
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--imgpath', type=str, default='./images/bus.jpg', help="image path")
-    parser.add_argument('--modelpath', type=str, default='./weights/yolov7-tiny.onnx',help="onnx filepath")
+    parser.add_argument('--imgpath', type=str, default='../Resource/Images/bus.jpg', help="image path")
+    parser.add_argument('--modelpath', type=str, default='../Resource/Models/Detector/YOLOV7/yolov7-tiny.onnx',help="onnx filepath")
     parser.add_argument('--objectThreshold', default=0.5, type=float, help='object confidence')
     parser.add_argument('--confThreshold', default=0.25, type=float, help='class confidence')
     parser.add_argument('--nmsThreshold', default=0.5, type=float, help='nms iou thresh')
......
opencv-contrib-python
numpy
# os, argparse and time are Python standard-library modules and do not need to be listed here
@@ -8,27 +8,83 @@ YOLOV7 is the most recent YOLO-series object detection model, released in 2022; in the paper [Y
 The YoloV7 network consists of three parts: input, backbone and head. Unlike yolov5, the neck and head layers are merged into a single head layer, although the functionality is the same. Each part plays the same role as in yolov5: the backbone extracts features and the head produces the predictions. yolov7 is still anchor based; it adds E-ELAN blocks to the architecture and incorporates REP layers to simplify later deployment, and during training an Aux_detect branch is added to the head for auxiliary detection.
-## Inference
-A docker image for inference can be pulled from SourceFind; the image recommended for the YoloV7 project is:
-```python
-docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort_dcu_1.14.0_migraphx2.5.2_dtk22.10.1
-```
-The MIGraphX installation package can be downloaded from the [developer community](https://cancon.hpccube.com:65024/4/main/); install the Python dependencies with:
-```
-pip install -r requirements.txt
-```
-### Run the Example
+## Build and Install
+### Environment Setup
+A docker image for inference can be pulled from [SourceFind](https://www.sourcefind.cn/#/image/dcu/custom); the image recommended for YoloV7 inference is:
+```python
+docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
+```
+### Install the OpenCV Dependencies
+```python
+cd <path_to_migraphx_samples>
+sh ./3rdParty/InstallOpenCVDependences.sh
+```
+### Modify CMakeLists.txt
+- On Ubuntu, change the dependency library path in CMakeLists.txt: replace "${CMAKE_CURRENT_SOURCE_DIR}/depend/lib64/" with "${CMAKE_CURRENT_SOURCE_DIR}/depend/lib/".
+- **MIGraphX 2.3.0 and above require C++17**
+### Install OpenCV and Build the Project
+```
+rbuild build -d depend
+```
+### Set Environment Variables
+Add the dependency libraries to the LD_LIBRARY_PATH environment variable by appending the following to ~/.bashrc:
+**Centos**:
+```
+export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib64/:$LD_LIBRARY_PATH
+```
+**Ubuntu**:
+```
+export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib/:$LD_LIBRARY_PATH
+```
+Then run:
+```
+source ~/.bashrc
+```
+## Inference
+### C++ Inference
+After the YoloV7 project has been built successfully, run the sample from the build directory with:
+```
+./MIGraphX_Samples 0
+```
+When the program finishes, the YoloV7 detection result image is written to the build directory.
+<img src="./Resource/Images/Result.jpg" alt="Result" style="zoom:50%;" />
+### Python Inference
 The YoloV7 inference sample is YoloV7_infer_migraphx.py; run it with the following commands:
+```
+# enter the Python sample directory
+cd ./Python
+# install the dependencies
+pip install -r requirements.txt
+# run the program
 python YoloV7_infer_migraphx.py \
 --imgpath <path to the test image> \
 --modelpath <path to the onnx model> \
@@ -39,7 +95,7 @@ python YoloV7_infer_migraphx.py \
 When the program finishes, the YoloV7 detection result image is generated in the current directory.
-<img src="./images/Result.jpg" alt="Result" style="zoom: 50%;" />
+<img src="./Resource/Images/Result.jpg" alt="Result_2" style="zoom: 50%;" />
 ## Version History
......
<?xml version="1.0" encoding="GB2312"?>
<opencv_storage>
<!-- YOLOV7 detector -->
<DetectorYOLOV7>
<ModelPath>"../Resource/Models/Detector/YOLOV7/yolov7-tiny.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/Detector/YOLOV7/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!-- whether to use FP16 -->
<NumberOfClasses>80</NumberOfClasses><!-- number of classes (excluding background); COCO: 80, VOC: 20 -->
<ConfidenceThreshold>0.25</ConfidenceThreshold>
<NMSThreshold>0.5</NMSThreshold>
<ObjectThreshold>0.5</ObjectThreshold>
</DetectorYOLOV7>
</opencv_storage>
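The thresholds above are applied by the postprocessing in DetectorYOLOV7.cpp below, which assumes the common single-output YOLOv7 ONNX export. As a rough reference only, here is a minimal sketch of that assumed output layout for a 640x640 input with the 80 COCO classes; the exact proposal count depends on the exported model, so treat these numbers as an assumption rather than something guaranteed by this sample.
```
// Sketch of the assumed YOLOv7 ONNX output layout (illustration only, not part of this sample).
// The model is assumed to produce one tensor of shape [1, numProposal, 5 + NumberOfClasses];
// for a 640x640 input there are 3 anchors on each of the 80x80, 40x40 and 20x20 grids,
// i.e. numProposal = 3 * (6400 + 1600 + 400) = 25200 rows.
struct ProposalRow
{
    float cx, cy, w, h;      // box center and size in model-input pixels
    float objectness;        // compared against ObjectThreshold
    float classScores[80];   // each multiplied by objectness, then compared against ConfidenceThreshold
};
```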
#include <DetectorYOLOV7.h>
#include <migraphx/onnx.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/quantization.hpp>
#include <opencv2/dnn.hpp>
#include <CommonUtility.h>
#include <Filesystem.h>
#include <SimpleLog.h>
using namespace cv::dnn;
namespace migraphxSamples
{
DetectorYOLOV7::DetectorYOLOV7():logFile(NULL)
{
}
DetectorYOLOV7::~DetectorYOLOV7()
{
configurationFile.release();
}
ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializationParameterOfDetector)
{
// Initialization (get the log file, load the configuration file, etc.)
ErrorCode errorCode=DoCommonInitialization(initializationParameterOfDetector);
if(errorCode!=SUCCESS)
{
LOG_ERROR(logFile,"fail to DoCommonInitialization\n");
return errorCode;
}
LOG_INFO(logFile,"succeed to DoCommonInitialization\n");
// Read parameters from the configuration file
FileNode netNode = configurationFile["DetectorYOLOV7"];
string modelPath=initializationParameter.parentPath+(string)netNode["ModelPath"];
string pathOfClassNameFile=(string)netNode["ClassNameFile"];
yolov7Parameter.confidenceThreshold = (float)netNode["ConfidenceThreshold"];
yolov7Parameter.nmsThreshold = (float)netNode["NMSThreshold"];
yolov7Parameter.objectThreshold = (float)netNode["ObjectThreshold"];
yolov7Parameter.numberOfClasses=(int)netNode["NumberOfClasses"];
useFP16=(bool)(int)netNode["UseFP16"];
// Load the model
if(Exists(modelPath)==false)
{
LOG_ERROR(logFile,"%s not exist!\n",modelPath.c_str());
return MODEL_NOT_EXIST;
}
net = migraphx::parse_onnx(modelPath);
LOG_INFO(logFile,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// Get the model input attributes
std::pair<std::string, migraphx::shape> inputAttribute=*(net.get_parameter_shapes().begin());
inputName=inputAttribute.first;
inputShape=inputAttribute.second;
inputSize=cv::Size(inputShape.lens()[3],inputShape.lens()[2]);
// Use the GPU target
migraphx::target gpuTarget = migraphx::gpu::target{};
// Quantization
if(useFP16)
{
migraphx::quantize_fp16(net);
}
// Compile the model
migraphx::compile_options options;
options.device_id=0; // GPU device id, device 0 by default
options.offload_copy=true; // enable offload_copy
net.compile(gpuTarget,options);
LOG_INFO(logFile,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
// Run once by itself
migraphx::parameter_map inputData;
inputData[inputName]=migraphx::generate_argument(inputShape);
net.eval(inputData);
// Read the class names
if(!pathOfClassNameFile.empty())
{
ifstream classNameFile(pathOfClassNameFile);
string line;
while (getline(classNameFile, line))
{
classNames.push_back(line);
}
}
else
{
classNames.resize(yolov7Parameter.numberOfClasses);
}
// log
LOG_INFO(logFile,"InputSize:%dx%d\n",inputSize.width,inputSize.height);
LOG_INFO(logFile,"InputName:%s\n",inputName.c_str());
LOG_INFO(logFile,"ConfidenceThreshold:%f\n",yolov7Parameter.confidenceThreshold);
LOG_INFO(logFile,"objectThreshold:%f\n",yolov7Parameter.objectThreshold);
LOG_INFO(logFile,"NMSThreshold:%f\n",yolov7Parameter.nmsThreshold);
LOG_INFO(logFile,"NumberOfClasses:%d\n",yolov7Parameter.numberOfClasses);
return SUCCESS;
}
ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
if(srcImage.empty()||srcImage.type()!=CV_8UC3)
{
LOG_ERROR(logFile, "image error!\n");
return IMAGE_ERROR;
}
// Preprocess and convert to an NCHW blob
cv::Mat inputBlob;
blobFromImage(srcImage,
inputBlob,
1 / 255.0,
inputSize,
Scalar(0, 0, 0),
true,
false);
// Input data
migraphx::parameter_map inputData;
inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
// Inference
std::vector<migraphx::argument> inferenceResults = net.eval(inputData);
// Get the inference result
std::vector<cv::Mat> outs;
migraphx::argument result = inferenceResults[0];
// Convert to cv::Mat
migraphx::shape outputShape = result.get_shape();
int shape[]={(int)outputShape.lens()[0],(int)outputShape.lens()[1],(int)outputShape.lens()[2]};
cv::Mat out(3,shape,CV_32F);
memcpy(out.data,result.data(),sizeof(float)*outputShape.elements());
outs.push_back(out);
// Number of candidate boxes
int numProposal = outs[0].size[1];
int numOut = outs[0].size[2];
// Reshape the output to numProposal x numOut
outs[0] = outs[0].reshape(0, numProposal);
// Collect the candidate boxes
std::vector<float> confidences;
std::vector<cv::Rect> boxes;
std::vector<int> classIds;
float ratioh = (float)srcImage.rows / inputSize.height, ratiow = (float)srcImage.cols / inputSize.width;
// Parse cx, cy, w, h, box_score and class_score
int n = 0, rowInd = 0;
float* pdata = (float*)outs[0].data;
for (n = 0; n < numProposal; n++)
{
float boxScores = pdata[4];
if (boxScores > yolov7Parameter.objectThreshold)
{
cv::Mat scores = outs[0].row(rowInd).colRange(5, numOut);
cv::Point classIdPoint;
double maxClassScore;
cv::minMaxLoc(scores, 0, &maxClassScore, 0, &classIdPoint);
maxClassScore *= boxScores;
if (maxClassScore > yolov7Parameter.confidenceThreshold)
{
const int classIdx = classIdPoint.x;
float cx = pdata[0] * ratiow;
float cy = pdata[1] * ratioh;
float w = pdata[2] * ratiow;
float h = pdata[3] * ratioh;
int left = int(cx - 0.5 * w);
int top = int(cy - 0.5 * h);
confidences.push_back((float)maxClassScore);
boxes.push_back(cv::Rect(left, top, (int)(w), (int)(h)));
classIds.push_back(classIdx);
}
}
rowInd++;
pdata += numOut;
}
// Non-maximum suppression to remove redundant overlapping boxes
std::vector<int> indices;
dnn::NMSBoxes(boxes, confidences, yolov7Parameter.confidenceThreshold, yolov7Parameter.nmsThreshold, indices);
for (size_t i = 0; i < indices.size(); ++i)
{
int idx = indices[i];
int classID=classIds[idx];
string className=classNames[classID];
float confidence=confidences[idx];
cv::Rect box = boxes[idx];
ResultOfDetection result;
result.boundingBox=box;
result.confidence=confidence;// confidence
result.classID=classID; // label
result.className=className;
resultsOfDetection.push_back(result);
}
return SUCCESS;
}
ErrorCode DetectorYOLOV7::DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector)
{
initializationParameter=initializationParameterOfDetector;
// Get the log file
logFile=LogManager::GetInstance()->GetLogFile(initializationParameter.logName);
// Load the configuration file
std::string configFilePath=initializationParameter.configFilePath;
if(!Exists(configFilePath))
{
LOG_ERROR(logFile, "no configuration file!\n");
return CONFIG_FILE_NOT_EXIST;
}
if(!configurationFile.open(configFilePath, FileStorage::READ))
{
LOG_ERROR(logFile, "fail to open configuration file\n");
return FAIL_TO_OPEN_CONFIG_FILE;
}
LOG_INFO(logFile, "succeed to open configuration file\n");
// Normalize the parent path (ensure a trailing separator)
std::string &parentPath = initializationParameter.parentPath;
if (!parentPath.empty())
{
if(!IsPathSeparator(parentPath[parentPath.size() - 1]))
{
parentPath+=PATH_SEPARATOR;
}
}
return SUCCESS;
}
}
#ifndef __DETECTOR_YOLOV7_H__
#define __DETECTOR_YOLOV7_H__
#include <string>
#include <migraphx/program.hpp>
#include <opencv2/opencv.hpp>
#include <CommonDefinition.h>
using namespace std;
using namespace cv;
using namespace migraphx;
namespace migraphxSamples
{
typedef struct _YOLOV7Parameter
{
int numberOfClasses;
float confidenceThreshold;
float nmsThreshold;
float objectThreshold;
}YOLOV7Parameter;
class DetectorYOLOV7
{
public:
DetectorYOLOV7();
~DetectorYOLOV7();
ErrorCode Initialize(InitializationParameterOfDetector initializationParameterOfDetector);
ErrorCode Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection);
private:
ErrorCode DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector);
private:
cv::FileStorage configurationFile;
InitializationParameterOfDetector initializationParameter;
FILE *logFile;
migraphx::program net;
cv::Size inputSize;
string inputName;
migraphx::shape inputShape;
bool useFP16;
vector<string> classNames;
YOLOV7Parameter yolov7Parameter;
};
}
#endif
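The CMakeLists.txt above also references Src/main.cpp and Src/Sample.cpp, which are not part of this excerpt. The following is a minimal usage sketch only, not the repository's actual driver: it assumes the member names visible in the sources above (logName, configFilePath and parentPath on InitializationParameterOfDetector; boundingBox, className and confidence on ResultOfDetection) and uses hypothetical file paths.
```
// Minimal usage sketch of DetectorYOLOV7 (hypothetical driver, not the repository's main.cpp).
#include <vector>
#include <opencv2/opencv.hpp>
#include <DetectorYOLOV7.h>

using namespace migraphxSamples;

int main()
{
    // Configure the detector; the paths below are placeholders for the local layout.
    InitializationParameterOfDetector initParam;
    initParam.logName = "DetectorYOLOV7";
    initParam.configFilePath = "../Resource/Models/Detector/YOLOV7/DetectorYOLOV7.xml"; // hypothetical config path
    initParam.parentPath = "../";

    DetectorYOLOV7 detector;
    if (detector.Initialize(initParam) != SUCCESS)
    {
        return -1;
    }

    // Detect objects in a test image and draw the results.
    cv::Mat image = cv::imread("../Resource/Images/bus.jpg"); // hypothetical image path
    std::vector<ResultOfDetection> results;
    if (detector.Detect(image, results) != SUCCESS)
    {
        return -1;
    }
    for (const ResultOfDetection &result : results)
    {
        cv::rectangle(image, result.boundingBox, cv::Scalar(0, 255, 255), 2);
        cv::putText(image, result.className, result.boundingBox.tl(),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 255), 1);
    }
    cv::imwrite("Result.jpg", image);
    return 0;
}
```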