Commit 2a5fdf08 authored by liucong's avatar liucong
Browse files

精简代码

parent 65bac91b
......@@ -2,7 +2,7 @@
cmake_minimum_required(VERSION 3.5)
# 设置项目名
project(MIGraphX_Samples)
project(YoloV7)
# 设置编译器
set(CMAKE_CXX_COMPILER g++)
......@@ -12,7 +12,6 @@ set(CMAKE_BUILD_TYPE release)
# 添加头文件路径
set(INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Src/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Detector/
$ENV{DTKROOT}/include/
${CMAKE_CURRENT_SOURCE_DIR}/depend/include/)
include_directories(${INCLUDE_PATH})
......@@ -37,10 +36,9 @@ link_libraries(${LIBRARY})
# 添加源文件
set(SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/Src/main.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Sample.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Detector/DetectorYOLOV7.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/YOLOV7.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/CommonUtility.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/Filesystem.cpp)
# 添加可执行目标
add_executable(MIGraphX_Samples ${SOURCE_FILES})
add_executable(YoloV7 ${SOURCE_FILES})
# YOLOV7检测器
本示例提供了YOLOV7模型的MIGraphX C++推理教程,通过该教程可以了解图像预处理以及后处理等流程,根据文档说明执行YOLOV7推理则可获取目标检测结果。
## 模型简介
YOLOV7是2022年最新出现的一种YOLO系列目标检测模型,在论文 [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696)中提出。
<img src="./YOLOV7_01.png" style="zoom:67%;" />
## 模型初始化
初始化操作主要利用MIGraphX对输入的模型进行解析,获取模型的输入属性inputAttribute,同时设置使用GPU推理模式对模型进行编译。
```
ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializationParameterOfDetector)
{
...
//模型加载
net = migraphx::parse_onnx(modelPath);
LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// 获取模型输入属性
std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
inputName=inputMap.begin()->first;
inputShape=inputMap.begin()->second;
int N=inputShape.lens()[0];
int C=inputShape.lens()[1];
int H=inputShape.lens()[2];
int W=inputShape.lens()[3];
inputSize=cv::Size(W,H);
// 设置模型为GPU模式
migraphx::target gpuTarget = migraphx::gpu::target{};
// 量化
if(useFP16)
{
migraphx::quantize_fp16(net);
}
// 编译模型
migraphx::compile_options options;
options.device_id=0; // 设置GPU设备,默认为0号设备
options.offload_copy=true; // 设置offload_copy
net.compile(gpuTarget,options);
LOG_INFO(stdout,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
...
}
```
## 预处理
在将数据输入到模型之前,需要对图像做如下预处理操作:
- 转换数据排布为NCHW
- 归一化到[0.0, 1.0]
- 将输入数据的尺寸变换到YOLOV7输入大小(1,3,640,640)
```c++
ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
// 预处理并转换为NCHW
cv::Mat inputBlob;
blobFromImage(srcImage, //输入数据
inputBlob, //输出数据
1 / 255.0, //缩放系数,这里为1/255.0
inputSize, //YOLOV7输入尺寸(640,640)
Scalar(0, 0, 0), // 均值,这里不需要减均值,所以设置为0.0
true, //转换RB通道
false);
...
}
```
## 推理
将预处理之后的图像输入到模型进行推理,获取模型的推理结果inferenceResults,并将输出结果由std::vector< migraphx::argument >类型转换为cv::Mat类型。
```
ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
// 创建输入数据
std::unordered_map<std::string, migraphx::argument> inputData;
inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
// 推理
std::vector<migraphx::argument> inferenceResults=net.eval(inputData);
// 获取推理结果
std::vector<cv::Mat> outs;
migraphx::argument result = inferenceResults[0];
// 转换为cv::Mat
migraphx::shape outputShape = result.get_shape();
int shape[]={outputShape.lens()[0],outputShape.lens()[1],outputShape.lens()[2]};
cv::Mat out(3,shape,CV_32F);
memcpy(out.data,result.data(),sizeof(float)*outputShape.elements());
outs.push_back(out);
...
}
```
获取MIGraphX推理结果之后需要进一步处理才可以得到YOLOV7的检测结果,输出结果result的第一个维度outputShape.lens()[0]的数值表示YOLOV7模型在当前待检测图像上生成的anchor数量。后处理过程包含两次anchor筛选过程,首先根据阈值objectThreshold判断anchor内部是否包含物体,小于该阈值的anchor则去除,然后获取第一次筛选后保留的anchor内部预测物体类别概率的最高得分,并与boxScores相乘得到anchor的置信度得分,最后根据置信度阈值confidenceThreshold进行第二次anchor筛选,大于该置信度阈值的anchor则保留,并获取最终保留下来anchor的坐标信息和物体类别预测信息,同时还需将预测坐标信息根据图像预处理缩放的比例ratioh、ratiow映射到原图。
```
ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
//获取先验框的个数numProposal=25200
int numProposal = outs[0].size[1];
//每个anchor的预测信息数量numOut=85
int numOut = outs[0].size[2];
outs[0] = outs[0].reshape(0, numProposal);
std::vector<float> confidences;
std::vector<cv::Rect> boxes;
std::vector<int> classIds;
//原图尺寸与模型输入尺寸的缩放比例
float ratioh = (float)srcImage.rows / inputSize.height, ratiow = (float)srcImage.cols / inputSize.width;
//计算cx,cy,w,h,box_sore,class_sore
int n = 0, rowInd = 0;
float* pdata = (float*)outs[0].data;
for (n = 0; n < numProposal; n++)
{
//获取是否包含物体的概率值
float boxScores = pdata[4];
//第一次筛选,判断anchor内是否包含物体
if (boxScores > yolov7Parameter.objectThreshold)
{
//获取每个anchor内部预测的80个类别概率信息
cv::Mat scores = outs[0].row(rowInd).colRange(5, numOut);
cv::Point classIdPoint;
double maxClassScore;
//获取80个类别中最大概率值和对应的类别ID
cv::minMaxLoc(scores, 0, &maxClassScore, 0, &classIdPoint);
maxClassScore *= boxScores;
//第二次筛选,判断当前anchor的最大置信度得分是否满足阈值
if (maxClassScore > yolov7Parameter.confidenceThreshold)
{
const int classIdx = classIdPoint.x;
//将每个anchor坐标按缩放比例映射到原图
float cx = pdata[0] * ratiow;
float cy = pdata[1] * ratioh;
float w = pdata[2] * ratiow;
float h = pdata[3] * ratioh;
//获取anchor的左上角坐标
int left = int(cx - 0.5 * w);
int top = int(cy - 0.5 * h);
confidences.push_back((float)maxClassScore);
boxes.push_back(cv::Rect(left, top, (int)(w), (int)(h)));
classIds.push_back(classIdx);
}
}
rowInd++;
pdata += numOut;
}
...
}
```
为了消除重叠锚框,输出最终的YOLOV7目标检测结果,执行非极大值抑制对筛选之后的anchor进行处理,最后保存检测结果到resultsOfDetection中。
```
ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
//执行non maximum suppression消除冗余重叠boxes
std::vector<int> indices;
dnn::NMSBoxes(boxes, confidences, yolov7Parameter.confidenceThreshold, yolov7Parameter.nmsThreshold, indices);
for (size_t i = 0; i < indices.size(); ++i)
{
int idx = indices[i];
int classID=classIds[idx];
string className=classNames[classID];
float confidence=confidences[idx];
cv::Rect box = boxes[idx];
//保存每个最终预测anchor的坐标值、置信度分数、类别ID
ResultOfDetection result;
result.boundingBox=box;
result.confidence=confidence;// confidence
result.classID=classID; // label
result.className=className;
resultsOfDetection.push_back(result);
}
...
}
```
# YOLOV7检测器
本示例提供了YOLOV7模型的MIGraphX Python推理教程,主要介绍了模型推理过程,根据文档说明执行YOLOV7 Python示例可以得到目标检测结果。
## 模型简介
YOLOV7是2022年最新出现的一种YOLO系列目标检测模型,在论文 [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696)中提出。
<img src="./YOLOV7_01.png" alt="YOLOV7_02" style="zoom:67%;" />
## 预处理
待检测图像输入模型进行检测之前需要进行预处理,主要包括调整输入的尺寸,归一化等操作。
1. 转换数据排布为NCHW
2. 调整输入数据的尺寸为(1,3,640,640)
```python
def prepare_input(self, image):
    """Preprocess a BGR image into the model's NCHW float32 input blob.

    Steps: BGR->RGB, resize to the model input size, HWC->CHW,
    add the batch dimension and normalize pixels to [0.0, 1.0].
    Also records the original image size for later box rescaling.
    """
    self.img_height, self.img_width = image.shape[:2]
    input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Resize to the YOLOV7 input size parsed from the model (e.g. 640x640)
    input_img = cv2.resize(input_img, (self.inputWidth, self.inputHeight))
    # HWC -> CHW
    input_img = input_img.transpose(2, 0, 1)
    # Add the batch dimension: (C,H,W) -> (1,C,H,W)
    input_img = np.expand_dims(input_img, 0)
    input_img = np.ascontiguousarray(input_img)
    input_img = input_img.astype(np.float32)
    # Normalize to [0.0, 1.0]
    input_img = input_img / 255
    return input_img
```
其中模型输入的inputWidth、inputHeight通过migraphx对输入模型进行解析获取,代码位置见YOLOV7类初始化位置。
```
class YOLOv7:
    def __init__(self, path, obj_thres=0.5, conf_thres=0.7, iou_thres=0.5):
        """Parse the ONNX model and read its input geometry.

        path: ONNX model file path.
        obj_thres: objectness threshold for the first anchor filter.
        conf_thres: confidence threshold for the second filter and NMS.
        iou_thres: IoU threshold used by NMS.
        """
        self.objectThreshold = obj_thres
        self.confThreshold = conf_thres
        self.nmsThreshold = iou_thres
        # Class names, one per line (COCO: 80 classes)
        self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/coco.names', 'r').readlines()))
        # Parse the inference model
        self.model = migraphx.parse_onnx(path)
        # Name of the model's input tensor
        self.inputName = self.model.get_parameter_names()[0]
        # Input shape is NCHW, so lens()[2]/lens()[3] are height/width
        inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
        self.inputHeight = int(inputShape[2])
        self.inputWidth = int(inputShape[3])
```
## 推理
输入图像预处理完成之后开始进行推理,首先需要利用migraphx进行编译,然后对输入数据进行前向计算得到模型的输出result,在detect函数中调用定义的process_output函数对result进行后处理,得到图片中含有物体的anchor坐标信息、类别置信度、类别ID。
```
def detect(self, image):
    """Run the full pipeline on one image: preprocess, compile, infer, postprocess.

    Returns (boxes, scores, class_ids) of the final detections.
    """
    # Preprocess the input image into an NCHW float32 blob
    input_img = self.prepare_input(image)
    # Compile for GPU; device_id selects the GPU (default: device 0)
    self.model.compile(t=migraphx.get_target("gpu"), device_id=0)
    print("Success to compile")
    # Run inference and report the forward-pass time
    print("Start to inference")
    start = time.time()
    result = self.model.run({self.model.get_parameter_names()[0]: input_img})
    print('net forward time: {:.4f}'.format(time.time() - start))
    # Postprocess the raw output into boxes/scores/class ids
    boxes, scores, class_ids = self.process_output(result)
    return boxes, scores, class_ids
```
其中对migraphx推理输出result进行后处理,首先需要对生成的anchor根据是否有物体阈值objectThreshold、置信度阈值confThreshold进行筛选,相关过程定义在process_output函数中。获取筛选后的anchor的坐标信息之后,需要将坐标映射到原图中的位置,相关过程定义在rescale_boxes函数中。
```
def process_output(self, output):
    """Filter raw model output into final detections.

    output[0] holds one row per anchor: cx, cy, w, h, objectness,
    followed by the per-class scores. Two filter passes (objectness,
    then objectness * class score) are followed by NMS.
    Returns (boxes, scores, class_ids) for the surviving anchors.
    """
    predictions = np.squeeze(output[0])
    # First pass: keep anchors whose objectness exceeds objectThreshold
    obj_conf = predictions[:, 4]
    predictions = predictions[obj_conf > self.objectThreshold]
    obj_conf = obj_conf[obj_conf > self.objectThreshold]
    # Second pass: confidence = objectness * class score must exceed confThreshold
    predictions[:, 5:] *= obj_conf[:, np.newaxis]
    scores = np.max(predictions[:, 5:], axis=1)
    valid_scores = scores > self.confThreshold
    predictions = predictions[valid_scores]
    scores = scores[valid_scores]
    # Class id = index of the highest class score
    class_ids = np.argmax(predictions[:, 5:], axis=1)
    # Extract the box coordinates of each surviving anchor
    boxes = self.extract_boxes(predictions)
    # Non-maximum suppression removes redundant overlapping boxes
    indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), self.confThreshold, self.nmsThreshold).flatten()
    return boxes[indices], scores[indices], class_ids[indices]
def rescale_boxes(self, boxes):
    """Map boxes from model-input coordinates back to original-image coordinates.

    Divides by the model input size, then multiplies by the original
    image size recorded in prepare_input.
    """
    input_shape = np.array([self.inputWidth, self.inputHeight, self.inputWidth, self.inputHeight])
    boxes = np.divide(boxes, input_shape, dtype=np.float32)
    boxes *= np.array([self.img_width, self.img_height, self.img_width, self.img_height])
    return boxes
```
根据获取的detect函数输出的boxes、scores、class_ids信息在原图进行结果可视化,包括绘制图像中检测到的物体位置、类别和置信度分数,得到最终的YOLOV7目标检测结果输出。
```
def draw_detections(self, image, boxes, scores, class_ids):
    """Draw each detection box with a "name score" label onto image and return it.

    NOTE(review): boxes are unpacked as cx, cy, w, h but drawn as if
    (cx, cy) were the top-left corner — confirm the box format produced
    by extract_boxes matches this.
    """
    for box, score, class_id in zip(boxes, scores, class_ids):
        cx, cy, w, h = box.astype(int)
        # Detection rectangle
        cv2.rectangle(image, (cx, cy), (cx + w, cy + h), (0, 255, 255), thickness=2)
        label = self.classNames[class_id]
        label = f'{label} {score:.2f}'
        labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        # Label just above the top edge of the box
        cv2.putText(image, label, (cx, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), thickness=2)
    return image
```
......@@ -13,7 +13,7 @@ class YOLOv7:
self.nmsThreshold = iou_thres
# 获取模型检测的类别信息
self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/Detector/YOLOV7/coco.names', 'r').readlines()))
self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/coco.names', 'r').readlines()))
# 解析推理模型
self.model = migraphx.parse_onnx(path)
......@@ -118,7 +118,7 @@ class YOLOv7:
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--imgpath', type=str, default='../Resource/Images/bus.jpg', help="image path")
parser.add_argument('--modelpath', type=str, default='../Resource/Models/Detector/YOLOV7/yolov7-tiny.onnx',help="onnx filepath")
parser.add_argument('--modelpath', type=str, default='../Resource/Models/yolov7-tiny.onnx',help="onnx filepath")
parser.add_argument('--objectThreshold', default=0.5, type=float, help='class confidence')
parser.add_argument('--confThreshold', default=0.25, type=float, help='class confidence')
parser.add_argument('--nmsThreshold', default=0.5, type=float, help='nms iou thresh')
......
......@@ -8,7 +8,11 @@ YOLOV7是2022年最新出现的一种YOLO系列目标检测模型,在论文 [Y
YoloV7模型的网络结构包括三个部分:input、backbone和head。与yolov5不同的是,将neck层与head层合称为head层,实际上的功能是一样的。各个部分的功能和yolov5相同,如backbone用于提取特征,head用于预测。yolov7依旧基于anchor based的方法,同时在网络架构上增加E-ELAN层,并将REP层也加入进来,方便后续部署,同时在训练时,在head时,新增Aux_detect用于辅助检测。
## 构建安装
## python版本推理
下面介绍如何运行python代码示例,具体推理代码解析,在Doc/Tutorial_Python.md中有详细说明。
### 构建安装
在光源可拉取推理的docker镜像,YoloV7工程推荐的镜像如下:
......@@ -16,6 +20,36 @@ YoloV7模型的网络结构包括三个部分:input、backbone和head。与yol
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
```
### 推理示例
YoloV7模型的推理示例程序是YoloV7_infer_migraphx.py,使用如下命令运行该推理示例:
```
# 进入python示例目录
cd ./Python
# 安装依赖
pip install -r requirements.txt
# 运行程序
python YoloV7_infer_migraphx.py \
--imgpath 测试图像路径 \
--modelpath onnx模型路径 \
--objectThreshold 判断是否有物体阈值,默认0.5 \
--confThreshold 置信度阈值,默认0.25 \
--nmsThreshold nms阈值,默认0.5 \
```
程序运行结束会在当前目录生成YoloV7检测结果图像。
<img src="./Resource/Images/Result.jpg" alt="Result_2" style="zoom: 50%;" />
## C++版本推理
下面介绍如何运行C++代码示例,具体推理代码解析,在Doc/Tutorial_Cpp.md目录中有详细说明。
参考Python版本推理中的构建安装,在光源中拉取推理的docker镜像。
### 安装Opencv依赖
```python
......@@ -59,43 +93,26 @@ export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib/:$LD_LIBRARY_PATH
source ~/.bashrc
```
## 推理
### 推理示例
### C++版本推理
成功编译YoloV7工程后,执行如下命令运行该示例:
成功编译YoloV7工程后,在build目录下输入如下命令运行该示例:
```
./MIGraphX_Samples 0
```
# 进入migraphx samples工程根目录
cd <path_to_migraphx_samples>
程序运行结束会在build目录生成YoloV7检测结果图像。
<img src="./Resource/Images/Result.jpg" alt="Result" style="zoom:50%;" />
### python版本推理
YoloV7模型的推理示例程序是YoloV7_infer_migraphx.py,使用如下命令运行该推理示例:
# 进入build目录
cd ./build/
# 执行示例程序
./YoloV7
```
# 进入python示例目录
cd ./Python
# 安装依赖
pip install -r requirements.txt
程序运行结束会在build目录生成YoloV7检测结果图像。
# 运行程序
python YoloV7_infer_migraphx.py \
--imgpath 测试图像路径 \
--modelpath onnx模型路径 \
--objectThreshold 判断是否有物体阈值,默认0.5 \
--confThreshold 置信度阈值,默认0.25 \
--nmsThreshold nms阈值,默认0.5 \
```
<img src="./Resource/Images/Result.jpg" alt="Result" style="zoom:50%;" />
程序运行结束会在当前目录生成YoloV7检测结果图像。
<img src="./Resource/Images/Result.jpg" alt="Result_2" style="zoom: 50%;" />
## 历史版本
......
......@@ -3,8 +3,8 @@
<!--YOLOV7检测器 -->
<DetectorYOLOV7>
<ModelPath>"../Resource/Models/Detector/YOLOV7/yolov7-tiny.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/Detector/YOLOV7/coco.names"</ClassNameFile>
<ModelPath>"../Resource/Models/yolov7-tiny.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!--是否使用FP16-->
<NumberOfClasses>80</NumberOfClasses><!--类别数(不包括背景类),COCO:80,VOC:20-->
<ConfidenceThreshold>0.25</ConfidenceThreshold>
......
#include <Sample.h>
#include <opencv2/dnn.hpp>
#include <SimpleLog.h>
#include <Filesystem.h>
#include <DetectorYOLOV7.h>
#include <fstream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
using namespace migraphx;
using namespace migraphxSamples;
// Demo: run the YOLOV7 detector on one test image, draw the detections
// on the image, log them, and save the annotated result as Result.jpg.
void Sample_DetectorYOLOV7()
{
    // Create the YOLOV7 detector and initialize it from the XML configuration file.
    DetectorYOLOV7 detector;
    InitializationParameterOfDetector initParamOfDetectorYOLOV7;
    initParamOfDetectorYOLOV7.parentPath = "";
    initParamOfDetectorYOLOV7.configFilePath = CONFIG_FILE;
    initParamOfDetectorYOLOV7.logName = "";
    ErrorCode errorCode=detector.Initialize(initParamOfDetectorYOLOV7);
    if(errorCode!=SUCCESS)
    {
        LOG_ERROR(stdout, "fail to initialize detector!\n");
        exit(-1);
    }
    LOG_INFO(stdout, "succeed to initialize detector\n");

    // Load the test image (flag 1 = force 3-channel BGR).
    Mat srcImage=imread("../Resource/Images/bus.jpg",1);

    // Run detection and time it with OpenCV tick counters (milliseconds).
    std::vector<ResultOfDetection> predictions;
    double time1 = getTickCount();
    detector.Detect(srcImage,predictions);
    double time2 = getTickCount();
    double elapsedTime = (time2 - time1)*1000 / getTickFrequency();
    LOG_INFO(stdout, "inference time:%f ms\n", elapsedTime);

    // Draw each detection (box + "className:confidence" label) and log it.
    LOG_INFO(stdout,"========== Detection Results ==========\n");
    for(int i=0;i<predictions.size();++i)
    {
        ResultOfDetection result=predictions[i];
        cv::rectangle(srcImage,result.boundingBox,Scalar(0,255,255),2);
        string label = format("%.2f", result.confidence);
        label = result.className + ":" + label;
        int left = predictions[i].boundingBox.x;
        int top = predictions[i].boundingBox.y;
        int baseLine;
        Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
        // Keep the label inside the image when the box touches the top edge.
        top = max(top, labelSize.height);
        cv::putText(srcImage, label, Point(left, top-10), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 255), 2);
        LOG_INFO(stdout,"box:%d %d %d %d,label:%d,confidence:%f\n",predictions[i].boundingBox.x,
                 predictions[i].boundingBox.y,predictions[i].boundingBox.width,predictions[i].boundingBox.height,predictions[i].classID,predictions[i].confidence);
    }

    // Save the annotated image next to the executable.
    imwrite("Result.jpg",srcImage);
    LOG_INFO(stdout,"Detection results have been saved to ./Result.jpg\n");
}
\ No newline at end of file
// Sample programs
#ifndef __SAMPLE_H__
#define __SAMPLE_H__

// YOLOV7 sample: initializes the detector from the XML configuration,
// runs detection on a test image and saves the annotated result.
void Sample_DetectorYOLOV7();

#endif
\ No newline at end of file
// 常用数据类型和宏定义
// 常用定义
#ifndef __COMMON_DEFINITION_H__
#define __COMMON_DEFINITION_H__
#include <string>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
namespace migraphxSamples
{
......@@ -21,20 +17,7 @@ namespace migraphxSamples
#define CONFIG_FILE "../Resource/Configuration.xml"
typedef struct __Time
{
string year;
string month;
string day;
string hour;
string minute;
string second;
string millisecond; // ms
string microsecond; // us
string weekDay;
}_Time;
typedef enum _ErrorCode
typedef enum _ErrorCode
{
SUCCESS=0, // 0
MODEL_NOT_EXIST, // 模型不存在
......@@ -44,7 +27,7 @@ typedef enum _ErrorCode
IMAGE_ERROR, // 图像错误
}ErrorCode;
typedef struct _ResultOfPrediction
typedef struct _ResultOfPrediction
{
float confidence;
int label;
......@@ -52,24 +35,22 @@ typedef struct _ResultOfPrediction
}ResultOfPrediction;
typedef struct _ResultOfDetection
typedef struct _ResultOfDetection
{
Rect boundingBox;
cv::Rect boundingBox;
float confidence;
int classID;
string className;
std::string className;
bool exist;
_ResultOfDetection():confidence(0.0f),classID(0),exist(true){}
}ResultOfDetection;
typedef struct _InitializationParameterOfDetector
typedef struct _InitializationParameterOfDetector
{
std::string parentPath;
std::string configFilePath;
cv::Size inputSize;
std::string logName;
}InitializationParameterOfDetector;
}
......
#include <CommonUtility.h>
#include <assert.h>
#include <ctype.h>
#include <time.h>
#include <stdlib.h>
#include <algorithm>
#include <sstream>
#include <vector>
#ifdef _WIN32
#include <io.h>
#include <direct.h>
#include <Windows.h>
#else
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include <SimpleLog.h>
namespace migraphxSamples
{
// Returns the current local wall-clock time with every field pre-formatted
// as a zero-padded string (year "2023", month "07", ..., millisecond "042",
// weekDay "0".."6"). Uses GetLocalTime() on Windows and
// gettimeofday()/localtime() elsewhere.
// NOTE(review): the Windows branch never fills currentTime.microsecond.
_Time GetCurrentTime3()
{
    _Time currentTime;
#if (defined WIN32 || defined _WIN32)
    SYSTEMTIME systemTime;
    GetLocalTime(&systemTime);
    char temp[8] = { 0 };  // scratch buffer; widest field is "%04d" + NUL
    sprintf(temp, "%04d", systemTime.wYear);
    currentTime.year=string(temp);
    sprintf(temp, "%02d", systemTime.wMonth);
    currentTime.month=string(temp);
    sprintf(temp, "%02d", systemTime.wDay);
    currentTime.day=string(temp);
    sprintf(temp, "%02d", systemTime.wHour);
    currentTime.hour=string(temp);
    sprintf(temp, "%02d", systemTime.wMinute);
    currentTime.minute=string(temp);
    sprintf(temp, "%02d", systemTime.wSecond);
    currentTime.second=string(temp);
    sprintf(temp, "%03d", systemTime.wMilliseconds);
    currentTime.millisecond=string(temp);
    sprintf(temp, "%d", systemTime.wDayOfWeek);
    currentTime.weekDay=string(temp);
#else
    struct timeval tv;
    struct tm *p;
    gettimeofday(&tv, NULL);
    p = localtime(&tv.tv_sec);  // local time of the seconds part
    char temp[8]={0};
    // tm_year counts from 1900, tm_mon from 0
    sprintf(temp,"%04d",1900+p->tm_year);
    currentTime.year=string(temp);
    sprintf(temp,"%02d",1+p->tm_mon);
    currentTime.month=string(temp);
    sprintf(temp,"%02d",p->tm_mday);
    currentTime.day=string(temp);
    sprintf(temp,"%02d",p->tm_hour);
    currentTime.hour=string(temp);
    sprintf(temp,"%02d",p->tm_min);
    currentTime.minute=string(temp);
    sprintf(temp,"%02d",p->tm_sec);
    currentTime.second=string(temp);
    // Split microseconds into milliseconds and the sub-millisecond remainder
    sprintf(temp,"%03d",tv.tv_usec/1000);
    currentTime.millisecond = string(temp);
    sprintf(temp, "%03d", tv.tv_usec % 1000);
    currentTime.microsecond = string(temp);
    sprintf(temp, "%d", p->tm_wday);
    currentTime.weekDay = string(temp);
#endif
    return currentTime;
}
// Splits str into tokens delimited by separator, keeping empty tokens:
// SplitString("a,,b", ",") -> {"a", "", "b"}, SplitString("a,", ",") -> {"a", ""},
// SplitString("", ",") -> {""}. If separator is empty the whole string is
// returned as a single token (the previous implementation, which appended
// the separator and rescanned with an index that could step backwards,
// looped forever on an empty separator).
std::vector<std::string> SplitString(std::string str, std::string separator)
{
    std::vector<std::string> result;
    // Guard: an empty separator can never advance the scan position.
    if (separator.empty())
    {
        result.push_back(str);
        return result;
    }
    std::string::size_type start = 0;
    std::string::size_type pos;
    // Emit one token per separator occurrence...
    while ((pos = str.find(separator, start)) != std::string::npos)
    {
        result.push_back(str.substr(start, pos - start));
        start = pos + separator.size();
    }
    // ...plus the remainder after the last separator (possibly empty).
    result.push_back(str.substr(start));
    return result;
}
bool CompareConfidence(const ResultOfDetection &L,const ResultOfDetection &R)
{
return L.confidence > R.confidence;
......@@ -109,7 +13,7 @@ bool CompareArea(const ResultOfDetection &L,const ResultOfDetection &R)
return L.boundingBox.area() > R.boundingBox.area();
}
void NMS(vector<ResultOfDetection> &detections, float IOUThreshold)
void NMS(std::vector<ResultOfDetection> &detections, float IOUThreshold)
{
// sort
std::sort(detections.begin(), detections.end(), CompareConfidence);
......
......@@ -3,23 +3,16 @@
#ifndef __COMMON_UTILITY_H__
#define __COMMON_UTILITY_H__
#include <mutex>
#include <string>
#include <vector>
#include <CommonDefinition.h>
using namespace std;
namespace migraphxSamples
{
// 分割字符串
std::vector<std::string> SplitString(std::string str,std::string separator);
// 排序规则: 按照置信度或者按照面积排序
bool CompareConfidence(const ResultOfDetection &L,const ResultOfDetection &R);
bool CompareArea(const ResultOfDetection &L,const ResultOfDetection &R);
// 非极大抑制
void NMS(std::vector<ResultOfDetection> &detections, float IOUThreshold);
}
......
......@@ -11,12 +11,7 @@
#include <unistd.h>
#include <dirent.h>
#endif
#include <CommonUtility.h>
#include <opencv2/opencv.hpp>
#include <SimpleLog.h>
using namespace cv;
// 路径分隔符(Linux:‘/’,Windows:’\\’)
#ifdef _WIN32
#define PATH_SEPARATOR '\\'
......@@ -24,9 +19,31 @@ using namespace cv;
#define PATH_SEPARATOR '/'
#endif
using namespace std;
namespace migraphxSamples
{
// File-local helper: splits str into tokens delimited by separator, keeping
// empty tokens ("a,,b" -> {"a","","b"}, "a," -> {"a",""}, "" -> {""}).
// If separator is empty the whole string is returned as a single token; the
// previous implementation looped forever in that case because its scan index
// never advanced.
static std::vector<std::string> SplitString(std::string str, std::string separator)
{
    std::vector<std::string> result;
    // Guard: an empty separator can never advance the scan position.
    if (separator.empty())
    {
        result.push_back(str);
        return result;
    }
    std::string::size_type start = 0;
    std::string::size_type pos;
    // Emit one token per separator occurrence...
    while ((pos = str.find(separator, start)) != std::string::npos)
    {
        result.push_back(str.substr(start, pos - start));
        start = pos + separator.size();
    }
    // ...plus the remainder after the last separator (possibly empty).
    result.push_back(str.substr(start));
    return result;
}
#if defined _WIN32 || defined WINCE
const char dir_separators[] = "/\\";
......@@ -293,7 +310,7 @@ namespace migraphxSamples
}
else
{
LOG_INFO(stdout, "could not open directory: %s", directory.c_str());
printf("could not open directory: %s", directory.c_str());
}
}
......@@ -390,7 +407,7 @@ namespace migraphxSamples
#endif
if (!result)
{
LOG_INFO(stdout, "can't remove directory: %s\n", path.c_str());
printf("can't remove directory: %s\n", path.c_str());
}
}
else
......@@ -402,7 +419,7 @@ namespace migraphxSamples
#endif
if (!result)
{
LOG_INFO(stdout, "can't remove file: %s\n", path.c_str());
printf("can't remove file: %s\n", path.c_str());
}
}
}
......@@ -438,7 +455,7 @@ namespace migraphxSamples
{
RemoveAll(path);
++numberOfFiles;
LOG_INFO(stdout, "%s deleted! number of deleted files:%d\n", path.c_str(), numberOfFiles);
printf("%s deleted! number of deleted files:%d\n", path.c_str(), numberOfFiles);
}
}
......@@ -452,7 +469,7 @@ namespace migraphxSamples
}
else
{
LOG_INFO(stdout, "could not open directory: %s", directory.c_str());
printf("could not open directory: %s", directory.c_str());
}
// ����RemoveAllɾ��Ŀ¼
......@@ -592,17 +609,17 @@ namespace migraphxSamples
if(!srcFile.is_open())
{
LOG_ERROR(stdout,"can not open %s\n",srcPath.c_str());
printf("can not open %s\n",srcPath.c_str());
return false;
}
if(!dstFile.is_open())
{
LOG_ERROR(stdout, "can not open %s\n", dstPath.c_str());
printf("can not open %s\n", dstPath.c_str());
return false;
}
if(srcPath==dstPath)
{
LOG_ERROR(stdout, "src can not be same with dst\n");
printf("src can not be same with dst\n");
return false;
}
char buffer[2048];
......@@ -622,7 +639,7 @@ namespace migraphxSamples
{
if(srcPath==dstPath)
{
LOG_ERROR(stdout, "src can not be same with dst\n");
printf("src can not be same with dst\n");
return false;
}
......@@ -662,9 +679,9 @@ namespace migraphxSamples
// process
double process = (1.0*(i + 1) / fileNameList.size()) * 100;
LOG_INFO(stdout, "%s done! %f% \n", GetFileName(fileNameList[i]).c_str(), process);
printf("%s done! %f% \n", GetFileName(fileNameList[i]).c_str(), process);
}
LOG_INFO(stdout, "all done!(the number of files:%d)\n", fileNameList.size());
printf("all done!(the number of files:%d)\n", fileNameList.size());
return true;
......
......@@ -3,10 +3,8 @@
#ifndef __FILE_SYSTEM_H__
#define __FILE_SYSTEM_H__
#include <vector>
#include <string>
using namespace std;
#include <vector>
namespace migraphxSamples
{
......@@ -21,7 +19,7 @@ bool IsDirectory(const std::string &path);
bool IsPathSeparator(char c);
// 路径拼接
string JoinPath(const std::string &base, const std::string &path);
std::string JoinPath(const std::string &base, const std::string &path);
// 创建多级目录,注意:创建多级目录的时候,目标目录是不能有文件存在的
bool CreateDirectories(const std::string &directoryPath);
......@@ -49,14 +47,13 @@ void Remove(const std::string &directory, const std::string &extension="");
/** 获取路径的文件名和扩展名
*
* 示例:path为D:/1/1.txt,则GetFileName()为1.txt,GetFileName_NoExtension()为1,GetExtension()为.txt,GetParentPath()为D:/1/
*/
string GetFileName(const std::string &path); // 1.txt
string GetFileName_NoExtension(const std::string &path); // 1
string GetExtension(const std::string &path);// .txt
string GetParentPath(const std::string &path);// D:/1/
std::string GetFileName(const std::string &path);
std::string GetFileName_NoExtension(const std::string &path);
std::string GetExtension(const std::string &path);
std::string GetParentPath(const std::string &path);
// 拷贝文件:CopyFile("D:/1.txt","D:/2.txt");将1.txt拷贝为2.txt
// 拷贝文件
bool CopyFile(const std::string srcPath,const std::string dstPath);
/** 拷贝目录
......
......@@ -19,7 +19,7 @@ using namespace std;
/** 简易日志
*
* 轻量级日志系统,不依赖于其他第三方库,只需要包含一个头文件就可以使用。提供了4种日志级别,包括INFO,DEBUG,WARN和ERROR。
* 不依赖于其他第三方库,只需要包含一个头文件就可以使用。提供了4种日志级别,包括INFO,DEBUG,WARN和ERROR。
*
* 示例1:
// 初始化日志,在./Log/目录下创建两个日志文件log1.log和log2.log(注意:目录./Log/需要存在,否则日志创建失败)
......
#include <DetectorYOLOV7.h>
#include <YOLOV7.h>
#include <migraphx/onnx.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/quantization.hpp>
#include <opencv2/dnn.hpp>
#include <CommonUtility.h>
#include <Filesystem.h>
#include <SimpleLog.h>
using namespace cv::dnn;
namespace migraphxSamples
{
DetectorYOLOV7::DetectorYOLOV7():logFile(NULL)
DetectorYOLOV7::DetectorYOLOV7()
{
}
......@@ -28,19 +24,24 @@ DetectorYOLOV7::~DetectorYOLOV7()
ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializationParameterOfDetector)
{
// 初始化(获取日志文件,加载配置文件等)
ErrorCode errorCode=DoCommonInitialization(initializationParameterOfDetector);
if(errorCode!=SUCCESS)
// 读取配置文件
std::string configFilePath=initializationParameterOfDetector.configFilePath;
if(Exists(configFilePath)==false)
{
LOG_ERROR(logFile,"fail to DoCommonInitialization\n");
return errorCode;
LOG_ERROR(stdout, "no configuration file!\n");
return CONFIG_FILE_NOT_EXIST;
}
if(!configurationFile.open(configFilePath, cv::FileStorage::READ))
{
LOG_ERROR(stdout, "fail to open configuration file\n");
return FAIL_TO_OPEN_CONFIG_FILE;
}
LOG_INFO(logFile,"succeed to DoCommonInitialization\n");
LOG_INFO(stdout, "succeed to open configuration file\n");
// 获取配置文件参数
FileNode netNode = configurationFile["DetectorYOLOV7"];
string modelPath=initializationParameter.parentPath+(string)netNode["ModelPath"];
string pathOfClassNameFile=(string)netNode["ClassNameFile"];
cv::FileNode netNode = configurationFile["DetectorYOLOV7"];
std::string modelPath=(std::string)netNode["ModelPath"];
std::string pathOfClassNameFile=(std::string)netNode["ClassNameFile"];
yolov7Parameter.confidenceThreshold = (float)netNode["ConfidenceThreshold"];
yolov7Parameter.nmsThreshold = (float)netNode["NMSThreshold"];
yolov7Parameter.objectThreshold = (float)netNode["ObjectThreshold"];
......@@ -50,17 +51,21 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ
// 加载模型
if(Exists(modelPath)==false)
{
LOG_ERROR(logFile,"%s not exist!\n",modelPath.c_str());
LOG_ERROR(stdout,"%s not exist!\n",modelPath.c_str());
return MODEL_NOT_EXIST;
}
net = migraphx::parse_onnx(modelPath);
LOG_INFO(logFile,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// 获取模型输入属性
std::pair<std::string, migraphx::shape> inputAttribute=*(net.get_parameter_shapes().begin());
inputName=inputAttribute.first;
inputShape=inputAttribute.second;
inputSize=cv::Size(inputShape.lens()[3],inputShape.lens()[2]);
std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
inputName=inputMap.begin()->first;
inputShape=inputMap.begin()->second;
int N=inputShape.lens()[0];
int C=inputShape.lens()[1];
int H=inputShape.lens()[2];
int W=inputShape.lens()[3];
inputSize=cv::Size(W,H);
// 设置模型为GPU模式
migraphx::target gpuTarget = migraphx::gpu::target{};
......@@ -74,20 +79,20 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ
// 编译模型
migraphx::compile_options options;
options.device_id=0; // 设置GPU设备,默认为0号设备
options.offload_copy=true; // 设置offload_copy
options.offload_copy=true;
net.compile(gpuTarget,options);
LOG_INFO(logFile,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
LOG_INFO(stdout,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
// Run once by itself
migraphx::parameter_map inputData;
inputData[inputName]=migraphx::generate_argument(inputShape);
// warm up
std::unordered_map<std::string, migraphx::argument> inputData;
inputData[inputName]=migraphx::argument{inputShape};
net.eval(inputData);
// 读取类别名
if(!pathOfClassNameFile.empty())
{
ifstream classNameFile(pathOfClassNameFile);
string line;
std::ifstream classNameFile(pathOfClassNameFile);
std::string line;
while (getline(classNameFile, line))
{
classNames.push_back(line);
......@@ -99,12 +104,12 @@ ErrorCode DetectorYOLOV7::Initialize(InitializationParameterOfDetector initializ
}
// log
LOG_INFO(logFile,"InputSize:%dx%d\n",inputSize.width,inputSize.height);
LOG_INFO(logFile,"InputName:%s\n",inputName.c_str());
LOG_INFO(logFile,"ConfidenceThreshold:%f\n",yolov7Parameter.confidenceThreshold);
LOG_INFO(logFile,"objectThreshold:%f\n",yolov7Parameter.objectThreshold);
LOG_INFO(logFile,"NMSThreshold:%f\n",yolov7Parameter.nmsThreshold);
LOG_INFO(logFile,"NumberOfClasses:%d\n",yolov7Parameter.numberOfClasses);
LOG_INFO(stdout,"InputSize:%dx%d\n",inputSize.width,inputSize.height);
LOG_INFO(stdout,"InputName:%s\n",inputName.c_str());
LOG_INFO(stdout,"ConfidenceThreshold:%f\n",yolov7Parameter.confidenceThreshold);
LOG_INFO(stdout,"objectThreshold:%f\n",yolov7Parameter.objectThreshold);
LOG_INFO(stdout,"NMSThreshold:%f\n",yolov7Parameter.nmsThreshold);
LOG_INFO(stdout,"NumberOfClasses:%d\n",yolov7Parameter.numberOfClasses);
return SUCCESS;
}
......@@ -113,22 +118,22 @@ ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
{
if(srcImage.empty()||srcImage.type()!=CV_8UC3)
{
LOG_ERROR(logFile, "image error!\n");
LOG_ERROR(stdout, "image error!\n");
return IMAGE_ERROR;
}
// 预处理并转换为NCHW
// 数据预处理并转换为NCHW格式
cv::Mat inputBlob;
blobFromImage(srcImage,
cv::dnn::blobFromImage(srcImage,
inputBlob,
1 / 255.0,
inputSize,
Scalar(0, 0, 0),
cv::Scalar(0, 0, 0),
true,
false);
// 输入数据
migraphx::parameter_map inputData;
// 创建输入数据
std::unordered_map<std::string, migraphx::argument> inputData;
inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
// 推理
......@@ -192,12 +197,12 @@ ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
//执行non maximum suppression消除冗余重叠boxes
std::vector<int> indices;
dnn::NMSBoxes(boxes, confidences, yolov7Parameter.confidenceThreshold, yolov7Parameter.nmsThreshold, indices);
cv::dnn::NMSBoxes(boxes, confidences, yolov7Parameter.confidenceThreshold, yolov7Parameter.nmsThreshold, indices);
for (size_t i = 0; i < indices.size(); ++i)
{
int idx = indices[i];
int classID=classIds[idx];
string className=classNames[classID];
std::string className=classNames[classID];
float confidence=confidences[idx];
cv::Rect box = boxes[idx];
......@@ -212,39 +217,4 @@ ErrorCode DetectorYOLOV7::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
return SUCCESS;
}
ErrorCode DetectorYOLOV7::DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector)
{
initializationParameter=initializationParameterOfDetector;
// 获取日志文件
logFile=LogManager::GetInstance()->GetLogFile(initializationParameter.logName);
// 加载配置文件
std::string configFilePath=initializationParameter.configFilePath;
if(!Exists(configFilePath))
{
LOG_ERROR(logFile, "no configuration file!\n");
return CONFIG_FILE_NOT_EXIST;
}
if(!configurationFile.open(configFilePath, FileStorage::READ))
{
LOG_ERROR(logFile, "fail to open configuration file\n");
return FAIL_TO_OPEN_CONFIG_FILE;
}
LOG_INFO(logFile, "succeed to open configuration file\n");
// 修改父路径
std::string &parentPath = initializationParameter.parentPath;
if (!parentPath.empty())
{
if(!IsPathSeparator(parentPath[parentPath.size() - 1]))
{
parentPath+=PATH_SEPARATOR;
}
}
return SUCCESS;
}
}
#ifndef __DETECTOR_YOLOV7_H__
#define __DETECTOR_YOLOV7_H__
#include <string>
#include <migraphx/program.hpp>
#include <opencv2/opencv.hpp>
#include <CommonDefinition.h>
using namespace std;
using namespace cv;
using namespace migraphx;
#include <CommonDefinition.h>
namespace migraphxSamples
{
......@@ -33,22 +28,16 @@ public:
ErrorCode Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection);
private:
ErrorCode DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector);
private:
cv::FileStorage configurationFile;
InitializationParameterOfDetector initializationParameter;
FILE *logFile;
migraphx::program net;
cv::Size inputSize;
string inputName;
std::string inputName;
migraphx::shape inputShape;
bool useFP16;
vector<string> classNames;
std::vector<std::string> classNames;
YOLOV7Parameter yolov7Parameter;
};
......
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <Sample.h>
#include <SimpleLog.h>
#include <Filesystem.h>
#include <YOLOV7.h>
void MIGraphXSamplesUsage(char* programName)
int main()
{
printf("Usage : %s <index> \n", programName);
printf("index:\n");
printf("\t 0) YOLOV7 sample.\n");
}
int main(int argc, char *argv[])
{
if (argc < 2 || argc > 2)
// 创建YOLOV7检测器
migraphxSamples::DetectorYOLOV7 detector;
migraphxSamples::InitializationParameterOfDetector initParamOfDetectorYOLOV7;
initParamOfDetectorYOLOV7.configFilePath = CONFIG_FILE;
migraphxSamples::ErrorCode errorCode=detector.Initialize(initParamOfDetectorYOLOV7);
if(errorCode!=migraphxSamples::SUCCESS)
{
MIGraphXSamplesUsage(argv[0]);
return -1;
LOG_ERROR(stdout, "fail to initialize detector!\n");
exit(-1);
}
if (!strncmp(argv[1], "-h", 2))
{
MIGraphXSamplesUsage(argv[0]);
return 0;
}
switch (*argv[1])
LOG_INFO(stdout, "succeed to initialize detector\n");
// 读取测试图片
cv::Mat srcImage=cv::imread("../Resource/Images/bus.jpg",1);
// 推理
std::vector<migraphxSamples::ResultOfDetection> predictions;
double time1 = cv::getTickCount();
detector.Detect(srcImage,predictions);
double time2 = cv::getTickCount();
double elapsedTime = (time2 - time1)*1000 / cv::getTickFrequency();
LOG_INFO(stdout, "inference time:%f ms\n", elapsedTime);
// 获取推理结果
LOG_INFO(stdout,"========== Detection Results ==========\n");
for(int i=0;i<predictions.size();++i)
{
case '0':
{
Sample_DetectorYOLOV7();
break;
}
default :
{
MIGraphXSamplesUsage(argv[0]);
break;
}
migraphxSamples::ResultOfDetection result=predictions[i];
cv::rectangle(srcImage,result.boundingBox,cv::Scalar(0,255,255),2);
LOG_INFO(stdout,"box:%d %d %d %d,label:%d,confidence:%f\n",predictions[i].boundingBox.x,
predictions[i].boundingBox.y,predictions[i].boundingBox.width,predictions[i].boundingBox.height,predictions[i].classID,predictions[i].confidence);
}
cv::imwrite("Result.jpg",srcImage);
LOG_INFO(stdout,"Detection results have been saved to ./Result.jpg\n");
return 0;
}
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment