Commit 10cdc5d7 authored by liucong's avatar liucong
Browse files

精简代码

parent bfc5da30
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
cmake_minimum_required(VERSION 3.5) cmake_minimum_required(VERSION 3.5)
# 设置项目名 # 设置项目名
project(MIGraphX_Samples) project(YOLOV5)
# 设置编译器 # 设置编译器
set(CMAKE_CXX_COMPILER g++) set(CMAKE_CXX_COMPILER g++)
...@@ -12,7 +12,6 @@ set(CMAKE_BUILD_TYPE release) ...@@ -12,7 +12,6 @@ set(CMAKE_BUILD_TYPE release)
# 添加头文件路径 # 添加头文件路径
set(INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Src/ set(INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Src/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/ ${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Detector/
$ENV{DTKROOT}/include/ $ENV{DTKROOT}/include/
${CMAKE_CURRENT_SOURCE_DIR}/depend/include/) ${CMAKE_CURRENT_SOURCE_DIR}/depend/include/)
include_directories(${INCLUDE_PATH}) include_directories(${INCLUDE_PATH})
...@@ -37,10 +36,9 @@ link_libraries(${LIBRARY}) ...@@ -37,10 +36,9 @@ link_libraries(${LIBRARY})
# 添加源文件 # 添加源文件
set(SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/Src/main.cpp set(SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/Src/main.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Sample.cpp ${CMAKE_CURRENT_SOURCE_DIR}/Src/YOLOV5.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Detector/DetectorYOLOV5.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/CommonUtility.cpp ${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/CommonUtility.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/Filesystem.cpp) ${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/Filesystem.cpp)
# 添加可执行目标 # 添加可执行目标
add_executable(MIGraphX_Samples ${SOURCE_FILES}) add_executable(YOLOV5 ${SOURCE_FILES})
# YOLOV5检测器
YOLOV5模型是目前工业界使用较多的算法,官方提供了多个不同版本的预训练模型,本份文档主要介绍了如何使用MIGraphX对YOLOV5模型进行初始化、预处理、推理以及后处理,该示例推理流程对YOLOV5其他版本的模型同样适用。
## 模型简介
YOLOV5是一种单阶段目标检测算法,该算法在YOLOV4的基础上添加了一些新的改进思路,使其速度与精度都得到了极大的性能提升。具体包括:输入端的Mosaic数据增强、自适应锚框计算、自适应图片缩放操作;主干网络的Focus结构与CSP结构;Neck端的FPN+PAN结构;输出端的损失函数GIOU_Loss以及预测框筛选的DIOU_nms。网络结构如图所示。
<img src=./YOLOV5_01.jpg style="zoom:100%;" align=middle>
## 检测器参数设置
samples工程中的Resource/Configuration.xml文件的DetectorYOLOV5节点表示YOLOV5检测器的参数,相关参数主要依据官方推理示例进行设置。各个参数含义如下:
- ModelPath:yolov5模型存放路径
- ClassNameFile:coco数据集类别文件存放路径
- UseFP16:是否使用FP16推理模式
- NumberOfClasses:检测类别数量
- ConfidenceThreshold:置信度阈值,用于判断anchor内的物体是否为正样本
- NMSThreshold:非极大值抑制阈值,用于消除重复框
- ObjectThreshold:用于判断anchor内部是否有物体
```
<ModelPath>"../Resource/Models/YOLOV5s.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!--是否使用FP16-->
<NumberOfClasses>80</NumberOfClasses><!--类别数(不包括背景类),COCO:80,VOC:20-->
<ConfidenceThreshold>0.25</ConfidenceThreshold>
<NMSThreshold>0.5</NMSThreshold>
<ObjectThreshold>0.5</ObjectThreshold>
```
## 模型初始化
模型初始化首先通过parse_onnx()函数加载YOLOV5的onnx模型,并可以通过program的get_parameter_shapes()函数获取网络的输入属性。完成模型加载之后需要使用compile()方法编译模型,编译模式使用migraphx::gpu::target{}设为GPU模式,编译过程主要基于MIGraphX IR完成各种优化。同时如果需要使用低精度量化进行推理,可以使用quantize_fp16()函数实现。
```
ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializationParameterOfDetector)
{
...
//模型加载
net = migraphx::parse_onnx(modelPath);
LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// 获取模型输入属性
std::pair<std::string, migraphx::shape> inputAttribute=*(net.get_parameter_shapes().begin());
inputName=inputAttribute.first;
inputShape=inputAttribute.second;
int N=inputShape.lens()[0];
int C=inputShape.lens()[1];
int H=inputShape.lens()[2];
int W=inputShape.lens()[3];
inputSize=cv::Size(W,H);
// 设置模型为GPU模式
migraphx::target gpuTarget = migraphx::gpu::target{};
// 量化
if(useFP16)
{
migraphx::quantize_fp16(net);
}
// 编译模型
migraphx::compile_options options;
options.device_id=0; // 设置GPU设备,默认为0号设备
options.offload_copy=true; // 设置offload_copy
net.compile(gpuTarget,options);
LOG_INFO(stdout,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
...
}
```
## 预处理
在将数据输入到模型之前,需要对图像做如下预处理操作:
1. 转换数据排布为NCHW
2. 归一化[0.0, 1.0]
3. 将输入数据的尺寸变换到YOLOV5输入大小(1,3,608,608)
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
// 预处理并转换为NCHW
cv::Mat inputBlob;
blobFromImage(srcImage, // 输入数据
inputBlob, // 输出数据
1 / 255.0, //归一化
inputSize, //YOLOV5输入尺寸,本示例为608x608
Scalar(0, 0, 0), //未减去均值
true, //转换RB通道
false);
...
}
```
## 推理
完成图像预处理以及YOLOV5目标检测相关参数设置之后开始执行推理,利用migraphx推理计算得到YOLOV5模型的输出。
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
// 创建输入数据
std::unordered_map<std::string, migraphx::shape> inputData;
inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
// 推理
std::vector<migraphx::argument> inferenceResults = net.eval(inputData);
// 获取推理结果
std::vector<cv::Mat> outs;
migraphx::argument result = inferenceResults[0];
// 转换为cv::Mat
migraphx::shape outputShape = result.get_shape();
int shape[]={outputShape.lens()[0],outputShape.lens()[1],outputShape.lens()[2]};
cv::Mat out(3,shape,CV_32F);
memcpy(out.data,result.data(),sizeof(float)*outputShape.elements());
outs.push_back(out);
...
}
```
YOLOV5的MIGraphX推理结果inferenceResults是一个std::vector< migraphx::argument >类型,YOLOV5的onnx模型包含一个输出,所以result等于inferenceResults[0],result包含三个维度:outputShape.lens()[0]=1表示batch信息,outputShape.lens()[1]=22743表示生成anchor数量,outputShape.lens()[2]=85表示对每个anchor的预测信息。同时可将85拆分为4+1+80,前4个参数用于判断每一个特征点的回归参数,回归参数调整后可以获得预测框,第5个参数用于判断每一个特征点是否包含物体,最后80个参数用于判断每一个特征点所包含的物体种类。获取上述信息之后进行anchors筛选,筛选过程分为两个步骤:
- 第一步根据objectThreshold阈值进行筛选,大于该阈值则判断当前anchor内包含物体,小于该阈值则判断无物体
- 第二步根据confidenceThreshold阈值进行筛选,当满足第一步阈值anchor的最大置信度得分maxClassScore大于该阈值,则进一步获取当前anchor的坐标信息和预测物体类别信息,小于该阈值则不做处理。
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
//获取先验框的个数numProposal=22743
numProposal = outs[0].size[1];
//每个anchor的预测信息数量numOut=85
numOut = outs[0].size[2];
outs[0] = outs[0].reshape(0, numProposal);
std::vector<float> confidences;
std::vector<cv::Rect> boxes;
std::vector<int> classIds;
//原图尺寸与模型输入尺寸的缩放比例
float ratioh = (float)srcImage.rows / inputSize.height, ratiow = (float)srcImage.cols / inputSize.width;
//计算cx,cy,w,h,box_score,class_score
int n = 0, rowInd = 0;
float* pdata = (float*)outs[0].data;
for (n = 0; n < numProposal; n++)
{
//获取当前anchor是否包含物体的概率值
float boxScores = pdata[4];
//第一次筛选,判断anchor内是否包含物体
if (boxScores > yolov5Parameter.objectThreshold)
{
//获取每个anchor内部预测的80个类别概率信息
cv::Mat scores = outs[0].row(rowInd).colRange(5, numOut);
cv::Point classIdPoint;
double maxClassScore;
//获取80个类别中最大概率值和对应的类别ID
cv::minMaxLoc(scores, 0, &maxClassScore, 0, &classIdPoint);
maxClassScore *= boxScores;
//第二次筛选,判断当前anchor的最大置信度得分是否满足阈值
if (maxClassScore > yolov5Parameter.confidenceThreshold)
{
const int classIdx = classIdPoint.x;
//将每个anchor坐标按缩放比例映射到原图
float cx = pdata[0] * ratiow;
float cy = pdata[1] * ratioh;
float w = pdata[2] * ratiow;
float h = pdata[3] * ratioh;
//获取anchor的左上角坐标
int left = int(cx - 0.5 * w);
int top = int(cy - 0.5 * h);
confidences.push_back((float)maxClassScore);
boxes.push_back(cv::Rect(left, top, (int)(w), (int)(h)));
classIds.push_back(classIdx);
}
}
rowInd++;
pdata += numOut;
}
...
}
```
为了消除重叠锚框,输出最终的YOLOV5目标检测结果,执行非极大值抑制对筛选之后的anchor进行处理,最后保存检测结果到resultsOfDetection中。
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection)
{
...
// 执行non maximum suppression消除冗余重叠boxes
std::vector<int> indices;
cv::dnn::NMSBoxes(boxes, confidences, yolov5Parameter.confidenceThreshold, yolov5Parameter.nmsThreshold, indices);
for (size_t i = 0; i < indices.size(); ++i)
{
int idx = indices[i];
int classID=classIds[idx];
string className=classNames[classID];
float confidence=confidences[idx];
cv::Rect box = boxes[idx];
//保存每个最终预测anchor的坐标值、置信度分数、类别ID
ResultOfDetection result;
result.boundingBox=box;
result.confidence=confidence;// confidence
result.classID=classID; // label
result.className=className;
resultsOfDetection.push_back(result);
}
...
}
```
# YOLOV5检测器
本份文档主要介绍如何基于MIGraphX构建YOLOV5的Python推理示例,根据文档描述可以了解怎样运行该Python示例得到YOLOV5的目标检测结果。
## 模型简介
YOLOV5是一种单阶段目标检测算法,该算法在YOLOV4的基础上添加了一些新的改进思路,使其速度与精度都得到了极大的性能提升。具体包括:输入端的Mosaic数据增强、自适应锚框计算、自适应图像缩放操作;主干网络的Focus结构与CSP结构;Neck端的FPN+PAN结构;输出端的损失函数GIOU_Loss以及预测框筛选的DIOU_nms。网络结构如图所示。
<img src=./YOLOV5_01.jpg style="zoom:100%;" align=middle>
## 预处理
待检测图像输入模型进行检测之前需要进行预处理,主要包括调整输入的尺寸,归一化等操作。
1. 转换数据排布为NCHW
2. 归一化[0.0, 1.0]
3. 调整输入数据的尺寸为(1,3,608,608)
```
def prepare_input(self, image):
    """Preprocess a BGR image into the NCHW float32 blob the model expects.

    Steps: BGR->RGB conversion, resize to the model input size, HWC->CHW
    transpose, batch-dimension expansion, and scaling to [0.0, 1.0].
    Side effect: stores the original image size in self.img_height /
    self.img_width so boxes can later be mapped back to the source image.
    """
    self.img_height, self.img_width = image.shape[:2]
    input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Resize to the network input size (plain resize, no aspect-ratio
    # preserving letterbox).
    input_img = cv2.resize(input_img, (self.inputWidth, self.inputHeight))
    # HWC -> CHW
    input_img = input_img.transpose(2, 0, 1)
    # Add the batch dimension: CHW -> NCHW.
    input_img = np.expand_dims(input_img, 0)
    input_img = np.ascontiguousarray(input_img)
    input_img = input_img.astype(np.float32)
    # Normalize pixel values to [0.0, 1.0].
    input_img = input_img / 255
    return input_img
```
其中模型输入的inputWidth、inputHeight通过migraphx对输入模型进行解析获取,代码位置见YOLOV5类初始化位置。
```
class YOLOv5:
    def __init__(self, path, obj_thres=0.5, conf_thres=0.25, iou_thres=0.5):
        """Create a YOLOV5 detector backed by a MIGraphX-parsed ONNX model.

        path: path to the YOLOV5 ONNX model file.
        obj_thres: objectness threshold for the first anchor filter.
        conf_thres: class-confidence threshold for the second filter.
        iou_thres: IoU threshold used by non-maximum suppression.
        """
        self.objectThreshold = obj_thres
        self.confThreshold = conf_thres
        self.nmsThreshold = iou_thres
        # COCO class names, one name per line of the file.
        self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/coco.names', 'r').readlines()))
        # Parse the ONNX model into a MIGraphX program.
        self.model = migraphx.parse_onnx(path)
        # The first parameter name is the network input.
        self.inputName = self.model.get_parameter_names()[0]
        # Input shape is NCHW; indices 2/3 are height/width.
        inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
        self.inputHeight = int(inputShape[2])
        self.inputWidth = int(inputShape[3])
```
## 推理
输入图像预处理完成之后开始进行推理,首先需要利用migraphx进行编译,然后对输入数据进行前向计算得到模型的输出result,在detect函数中调用定义的process_output函数对result进行后处理,得到图像中含有物体的anchor坐标信息、类别置信度、类别ID。
```
def detect(self, image):
    """Run the full pipeline on one image: preprocess, compile and run the
    model, then postprocess.

    Returns (boxes, scores, class_ids) as produced by process_output.
    """
    # Preprocess into the NCHW float32 input blob.
    input_img = self.prepare_input(image)
    # Compile the program for GPU execution.
    # NOTE(review): compile() runs on every detect() call; for repeated
    # detections it would be cheaper to compile once in __init__ — confirm.
    self.model.compile(t=migraphx.get_target("gpu"), device_id=0)  # device_id: GPU device, defaults to device 0
    print("Success to compile")
    # Run inference and time the forward pass.
    print("Start to inference")
    start = time.time()
    result = self.model.run({self.model.get_parameter_names()[0]: input_img})
    print('net forward time: {:.4f}'.format(time.time() - start))
    # Postprocess the raw network output into boxes / scores / class ids.
    boxes, scores, class_ids = self.process_output(result)
    return boxes, scores, class_ids
```
其中对migraphx推理输出result进行后处理,首先需要对生成的anchor根据是否有物体阈值objectThreshold、置信度阈值confThreshold进行筛选,相关过程定义在process_output函数中。获取筛选后的anchor的坐标信息之后,需要将坐标映射到原图中的位置,相关过程定义在rescale_boxes函数中。
```
def process_output(self, output):
    """Filter the raw model output into final boxes, scores and class ids.

    output[0] has shape (1, num_anchors, 5 + num_classes): 4 box regression
    values, 1 objectness score, then the per-class scores. Two filters are
    applied (objectness > objectThreshold, then combined class score >
    confThreshold) followed by non-maximum suppression.
    """
    predictions = np.squeeze(output[0])
    # First filter: keep anchors whose objectness exceeds objectThreshold.
    obj_conf = predictions[:, 4]
    predictions = predictions[obj_conf > self.objectThreshold]
    obj_conf = obj_conf[obj_conf > self.objectThreshold]
    # Second filter: class score scaled by objectness must exceed confThreshold.
    predictions[:, 5:] *= obj_conf[:, np.newaxis]
    scores = np.max(predictions[:, 5:], axis=1)
    valid_scores = scores > self.confThreshold
    predictions = predictions[valid_scores]
    scores = scores[valid_scores]
    # Class id = argmax over the per-class scores.
    class_ids = np.argmax(predictions[:, 5:], axis=1)
    # Box coordinates for the surviving predictions.
    boxes = self.extract_boxes(predictions)
    # Non-maximum suppression removes overlapping anchors.
    # NOTE(review): cv2.dnn.NMSBoxes returns an empty tuple when nothing
    # survives, and .flatten() would then raise — confirm empty-result handling.
    indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), self.confThreshold, self.nmsThreshold).flatten()
    return boxes[indices], scores[indices], class_ids[indices]
def rescale_boxes(self, boxes):
    """Map boxes from model-input coordinates back to original-image coordinates.

    Divides by the (W, H, W, H) network input size and multiplies by the
    original image size recorded in prepare_input.
    """
    input_shape = np.array([self.inputWidth, self.inputHeight, self.inputWidth, self.inputHeight])
    boxes = np.divide(boxes, input_shape, dtype=np.float32)
    boxes *= np.array([self.img_width, self.img_height, self.img_width, self.img_height])
    return boxes
```
根据获取的detect函数输出的boxes、scores、class_ids信息在原图进行结果可视化,包括绘制图像中检测到的物体位置、类别和置信度分数,得到最终的YOLOV5目标检测结果输出。
```
def draw_detections(self, image, boxes, scores, class_ids):
    """Draw every detection box and its "<class> <score>%" label onto image.

    NOTE(review): the box is unpacked as cx, cy, w, h but (cx, cy) is used as
    the rectangle's top-left corner — confirm the box format produced upstream.
    """
    for box, score, class_id in zip(boxes, scores, class_ids):
        cx, cy, w, h = box.astype(int)
        # Draw the detection rectangle.
        cv2.rectangle(image, (cx, cy), (cx + w, cy + h), (0, 255, 255), thickness=2)
        label = self.classNames[class_id]
        label = f'{label} {int(score * 100)}%'
        # NOTE(review): labelSize/baseLine are computed but never used, and the
        # text is measured at font scale 0.5 yet drawn at scale 1 — confirm.
        labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        cv2.putText(image, label, (cx, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), thickness=2)
    return image
```
...@@ -14,7 +14,7 @@ class YOLOv5: ...@@ -14,7 +14,7 @@ class YOLOv5:
self.nmsThreshold = iou_thres self.nmsThreshold = iou_thres
# 获取模型检测的类别信息 # 获取模型检测的类别信息
self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/Detector/YOLOV5/coco.names', 'r').readlines())) self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/coco.names', 'r').readlines()))
# 解析推理模型 # 解析推理模型
self.model = migraphx.parse_onnx(path) self.model = migraphx.parse_onnx(path)
...@@ -119,7 +119,7 @@ class YOLOv5: ...@@ -119,7 +119,7 @@ class YOLOv5:
if __name__ == '__main__': if __name__ == '__main__':
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--imgpath', type=str, default='../Resource/Images/bus.jpg', help="image path") parser.add_argument('--imgpath', type=str, default='../Resource/Images/bus.jpg', help="image path")
parser.add_argument('--modelpath', type=str, default='../Resource/Models/Detector/YOLOV5/yolov5s.onnx', help="onnx filepath") parser.add_argument('--modelpath', type=str, default='../Resource/Models/yolov5s.onnx', help="onnx filepath")
parser.add_argument('--objectThreshold', default=0.5, type=float, help='class confidence') parser.add_argument('--objectThreshold', default=0.5, type=float, help='class confidence')
parser.add_argument('--confThreshold', default=0.25, type=float, help='class confidence') parser.add_argument('--confThreshold', default=0.25, type=float, help='class confidence')
parser.add_argument('--nmsThreshold', default=0.5, type=float, help='nms iou thresh') parser.add_argument('--nmsThreshold', default=0.5, type=float, help='nms iou thresh')
......
...@@ -13,7 +13,11 @@ YoloV5模型的主要改进思路有以下几点: ...@@ -13,7 +13,11 @@ YoloV5模型的主要改进思路有以下几点:
- Neck端的FPN+PAN结构; - Neck端的FPN+PAN结构;
- 输出端的损失函数GIOU_Loss以及预测框筛选的DIOU_nms。 - 输出端的损失函数GIOU_Loss以及预测框筛选的DIOU_nms。
## 构建安装 ## python版本推理
下面介绍如何运行python代码示例,具体推理代码解析,在Doc/Tutorial_Python.md中有详细说明。
### 构建安装
在光源可拉取推理的docker镜像,YoloV5工程推荐的镜像如下: 在光源可拉取推理的docker镜像,YoloV5工程推荐的镜像如下:
...@@ -21,6 +25,36 @@ YoloV5模型的主要改进思路有以下几点: ...@@ -21,6 +25,36 @@ YoloV5模型的主要改进思路有以下几点:
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1 docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
``` ```
### 推理示例
YoloV5模型的推理示例程序是YoloV5_infer_migraphx.py,使用如下命令运行该推理示例:
```
# 进入python示例目录
cd ./Python
# 安装依赖
pip install -r requirements.txt
# 运行程序
python YoloV5_infer_migraphx.py \
--imgpath 测试图像路径 \
--modelpath onnx模型路径 \
--objectThreshold 判断是否有物体阈值,默认0.5 \
--confThreshold 置信度阈值,默认0.25 \
--nmsThreshold nms阈值,默认0.5 \
```
程序运行结束会在当前目录生成YoloV5检测结果图像。
<img src="./Resource/Images/Result.jpg" alt="Result_2" style="zoom: 50%;" />
## C++版本推理
下面介绍如何运行C++代码示例,具体推理代码解析,在Doc/Tutorial_Cpp.md中有详细说明。
参考Python版本推理中的构建安装,在光源中拉取推理的docker镜像。
### 安装Opencv依赖 ### 安装Opencv依赖
```python ```python
...@@ -64,43 +98,24 @@ export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib/:$LD_LIBRARY_PATH ...@@ -64,43 +98,24 @@ export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib/:$LD_LIBRARY_PATH
source ~/.bashrc source ~/.bashrc
``` ```
## 推理 ### 推理示例
### C++版本推理
成功编译YoloV5工程后,在build目录下输入如下命令运行该示例: 成功编译YoloV5工程后,执行如下命令运行该示例:
``` ```
./MIGraphX_Samples 0 # 进入migraphx samples工程根目录
``` cd <path_to_migraphx_samples>
程序运行结束会在build目录生成YoloV5检测结果图像。
<img src="./Resource/Images/Result.jpg" alt="Result" style="zoom:50%;" />
### python版本推理 # 进入build目录
cd ./build/
YoloV5模型的推理示例程序是YoloV5_infer_migraphx.py,使用如下命令运行该推理示例:
# 执行示例程序
./YOLOV5
``` ```
# 进入python示例目录
cd ./Python
# 安装依赖
pip install -r requirements.txt
# 运行程序 程序运行结束会在build目录生成YoloV5检测结果图像。
python YoloV5_infer_migraphx.py \
--imgpath 测试图像路径 \
--modelpath onnx模型路径 \
--objectThreshold 判断是否有物体阈值,默认0.5 \
--confThreshold 置信度阈值,默认0.25 \
--nmsThreshold nms阈值,默认0.5 \
```
程序运行结束会在当前目录生成YoloV5检测结果图像。
<img src="./Resource/Images/Result.jpg" alt="Result_2" style="zoom: 50%;" /> <img src="./Resource/Images/Result.jpg" alt="Result" style="zoom:50%;" />
## 历史版本 ## 历史版本
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
<!--YOLOV5检测器 --> <!--YOLOV5检测器 -->
<DetectorYOLOV5> <DetectorYOLOV5>
<ModelPath>"../Resource/Models/Detector/YOLOV5/yolov5s.onnx"</ModelPath> <ModelPath>"../Resource/Models/yolov5s.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/Detector/YOLOV5/coco.names"</ClassNameFile> <ClassNameFile>"../Resource/Models/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!--是否使用FP16--> <UseFP16>0</UseFP16><!--是否使用FP16-->
<NumberOfClasses>80</NumberOfClasses><!--类别数(不包括背景类),COCO:80,VOC:20--> <NumberOfClasses>80</NumberOfClasses><!--类别数(不包括背景类),COCO:80,VOC:20-->
<ConfidenceThreshold>0.25</ConfidenceThreshold> <ConfidenceThreshold>0.25</ConfidenceThreshold>
......
#include <Sample.h>
#include <opencv2/dnn.hpp>
#include <SimpleLog.h>
#include <Filesystem.h>
#include <DetectorYOLOV5.h>
#include <fstream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
using namespace migraphx;
using namespace migraphxSamples;
// Demo entry point: initializes the YOLOV5 detector from the XML
// configuration file, runs detection on a test image, draws and logs the
// results, and writes the annotated image to ./Result.jpg.
void Sample_DetectorYOLOV5()
{
    // Create the YOLOV5 detector and fill in its initialization parameters.
    // CONFIG_FILE is a project-defined macro pointing at
    // "../Resource/Configuration.xml".
    DetectorYOLOV5 detector;
    InitializationParameterOfDetector initParamOfDetectorYOLOV5;
    initParamOfDetectorYOLOV5.parentPath = "";
    initParamOfDetectorYOLOV5.configFilePath = CONFIG_FILE;
    initParamOfDetectorYOLOV5.logName = "";
    ErrorCode errorCode=detector.Initialize(initParamOfDetectorYOLOV5);
    if(errorCode!=SUCCESS)
    {
        LOG_ERROR(stdout, "fail to initialize detector!\n");
        exit(-1);
    }
    LOG_INFO(stdout, "succeed to initialize detector\n");
    // Load the test image (flag 1 = force 3-channel color).
    Mat srcImage=imread("../Resource/Images/bus.jpg",1);
    // Run inference and time it with OpenCV's tick counter.
    std::vector<ResultOfDetection> predictions;
    double time1 = getTickCount();
    detector.Detect(srcImage,predictions);
    double time2 = getTickCount();
    double elapsedTime = (time2 - time1)*1000 / getTickFrequency();
    LOG_INFO(stdout, "inference time:%f ms\n", elapsedTime);
    // Draw each detection (bounding box + "<class> <confidence>" label)
    // and log its raw values.
    LOG_INFO(stdout,"========== Detection Results ==========\n");
    for(int i=0;i<predictions.size();++i)
    {
        ResultOfDetection result=predictions[i];
        cv::rectangle(srcImage,result.boundingBox,Scalar(0,255,255),2);
        string label = format("%.2f", result.confidence);
        label = result.className + " " + label;
        int left = predictions[i].boundingBox.x;
        int top = predictions[i].boundingBox.y;
        int baseLine;
        Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
        // Keep the label inside the image when the box touches the top edge.
        // NOTE(review): labelSize is measured at font scale 0.5 but the text
        // is drawn at scale 1 — confirm the intended font scale.
        top = max(top, labelSize.height);
        putText(srcImage, label, Point(left, top-10), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 255), 2);
        LOG_INFO(stdout,"box:%d %d %d %d,label:%d,confidence:%f\n",predictions[i].boundingBox.x,
            predictions[i].boundingBox.y,predictions[i].boundingBox.width,predictions[i].boundingBox.height,predictions[i].classID,predictions[i].confidence);
    }
    imwrite("Result.jpg",srcImage);
    LOG_INFO(stdout,"Detection results have been saved to ./Result.jpg\n");
}
// Sample program declarations.
//
// NOTE(review): the previous include guard __SAMPLE_H__ began with a double
// underscore, which is reserved for the implementation ([lex.name]);
// renamed to SAMPLE_H_.
#ifndef SAMPLE_H_
#define SAMPLE_H_

// YOLOV5 sample: initializes the detector from the XML configuration, runs
// detection on a test image and writes the annotated result to Result.jpg.
void Sample_DetectorYOLOV5();

#endif // SAMPLE_H_
\ No newline at end of file
// 常用数据类型和宏定义 // 常用定义
#ifndef __COMMON_DEFINITION_H__ #ifndef __COMMON_DEFINITION_H__
#define __COMMON_DEFINITION_H__ #define __COMMON_DEFINITION_H__
#include <string>
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
namespace migraphxSamples namespace migraphxSamples
{ {
...@@ -21,20 +17,7 @@ namespace migraphxSamples ...@@ -21,20 +17,7 @@ namespace migraphxSamples
#define CONFIG_FILE "../Resource/Configuration.xml" #define CONFIG_FILE "../Resource/Configuration.xml"
typedef struct __Time typedef enum _ErrorCode
{
string year;
string month;
string day;
string hour;
string minute;
string second;
string millisecond; // ms
string microsecond; // us
string weekDay;
}_Time;
typedef enum _ErrorCode
{ {
SUCCESS=0, // 0 SUCCESS=0, // 0
MODEL_NOT_EXIST, // 模型不存在 MODEL_NOT_EXIST, // 模型不存在
...@@ -44,7 +27,7 @@ typedef enum _ErrorCode ...@@ -44,7 +27,7 @@ typedef enum _ErrorCode
IMAGE_ERROR, // 图像错误 IMAGE_ERROR, // 图像错误
}ErrorCode; }ErrorCode;
typedef struct _ResultOfPrediction typedef struct _ResultOfPrediction
{ {
float confidence; float confidence;
int label; int label;
...@@ -52,27 +35,24 @@ typedef struct _ResultOfPrediction ...@@ -52,27 +35,24 @@ typedef struct _ResultOfPrediction
}ResultOfPrediction; }ResultOfPrediction;
typedef struct _ResultOfDetection typedef struct _ResultOfDetection
{ {
Rect boundingBox; cv::Rect boundingBox;
float confidence; float confidence;
int classID; int classID;
string className; std::string className;
bool exist; bool exist;
_ResultOfDetection():confidence(0.0f),classID(0),exist(true){} _ResultOfDetection():confidence(0.0f),classID(0),exist(true){}
}ResultOfDetection; }ResultOfDetection;
typedef struct _InitializationParameterOfDetector typedef struct _InitializationParameterOfDetector
{ {
std::string parentPath; std::string parentPath;
std::string configFilePath; std::string configFilePath;
cv::Size inputSize;
std::string logName;
}InitializationParameterOfDetector; }InitializationParameterOfDetector;
} }
#endif #endif
......
#include <CommonUtility.h> #include <CommonUtility.h>
#include <assert.h>
#include <ctype.h>
#include <time.h>
#include <stdlib.h>
#include <algorithm>
#include <sstream>
#include <vector>
#ifdef _WIN32
#include <io.h>
#include <direct.h>
#include <Windows.h>
#else
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include <SimpleLog.h>
namespace migraphxSamples namespace migraphxSamples
{ {
// Get the current local time with every field pre-formatted as a zero-padded
// decimal string (e.g. year "2024", month "07", millisecond "042").
//
// Fixes over the previous version:
//  - the Win32 branch never filled _Time::microsecond (the POSIX branch did);
//    SYSTEMTIME has no sub-millisecond precision, so it now reports "000";
//  - tv_usec (suseconds_t / long) is cast to int before printing with
//    %d/%03d, avoiding a printf format mismatch;
//  - snprintf bounds every conversion instead of unbounded sprintf into a
//    small buffer; localtime_r replaces localtime to avoid its shared
//    static buffer.
_Time GetCurrentTime3()
{
    _Time currentTime;
    char temp[16] = {0};
#if (defined WIN32 || defined _WIN32)
    SYSTEMTIME systemTime;
    GetLocalTime(&systemTime);
    snprintf(temp, sizeof(temp), "%04d", (int)systemTime.wYear);
    currentTime.year = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", (int)systemTime.wMonth);
    currentTime.month = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", (int)systemTime.wDay);
    currentTime.day = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", (int)systemTime.wHour);
    currentTime.hour = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", (int)systemTime.wMinute);
    currentTime.minute = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", (int)systemTime.wSecond);
    currentTime.second = std::string(temp);
    snprintf(temp, sizeof(temp), "%03d", (int)systemTime.wMilliseconds);
    currentTime.millisecond = std::string(temp);
    currentTime.microsecond = "000"; // SYSTEMTIME carries no microseconds
    snprintf(temp, sizeof(temp), "%d", (int)systemTime.wDayOfWeek);
    currentTime.weekDay = std::string(temp);
#else
    struct timeval tv;
    gettimeofday(&tv, NULL);
    struct tm timeInfo;
    localtime_r(&tv.tv_sec, &timeInfo); // thread-safe variant of localtime()
    snprintf(temp, sizeof(temp), "%04d", 1900 + timeInfo.tm_year);
    currentTime.year = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", 1 + timeInfo.tm_mon);
    currentTime.month = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", timeInfo.tm_mday);
    currentTime.day = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", timeInfo.tm_hour);
    currentTime.hour = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", timeInfo.tm_min);
    currentTime.minute = std::string(temp);
    snprintf(temp, sizeof(temp), "%02d", timeInfo.tm_sec);
    currentTime.second = std::string(temp);
    snprintf(temp, sizeof(temp), "%03d", (int)(tv.tv_usec / 1000));
    currentTime.millisecond = std::string(temp);
    snprintf(temp, sizeof(temp), "%03d", (int)(tv.tv_usec % 1000));
    currentTime.microsecond = std::string(temp);
    snprintf(temp, sizeof(temp), "%d", timeInfo.tm_wday);
    currentTime.weekDay = std::string(temp);
#endif
    return currentTime;
}
// Split `str` on every occurrence of `separator` and return the pieces in
// order. Empty fields are preserved: SplitString("a,,b", ",") yields
// {"a", "", "b"} and SplitString("a,", ",") yields {"a", ""}.
//
// Fixes: the previous implementation looped forever when `separator` was
// empty (find() matched at every position without advancing); an empty
// separator now returns the whole input as a single element. The scan also
// no longer compares the unsigned find() result against a signed int size.
std::vector<std::string> SplitString(std::string str, std::string separator)
{
    std::vector<std::string> result;
    if (separator.empty())
    {
        result.push_back(str);
        return result;
    }
    std::string::size_type start = 0;
    std::string::size_type pos;
    while ((pos = str.find(separator, start)) != std::string::npos)
    {
        result.push_back(str.substr(start, pos - start));
        start = pos + separator.size();
    }
    // Trailing piece after the last separator (may be empty).
    result.push_back(str.substr(start));
    return result;
}
bool CompareConfidence(const ResultOfDetection &L,const ResultOfDetection &R) bool CompareConfidence(const ResultOfDetection &L,const ResultOfDetection &R)
{ {
return L.confidence > R.confidence; return L.confidence > R.confidence;
...@@ -109,7 +13,7 @@ bool CompareArea(const ResultOfDetection &L,const ResultOfDetection &R) ...@@ -109,7 +13,7 @@ bool CompareArea(const ResultOfDetection &L,const ResultOfDetection &R)
return L.boundingBox.area() > R.boundingBox.area(); return L.boundingBox.area() > R.boundingBox.area();
} }
void NMS(vector<ResultOfDetection> &detections, float IOUThreshold) void NMS(std::vector<ResultOfDetection> &detections, float IOUThreshold)
{ {
// sort // sort
std::sort(detections.begin(), detections.end(), CompareConfidence); std::sort(detections.begin(), detections.end(), CompareConfidence);
......
...@@ -3,23 +3,16 @@ ...@@ -3,23 +3,16 @@
#ifndef __COMMON_UTILITY_H__ #ifndef __COMMON_UTILITY_H__
#define __COMMON_UTILITY_H__ #define __COMMON_UTILITY_H__
#include <mutex>
#include <string>
#include <vector>
#include <CommonDefinition.h> #include <CommonDefinition.h>
using namespace std;
namespace migraphxSamples namespace migraphxSamples
{ {
// 分割字符串
std::vector<std::string> SplitString(std::string str,std::string separator);
// 排序规则: 按照置信度或者按照面积排序 // 排序规则: 按照置信度或者按照面积排序
bool CompareConfidence(const ResultOfDetection &L,const ResultOfDetection &R); bool CompareConfidence(const ResultOfDetection &L,const ResultOfDetection &R);
bool CompareArea(const ResultOfDetection &L,const ResultOfDetection &R); bool CompareArea(const ResultOfDetection &L,const ResultOfDetection &R);
// 非极大抑制
void NMS(std::vector<ResultOfDetection> &detections, float IOUThreshold); void NMS(std::vector<ResultOfDetection> &detections, float IOUThreshold);
} }
......
...@@ -11,12 +11,7 @@ ...@@ -11,12 +11,7 @@
#include <unistd.h> #include <unistd.h>
#include <dirent.h> #include <dirent.h>
#endif #endif
#include <CommonUtility.h>
#include <opencv2/opencv.hpp>
#include <SimpleLog.h>
using namespace cv;
// 路径分隔符(Linux:‘/’,Windows:’\\’) // 路径分隔符(Linux:‘/’,Windows:’\\’)
#ifdef _WIN32 #ifdef _WIN32
#define PATH_SEPARATOR '\\' #define PATH_SEPARATOR '\\'
...@@ -24,9 +19,31 @@ using namespace cv; ...@@ -24,9 +19,31 @@ using namespace cv;
#define PATH_SEPARATOR '/' #define PATH_SEPARATOR '/'
#endif #endif
using namespace std;
namespace migraphxSamples namespace migraphxSamples
{ {
// File-local helper: split `str` on every occurrence of `separator` and
// return the pieces in order. Empty fields are preserved, e.g.
// SplitString("a,,b", ",") yields {"a", "", "b"}.
//
// Fixes: the previous implementation looped forever when `separator` was
// empty (find() matched at every position without advancing); an empty
// separator now returns the whole input as a single element. The scan also
// no longer compares the unsigned find() result against a signed int size.
static std::vector<std::string> SplitString(std::string str, std::string separator)
{
    std::vector<std::string> result;
    if (separator.empty())
    {
        result.push_back(str);
        return result;
    }
    std::string::size_type start = 0;
    std::string::size_type pos;
    while ((pos = str.find(separator, start)) != std::string::npos)
    {
        result.push_back(str.substr(start, pos - start));
        start = pos + separator.size();
    }
    // Trailing piece after the last separator (may be empty).
    result.push_back(str.substr(start));
    return result;
}
#if defined _WIN32 || defined WINCE #if defined _WIN32 || defined WINCE
const char dir_separators[] = "/\\"; const char dir_separators[] = "/\\";
...@@ -293,7 +310,7 @@ namespace migraphxSamples ...@@ -293,7 +310,7 @@ namespace migraphxSamples
} }
else else
{ {
LOG_INFO(stdout, "could not open directory: %s", directory.c_str()); printf("could not open directory: %s", directory.c_str());
} }
} }
...@@ -390,7 +407,7 @@ namespace migraphxSamples ...@@ -390,7 +407,7 @@ namespace migraphxSamples
#endif #endif
if (!result) if (!result)
{ {
LOG_INFO(stdout, "can't remove directory: %s\n", path.c_str()); printf("can't remove directory: %s\n", path.c_str());
} }
} }
else else
...@@ -402,7 +419,7 @@ namespace migraphxSamples ...@@ -402,7 +419,7 @@ namespace migraphxSamples
#endif #endif
if (!result) if (!result)
{ {
LOG_INFO(stdout, "can't remove file: %s\n", path.c_str()); printf("can't remove file: %s\n", path.c_str());
} }
} }
} }
...@@ -438,7 +455,7 @@ namespace migraphxSamples ...@@ -438,7 +455,7 @@ namespace migraphxSamples
{ {
RemoveAll(path); RemoveAll(path);
++numberOfFiles; ++numberOfFiles;
LOG_INFO(stdout, "%s deleted! number of deleted files:%d\n", path.c_str(), numberOfFiles); printf("%s deleted! number of deleted files:%d\n", path.c_str(), numberOfFiles);
} }
} }
...@@ -452,7 +469,7 @@ namespace migraphxSamples ...@@ -452,7 +469,7 @@ namespace migraphxSamples
} }
else else
{ {
LOG_INFO(stdout, "could not open directory: %s", directory.c_str()); printf("could not open directory: %s", directory.c_str());
} }
// 调用RemoveAll删除目录 // 调用RemoveAll删除目录
...@@ -592,17 +609,17 @@ namespace migraphxSamples ...@@ -592,17 +609,17 @@ namespace migraphxSamples
if(!srcFile.is_open()) if(!srcFile.is_open())
{ {
LOG_ERROR(stdout,"can not open %s\n",srcPath.c_str()); printf("can not open %s\n",srcPath.c_str());
return false; return false;
} }
if(!dstFile.is_open()) if(!dstFile.is_open())
{ {
LOG_ERROR(stdout, "can not open %s\n", dstPath.c_str()); printf("can not open %s\n", dstPath.c_str());
return false; return false;
} }
if(srcPath==dstPath) if(srcPath==dstPath)
{ {
LOG_ERROR(stdout, "src can not be same with dst\n"); printf("src can not be same with dst\n");
return false; return false;
} }
char buffer[2048]; char buffer[2048];
...@@ -622,7 +639,7 @@ namespace migraphxSamples ...@@ -622,7 +639,7 @@ namespace migraphxSamples
{ {
if(srcPath==dstPath) if(srcPath==dstPath)
{ {
LOG_ERROR(stdout, "src can not be same with dst\n"); printf("src can not be same with dst\n");
return false; return false;
} }
...@@ -662,9 +679,9 @@ namespace migraphxSamples ...@@ -662,9 +679,9 @@ namespace migraphxSamples
// process // process
double process = (1.0*(i + 1) / fileNameList.size()) * 100; double process = (1.0*(i + 1) / fileNameList.size()) * 100;
LOG_INFO(stdout, "%s done! %f% \n", GetFileName(fileNameList[i]).c_str(), process); printf("%s done! %f% \n", GetFileName(fileNameList[i]).c_str(), process);
} }
LOG_INFO(stdout, "all done!(the number of files:%d)\n", fileNameList.size()); printf("all done!(the number of files:%d)\n", fileNameList.size());
return true; return true;
......
...@@ -3,10 +3,8 @@ ...@@ -3,10 +3,8 @@
#ifndef __FILE_SYSTEM_H__ #ifndef __FILE_SYSTEM_H__
#define __FILE_SYSTEM_H__ #define __FILE_SYSTEM_H__
#include <vector>
#include <string> #include <string>
#include <vector>
using namespace std;
namespace migraphxSamples namespace migraphxSamples
{ {
...@@ -21,7 +19,7 @@ bool IsDirectory(const std::string &path); ...@@ -21,7 +19,7 @@ bool IsDirectory(const std::string &path);
bool IsPathSeparator(char c); bool IsPathSeparator(char c);
// 路径拼接 // 路径拼接
string JoinPath(const std::string &base, const std::string &path); std::string JoinPath(const std::string &base, const std::string &path);
// 创建多级目录,注意:创建多级目录的时候,目标目录是不能有文件存在的 // 创建多级目录,注意:创建多级目录的时候,目标目录是不能有文件存在的
bool CreateDirectories(const std::string &directoryPath); bool CreateDirectories(const std::string &directoryPath);
...@@ -49,14 +47,13 @@ void Remove(const std::string &directory, const std::string &extension=""); ...@@ -49,14 +47,13 @@ void Remove(const std::string &directory, const std::string &extension="");
/** 获取路径的文件名和扩展名 /** 获取路径的文件名和扩展名
* *
* 示例:path为D:/1/1.txt,则GetFileName()为1.txt,GetFileName_NoExtension()为1,GetExtension()为.txt,GetParentPath()为D:/1/ * 示例:path为D:/1/1.txt,则GetFileName()为1.txt,GetFileName_NoExtension()为1,GetExtension()为.txt,GetParentPath()为D:/1/
*/ */
string GetFileName(const std::string &path); // 1.txt std::string GetFileName(const std::string &path);
string GetFileName_NoExtension(const std::string &path); // 1 std::string GetFileName_NoExtension(const std::string &path);
string GetExtension(const std::string &path);// .txt std::string GetExtension(const std::string &path);
string GetParentPath(const std::string &path);// D:/1/ std::string GetParentPath(const std::string &path);
// 拷贝文件:CopyFile("D:/1.txt","D:/2.txt");将1.txt拷贝为2.txt // 拷贝文件
bool CopyFile(const std::string srcPath,const std::string dstPath); bool CopyFile(const std::string srcPath,const std::string dstPath);
/** 拷贝目录 /** 拷贝目录
......
...@@ -19,7 +19,7 @@ using namespace std; ...@@ -19,7 +19,7 @@ using namespace std;
/** 简易日志 /** 简易日志
* *
* 轻量级日志系统,不依赖于其他第三方库,只需要包含一个头文件就可以使用。提供了4种日志级别,包括INFO,DEBUG,WARN和ERROR。 * 不依赖于其他第三方库,只需要包含一个头文件就可以使用。提供了4种日志级别,包括INFO,DEBUG,WARN和ERROR。
* *
* 示例1: * 示例1:
// 初始化日志,在./Log/目录下创建两个日志文件log1.log和log2.log(注意:目录./Log/需要存在,否则日志创建失败) // 初始化日志,在./Log/目录下创建两个日志文件log1.log和log2.log(注意:目录./Log/需要存在,否则日志创建失败)
......
#include <DetectorYOLOV5.h> #include <YOLOV5.h>
#include <migraphx/onnx.hpp> #include <migraphx/onnx.hpp>
#include <migraphx/gpu/target.hpp> #include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/quantization.hpp> #include <migraphx/quantization.hpp>
#include <opencv2/dnn.hpp>
#include <CommonUtility.h>
#include <Filesystem.h> #include <Filesystem.h>
#include <SimpleLog.h> #include <SimpleLog.h>
using namespace cv::dnn;
namespace migraphxSamples namespace migraphxSamples
{ {
DetectorYOLOV5::DetectorYOLOV5():logFile(NULL) DetectorYOLOV5::DetectorYOLOV5()
{ {
} }
...@@ -28,19 +25,24 @@ DetectorYOLOV5::~DetectorYOLOV5() ...@@ -28,19 +25,24 @@ DetectorYOLOV5::~DetectorYOLOV5()
ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializationParameterOfDetector) ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializationParameterOfDetector)
{ {
// 初始化(获取日志文件,加载配置文件等) // 读取配置文件
ErrorCode errorCode=DoCommonInitialization(initializationParameterOfDetector); std::string configFilePath=initializationParameterOfDetector.configFilePath;
if(errorCode!=SUCCESS) if(Exists(configFilePath)==false)
{ {
LOG_ERROR(logFile,"fail to DoCommonInitialization\n"); LOG_ERROR(stdout, "no configuration file!\n");
return errorCode; return CONFIG_FILE_NOT_EXIST;
} }
LOG_INFO(logFile,"succeed to DoCommonInitialization\n"); if(!configurationFile.open(configFilePath, cv::FileStorage::READ))
{
LOG_ERROR(stdout, "fail to open configuration file\n");
return FAIL_TO_OPEN_CONFIG_FILE;
}
LOG_INFO(stdout, "succeed to open configuration file\n");
// 获取配置文件参数 // 获取配置文件参数
FileNode netNode = configurationFile["DetectorYOLOV5"]; cv::FileNode netNode = configurationFile["DetectorYOLOV5"];
string modelPath=initializationParameter.parentPath+(string)netNode["ModelPath"]; std::string modelPath=(std::string)netNode["ModelPath"];
string pathOfClassNameFile=(string)netNode["ClassNameFile"]; std::string pathOfClassNameFile=(std::string)netNode["ClassNameFile"];
yolov5Parameter.confidenceThreshold = (float)netNode["ConfidenceThreshold"]; yolov5Parameter.confidenceThreshold = (float)netNode["ConfidenceThreshold"];
yolov5Parameter.nmsThreshold = (float)netNode["NMSThreshold"]; yolov5Parameter.nmsThreshold = (float)netNode["NMSThreshold"];
yolov5Parameter.objectThreshold = (float)netNode["ObjectThreshold"]; yolov5Parameter.objectThreshold = (float)netNode["ObjectThreshold"];
...@@ -50,17 +52,21 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ ...@@ -50,17 +52,21 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
// 加载模型 // 加载模型
if(Exists(modelPath)==false) if(Exists(modelPath)==false)
{ {
LOG_ERROR(logFile,"%s not exist!\n",modelPath.c_str()); LOG_ERROR(stdout,"%s not exist!\n",modelPath.c_str());
return MODEL_NOT_EXIST; return MODEL_NOT_EXIST;
} }
net = migraphx::parse_onnx(modelPath); net = migraphx::parse_onnx(modelPath);
LOG_INFO(logFile,"succeed to load model: %s\n",GetFileName(modelPath).c_str()); LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// 获取模型输入属性 // 获取模型输入属性
std::pair<std::string, migraphx::shape> inputAttribute=*(net.get_parameter_shapes().begin()); std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
inputName=inputAttribute.first; inputName=inputMap.begin()->first;
inputShape=inputAttribute.second; inputShape=inputMap.begin()->second;
inputSize=cv::Size(inputShape.lens()[3],inputShape.lens()[2]); int N=inputShape.lens()[0];
int C=inputShape.lens()[1];
int H=inputShape.lens()[2];
int W=inputShape.lens()[3];
inputSize=cv::Size(W,H);
// 设置模型为GPU模式 // 设置模型为GPU模式
migraphx::target gpuTarget = migraphx::gpu::target{}; migraphx::target gpuTarget = migraphx::gpu::target{};
...@@ -74,20 +80,20 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ ...@@ -74,20 +80,20 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
// 编译模型 // 编译模型
migraphx::compile_options options; migraphx::compile_options options;
options.device_id=0; // 设置GPU设备,默认为0号设备 options.device_id=0; // 设置GPU设备,默认为0号设备
options.offload_copy=true; // 设置offload_copy options.offload_copy=true;
net.compile(gpuTarget,options); net.compile(gpuTarget,options);
LOG_INFO(logFile,"succeed to compile model: %s\n",GetFileName(modelPath).c_str()); LOG_INFO(stdout,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
// Run once by itself // warm up
migraphx::parameter_map inputData; std::unordered_map<std::string, migraphx::argument> inputData;
inputData[inputName]=migraphx::generate_argument(inputShape); inputData[inputName]=migraphx::argument{inputShape};
net.eval(inputData); net.eval(inputData);
// 读取类别名 // 读取类别名
if(!pathOfClassNameFile.empty()) if(!pathOfClassNameFile.empty())
{ {
ifstream classNameFile(pathOfClassNameFile); std::ifstream classNameFile(pathOfClassNameFile);
string line; std::string line;
while (getline(classNameFile, line)) while (getline(classNameFile, line))
{ {
classNames.push_back(line); classNames.push_back(line);
...@@ -99,12 +105,12 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ ...@@ -99,12 +105,12 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
} }
// log // log
LOG_INFO(logFile,"InputSize:%dx%d\n",inputSize.width,inputSize.height); LOG_INFO(stdout,"InputSize:%dx%d\n",inputSize.width,inputSize.height);
LOG_INFO(logFile,"InputName:%s\n",inputName.c_str()); LOG_INFO(stdout,"InputName:%s\n",inputName.c_str());
LOG_INFO(logFile,"ConfidenceThreshold:%f\n",yolov5Parameter.confidenceThreshold); LOG_INFO(stdout,"ConfidenceThreshold:%f\n",yolov5Parameter.confidenceThreshold);
LOG_INFO(logFile,"NMSThreshold:%f\n",yolov5Parameter.nmsThreshold); LOG_INFO(stdout,"NMSThreshold:%f\n",yolov5Parameter.nmsThreshold);
LOG_INFO(logFile,"objectThreshold:%f\n",yolov5Parameter.objectThreshold); LOG_INFO(stdout,"objectThreshold:%f\n",yolov5Parameter.objectThreshold);
LOG_INFO(logFile,"NumberOfClasses:%d\n",yolov5Parameter.numberOfClasses); LOG_INFO(stdout,"NumberOfClasses:%d\n",yolov5Parameter.numberOfClasses);
return SUCCESS; return SUCCESS;
...@@ -114,21 +120,22 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe ...@@ -114,21 +120,22 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
{ {
if(srcImage.empty()||srcImage.type()!=CV_8UC3) if(srcImage.empty()||srcImage.type()!=CV_8UC3)
{ {
LOG_ERROR(logFile, "image error!\n"); LOG_ERROR(stdout, "image error!\n");
return IMAGE_ERROR; return IMAGE_ERROR;
} }
// 预处理并转换为NCHW // 数据预处理并转换为NCHW格式
cv::Mat inputBlob; cv::Mat inputBlob;
blobFromImage(srcImage, cv::dnn::blobFromImage(srcImage,
inputBlob, inputBlob,
1 / 255.0, 1 / 255.0,
inputSize, inputSize,
Scalar(0, 0, 0), cv::Scalar(0, 0, 0),
true, true,
false); false);
// 输入数据
migraphx::parameter_map inputData; // 创建输入数据
std::unordered_map<std::string, migraphx::argument> inputData;
inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data}; inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
// 推理 // 推理
...@@ -192,7 +199,7 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe ...@@ -192,7 +199,7 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
//执行non maximum suppression消除冗余重叠boxes //执行non maximum suppression消除冗余重叠boxes
std::vector<int> indices; std::vector<int> indices;
dnn::NMSBoxes(boxes, confidences, yolov5Parameter.confidenceThreshold, yolov5Parameter.nmsThreshold, indices); cv::dnn::NMSBoxes(boxes, confidences, yolov5Parameter.confidenceThreshold, yolov5Parameter.nmsThreshold, indices);
for (size_t i = 0; i < indices.size(); ++i) for (size_t i = 0; i < indices.size(); ++i)
{ {
int idx = indices[i]; int idx = indices[i];
...@@ -212,39 +219,4 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe ...@@ -212,39 +219,4 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDe
return SUCCESS; return SUCCESS;
} }
ErrorCode DetectorYOLOV5::DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector)
{
initializationParameter=initializationParameterOfDetector;
// 获取日志文件
logFile=LogManager::GetInstance()->GetLogFile(initializationParameter.logName);
// 加载配置文件
std::string configFilePath=initializationParameter.configFilePath;
if(!Exists(configFilePath))
{
LOG_ERROR(logFile, "no configuration file!\n");
return CONFIG_FILE_NOT_EXIST;
}
if(!configurationFile.open(configFilePath, FileStorage::READ))
{
LOG_ERROR(logFile, "fail to open configuration file\n");
return FAIL_TO_OPEN_CONFIG_FILE;
}
LOG_INFO(logFile, "succeed to open configuration file\n");
// 修改父路径
std::string &parentPath = initializationParameter.parentPath;
if (!parentPath.empty())
{
if(!IsPathSeparator(parentPath[parentPath.size() - 1]))
{
parentPath+=PATH_SEPARATOR;
}
}
return SUCCESS;
}
} }
#ifndef __DETECTOR_YOLOV5_H__ #ifndef __DETECTOR_YOLOV5_H__
#define __DETECTOR_YOLOV5_H__ #define __DETECTOR_YOLOV5_H__
#include <string>
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <opencv2/opencv.hpp>
#include <CommonDefinition.h>
using namespace std; #include <CommonDefinition.h>
using namespace cv;
using namespace migraphx;
namespace migraphxSamples namespace migraphxSamples
{ {
...@@ -33,21 +28,16 @@ public: ...@@ -33,21 +28,16 @@ public:
ErrorCode Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection); ErrorCode Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection);
private:
ErrorCode DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector);
private: private:
cv::FileStorage configurationFile; cv::FileStorage configurationFile;
InitializationParameterOfDetector initializationParameter;
FILE *logFile;
migraphx::program net; migraphx::program net;
cv::Size inputSize; cv::Size inputSize;
string inputName; std::string inputName;
migraphx::shape inputShape; migraphx::shape inputShape;
bool useFP16; bool useFP16;
vector<string> classNames; std::vector<std::string> classNames;
YOLOV5Parameter yolov5Parameter; YOLOV5Parameter yolov5Parameter;
}; };
......
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#include <Sample.h> #include <SimpleLog.h>
#include <Filesystem.h>
#include <YOLOV5.h>
void MIGraphXSamplesUsage(char* programName) int main()
{ {
printf("Usage : %s <index> \n", programName); // 创建YOLOV5检测器
printf("index:\n"); migraphxSamples::DetectorYOLOV5 detector;
printf("\t 0) YOLOV5 sample.\n"); migraphxSamples::InitializationParameterOfDetector initParamOfDetectorYOLOV5;
} initParamOfDetectorYOLOV5.configFilePath = CONFIG_FILE;
migraphxSamples::ErrorCode errorCode=detector.Initialize(initParamOfDetectorYOLOV5);
int main(int argc, char *argv[]) if(errorCode!=migraphxSamples::SUCCESS)
{
if (argc < 2 || argc > 2)
{ {
MIGraphXSamplesUsage(argv[0]); LOG_ERROR(stdout, "fail to initialize detector!\n");
return -1; exit(-1);
} }
if (!strncmp(argv[1], "-h", 2)) LOG_INFO(stdout, "succeed to initialize detector\n");
{
MIGraphXSamplesUsage(argv[0]); // 读取测试图片
return 0; cv::Mat srcImage = cv::imread("../Resource/Images/bus.jpg",1);
}
switch (*argv[1]) // 推理
std::vector<migraphxSamples::ResultOfDetection> predictions;
double time1 = cv::getTickCount();
detector.Detect(srcImage,predictions);
double time2 = cv::getTickCount();
double elapsedTime = (time2 - time1)*1000 / cv::getTickFrequency();
LOG_INFO(stdout, "inference time:%f ms\n", elapsedTime);
// 获取推理结果
LOG_INFO(stdout,"========== Detection Results ==========\n");
for(int i=0;i<predictions.size();++i)
{ {
case '0': migraphxSamples::ResultOfDetection result=predictions[i];
{ cv::rectangle(srcImage,result.boundingBox,cv::Scalar(0,255,255),2);
Sample_DetectorYOLOV5();
break; LOG_INFO(stdout,"box:%d %d %d %d,label:%d,confidence:%f\n",predictions[i].boundingBox.x,
} predictions[i].boundingBox.y,predictions[i].boundingBox.width,predictions[i].boundingBox.height,predictions[i].classID,predictions[i].confidence);
default :
{
MIGraphXSamplesUsage(argv[0]);
break;
}
} }
cv::imwrite("Result.jpg",srcImage);
LOG_INFO(stdout,"Detection results have been saved to ./Result.jpg\n");
return 0; return 0;
} }
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment