Commit 4d3d722b authored by Your Name's avatar Your Name
Browse files

提交RetinaFace C++示例

parent 18da8bf0
......@@ -12,35 +12,89 @@ RetinaFace模型 有几个主要特点:
- 引入 SSH 算法的 Context Modeling;
- 多任务训练,提供额外的监督信息。
## 推理
## 构建安装
在光源可拉取推理的docker镜像,RetinaFace工程推荐的镜像如下:
```shell
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
```
### 安装Opencv依赖
```shell
cd <path_to_migraphx_samples>
sh ./3rdParty/InstallOpenCVDependences.sh
```
### 修改CMakeLists.txt
### 环境配置
- 如果使用ubuntu系统,需要修改CMakeLists.txt中依赖库路径:
将"${CMAKE_CURRENT_SOURCE_DIR}/depend/lib64/"修改为"${CMAKE_CURRENT_SOURCE_DIR}/depend/lib/"
[光源](https://www.sourcefind.cn/#/image/dcu/custom)可拉取用于推理的docker镜像,RetinaFace 模型推理推荐的镜像如下:
- **MIGraphX2.3.0及以上版本需要c++17**
### 安装OpenCV并构建工程
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort_dcu_1.14.0_migraphx2.5.2_dtk22.10.1
rbuild build -d depend
```
[光合开发者社区](https://cancon.hpccube.com:65024/4/main/)可下载MIGraphX安装包,python依赖安装:
### 设置环境变量
将依赖库依赖加入环境变量LD_LIBRARY_PATH,在~/.bashrc中添加如下语句:
**Centos**:
```
pip install -r requirements.txt
export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib64/:$LD_LIBRARY_PATH
```
**Ubuntu**:
```
export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib/:$LD_LIBRARY_PATH
```
然后执行:
```
source ~/.bashrc
```
## 推理
### C++版本推理
安装DTK版的Pytorch和torchvision,下载地址:https://cancon.hpccube.com:65024/4/main/pytorch,https://cancon.hpccube.com:65024/4/main/vision
成功编译RetinaFace工程后,在build目录下输入如下命令运行该示例:
### 运行示例
```
./MIGraphX_Samples 0
```
程序运行结束会在build目录生成RetinaFace人脸检测结果图像。
<img src="./Resource/Images/Result_1.jpg" alt="Result" style="zoom:67%;" />
RetinaFace模型的推理示例程序是RetinaFace_infer_migraphx.py,使用如下命令运行该推理示例:
### Python版本推理
RetinaFace模型的推理示例程序是RetinaFace_infer_migraphx.py,进入python文件夹使用如下命令运行该推理示例:
```
# 进入python示例目录
cd ./Python
# 安装依赖
pip install -r requirements.txt
# 运行程序
python RetinaFace_infer_migraphx.py
```
程序运行结束会在当前目录生成RetinaFace检测结果图像。
<img src="./curve/Result.jpg" alt="Result" style="zoom: 50%;" />
<img src="./Resource/Images/Result_2.jpg" alt="Result_2" style="zoom:67%;" />
## 历史版本
......
<?xml version="1.0" encoding="GB2312"?>
<opencv_storage>
<!--RetinaFace检测器-->
<DetectorRetinaFace>
<ModelPath>"../Resource/Models/Detector/RetinaFace/mobilenet0.25_Final.onnx"</ModelPath>
<Scale>1.0</Scale><!--缩放尺度-->
<MeanValue1>104</MeanValue1><!--均值,顺序为bgr-->
<MeanValue2>117</MeanValue2>
<MeanValue3>123</MeanValue3>
<SwapRB>0</SwapRB>
<Crop>0</Crop>
<UseInt8>0</UseInt8><!--是否使用int8,不支持-->
<UseFP16>0</UseFP16><!--是否使用FP16-->
<!--////////////////// RetinaFace检测器参数 ////////////////// -->
<!--priorbox层的个数-->
<PriorBoxLayerNumber>3</PriorBoxLayerNumber>
<!--每个priorbox层的minisize和maxSize(需要与输出检测层顺序保持一致,下面涉及每个priorbox层参数的都需要保持顺序一致)-->
<MinSize11>16</MinSize11>
<MinSize12>32</MinSize12>
<MinSize21>64</MinSize21>
<MinSize22>128</MinSize22>
<MinSize31>256</MinSize31>
<MinSize32>512</MinSize32>
<!--每个priorbox层的Flip和Clip(使用0,1表示)-->
<Flip1>0</Flip1>
<Flip2>0</Flip2>
<Flip3>0</Flip3>
<Clip1>0</Clip1>
<Clip2>0</Clip2>
<Clip3>0</Clip3>
<!--每个priorbox层的宽高比(由于RetinaFace只包含宽高比为1的anchor,所以这里不需要设置宽高比)-->
<!-- <AspectRatio11>0.3333</AspectRatio11>
<AspectRatio12>0.25</AspectRatio12>
<AspectRatio21>0.3333</AspectRatio21>
<AspectRatio22>0.25</AspectRatio22>
<AspectRatio31>0.3333</AspectRatio31>
<AspectRatio32>0.25</AspectRatio32>
<AspectRatio41>0.3333</AspectRatio41>
<AspectRatio42>0.25</AspectRatio42> -->
<!--每个priorbox层的step-->
<PriorBoxStepWidth1>8</PriorBoxStepWidth1><!--第一个priorbox层的step的width-->
<PriorBoxStepWidth2>16</PriorBoxStepWidth2>
<PriorBoxStepWidth3>32</PriorBoxStepWidth3>
<PriorBoxStepHeight1>8</PriorBoxStepHeight1><!--第一个priorbox层的step的height-->
<PriorBoxStepHeight2>16</PriorBoxStepHeight2>
<PriorBoxStepHeight3>32</PriorBoxStepHeight3>
<!--priorbox层中的offset-->
<Offset>0.5</Offset>
<!--DetectionOutput参数-->
<ClassNumber>2</ClassNumber>
<TopK>400</TopK>
<KeepTopK>200</KeepTopK>
<NMSThreshold>0.3</NMSThreshold>
<ConfidenceThreshold>0.9</ConfidenceThreshold>
</DetectorRetinaFace>
</opencv_storage>
This diff is collapsed.
#ifndef __DETECTOR_RETINAFACE_H__
#define __DETECTOR_RETINAFACE_H__
#include <string>
#include <migraphx/program.hpp>
#include <opencv2/opencv.hpp>
#include <CommonDefinition.h>
#include <SSDDefinition.h>
using namespace std;
using namespace cv;
using namespace migraphx;
namespace migraphxSamples
{
// RetinaFace face detector: loads an ONNX model through MIGraphX and applies
// SSD-style post-processing (priorbox / softmax / detection-output layers).
// Configuration is read from an OpenCV FileStorage XML file; implementations
// of all methods live in the corresponding .cpp file (not shown here).
class DetectorRetinaFace
{
public:
DetectorRetinaFace();
~DetectorRetinaFace();
// Loads configuration and the model. Returns SUCCESS on success.
ErrorCode Initialize(InitializationParameterOfDetector initializationParameterOfDetector);
// Runs face detection on srcImage; detections are written to resultsOfDetection.
ErrorCode Detect(const cv::Mat &srcImage,std::vector<ResultOfDetection> &resultsOfDetection);
private:
// Shared setup used by Initialize (reads config, opens log, builds the network).
ErrorCode DoCommonInitialization(InitializationParameterOfDetector initializationParameterOfDetector);
// Reads the SSD post-processing parameters from the configuration file.
void GetSSDParameter();
// Converts raw classification/regression outputs into detection results.
void GetResult(const std::vector<std::vector<float>> &classification,const std::vector<std::vector<float>> &regression,std::vector<ResultOfDetection> &resultsOfDetection);
// Reorders a CHW tensor for per-anchor access (see implementation for exact layout).
std::vector<float> PermuteLayer(const std::vector<float> &data,int width,int height,int channels);
// Generates prior boxes for one detection layer into priorboxOutputData.
void PriorBoxLayer(int indexOfLayer,int* priorboxOutputData);
void SoftmaxLayer(int softMaxWidth[],int* softMaxInputData[], int* softMaxOutputData);
void DetectionOutputLayer(int* allLocPreds[], int* allPriorBoxes[],int* confScores, int* assistMemPool);
void ComputeSoftMax(int* src, int size, int* dst);
// In-place quicksort using an explicit stack (see QuickSortStack).
void QuickSort(int* src,int low, int high, QuickSortStack *stack,int maxNum);
void NonMaxSuppression( int* proposals, int anchorsNum,int NMSThresh,int maxRoiNum);
void Swap(int* src1, int* src2);
// Computes union area and intersection area of two boxes (integer coordinates).
void ComputeOverlap(int xMin1, int yMin1, int xMax1, int yMax1, int xMin2,int yMin2, int xMax2, int yMax2, int* areaSum, int* areaInter);
void CreateDetectionResults(std::vector<ResultOfDetection> &resultsOfDetection);
private:
cv::FileStorage configurationFile; // parsed XML configuration
InitializationParameterOfDetector initializationParameter;
FILE *logFile;
migraphx::program net; // compiled MIGraphX network
cv::Size inputSize; // network input resolution
string inputName; // name of the network input tensor
migraphx::shape inputShape;
float scale; // preprocessing scale factor (from config)
cv::Scalar meanValue; // per-channel mean, BGR order (from config)
bool swapRB; // swap R and B channels during preprocessing
bool crop;
bool useInt8; // int8 inference flag (config notes it is unsupported)
bool useFP16; // FP16 inference flag
SSDParameter ssdParameter; // SSD post-processing parameters and buffers
};
}
#endif
// SSD定义
#ifndef __SSD_DEFINITION_H__
#define __SSD_DEFINITION_H__
#include <string>
#include <vector>
using namespace std;
namespace migraphxSamples
{
#define SSD_MAX_PRIORBOX_LAYER_NUM 10 // Maximum number of detection (priorbox) layers supported

// Parameters and scratch buffers for SSD-style post-processing
// (priorbox generation, softmax layer and detection-output layer).
// Every member carries a default initializer so a freshly constructed
// SSDParameter is fully zero-initialized: the original constructor left
// numberOfPriorBoxLayer, classNum, topK, all C arrays and the
// classification/regression pointer arrays indeterminate, which is
// undefined behavior if read before setup completes.
typedef struct _SSDParameter
{
    int numberOfPriorBoxLayer = 0; // number of detection layers actually in use
    // Model parameters (per layer; *2 leaves room for paired cls/reg outputs)
    int convHeight[SSD_MAX_PRIORBOX_LAYER_NUM * 2] = {};
    int convWidth[SSD_MAX_PRIORBOX_LAYER_NUM * 2] = {};
    int convChannel[SSD_MAX_PRIORBOX_LAYER_NUM * 2] = {};
    // PriorBoxLayer parameters
    int priorBoxWidth[SSD_MAX_PRIORBOX_LAYER_NUM] = {};  // priorbox width per detection layer
    int priorBoxHeight[SSD_MAX_PRIORBOX_LAYER_NUM] = {}; // priorbox height per detection layer
    std::vector<std::vector<float>> priorBoxMinSize;     // minSize list per detection layer
    std::vector<std::vector<float>> priorBoxMaxSize;     // maxSize list per detection layer
    int minSizeNum[SSD_MAX_PRIORBOX_LAYER_NUM] = {};     // count of minSizes per detection layer
    int maxSizeNum[SSD_MAX_PRIORBOX_LAYER_NUM] = {};     // count of maxSizes per detection layer
    int srcImageHeight = 0; // source image size
    int srcImageWidth = 0;
    int inputAspectRatioNum[SSD_MAX_PRIORBOX_LAYER_NUM] = {}; // aspect-ratio count per layer
    std::vector<std::vector<float>> priorBoxAspectRatio;      // aspect ratios per layer
    float priorBoxStepWidth[SSD_MAX_PRIORBOX_LAYER_NUM] = {}; // step width per layer
    float priorBoxStepHeight[SSD_MAX_PRIORBOX_LAYER_NUM] = {};// step height per layer
    float offset = 0.0f;
    int flip[SSD_MAX_PRIORBOX_LAYER_NUM] = {};
    int clip[SSD_MAX_PRIORBOX_LAYER_NUM] = {};
    int priorBoxVar[4] = {};
    // SoftmaxLayer parameters
    int softMaxInChn[SSD_MAX_PRIORBOX_LAYER_NUM] = {};
    int softMaxInHeight = 0;
    int concatNum = 0;
    int softMaxOutWidth = 0;
    int softMaxOutHeight = 0;
    int softMaxOutChn = 0;
    // DetectionOutLayer parameters
    int classNum = 0; // number of classes (background included)
    int topK = 0;
    int keepTopK = 0;
    int NMSThresh = 0;
    int confThresh = 0;
    int detectInputChn[SSD_MAX_PRIORBOX_LAYER_NUM] = {};
    int convStride[SSD_MAX_PRIORBOX_LAYER_NUM] = {};
    // Scratch buffers (non-owning here; allocated/released by the detector)
    int *buffer = nullptr;
    int *classification[SSD_MAX_PRIORBOX_LAYER_NUM] = {}; // classification data per layer
    int *regression[SSD_MAX_PRIORBOX_LAYER_NUM] = {};     // regression data per layer
    int *priorboxOutputData = nullptr;
    int *softMaxOutputData = nullptr;
    int *getResultBuffer = nullptr;
    int *dstScore = nullptr;
    int *dstRoi = nullptr;
    int *classRoiNum = nullptr;
    // Default member initializers above make a hand-written constructor
    // unnecessary; the defaulted one now zero-initializes everything.
    _SSDParameter() = default;
}SSDParameter;
// Explicit stack frame for the iterative quicksort in the detector.
typedef struct _QuickSortStack
{
    int min = 0;
    int max = 0;
}QuickSortStack;
}
#endif
#include <Sample.h>
#include <opencv2/dnn.hpp>
#include <SimpleLog.h>
#include <Filesystem.h>
#include <DetectorRetinaFace.h>
#include <fstream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
using namespace migraphx;
using namespace migraphxSamples;
void Sample_DetectorRetinaFace()
{
// 创建RetinaFace检测器
DetectorRetinaFace detector;
InitializationParameterOfDetector initParamOfDetectorRetinaFace;
initParamOfDetectorRetinaFace.parentPath = "";
initParamOfDetectorRetinaFace.configFilePath = CONFIG_FILE;
initParamOfDetectorRetinaFace.logName = "";
ErrorCode errorCode=detector.Initialize(initParamOfDetectorRetinaFace);
if(errorCode!=SUCCESS)
{
LOG_ERROR(stdout, "fail to initialize detector!\n");
exit(-1);
}
LOG_INFO(stdout, "succeed to initialize detector\n");
// 读取测试图片
Mat srcImage=imread("../Resource/Images/FaceDetect.jpg",1);
// 推理
std::vector<ResultOfDetection> predictions;
double time1 = getTickCount();
detector.Detect(srcImage,predictions);
double time2 = getTickCount();
double elapsedTime = (time2 - time1)*1000 / getTickFrequency();
LOG_INFO(stdout, "inference time:%f ms\n", elapsedTime);
// 获取推理结果
LOG_INFO(stdout,"========== Detection Results ==========\n");
for(int i=0;i<predictions.size();++i)
{
ResultOfDetection result=predictions[i];
cv::rectangle(srcImage,result.boundingBox,Scalar(0,255,255),2);
LOG_INFO(stdout,"box:%d %d %d %d,label:%d,confidence:%f\n",predictions[i].boundingBox.x,
predictions[i].boundingBox.y,predictions[i].boundingBox.width,predictions[i].boundingBox.height,predictions[i].classID,predictions[i].confidence);
}
imwrite("Result.jpg",srcImage);
LOG_INFO(stdout,"Detection results have been saved to ./Result.jpg\n");
}
\ No newline at end of file
// Sample program declarations.
#ifndef __SAMPLE_H__
#define __SAMPLE_H__
// RetinaFace sample: runs face detection on a test image and saves the
// annotated result to Result.jpg (implementation in the sample .cpp file).
void Sample_DetectorRetinaFace();
#endif
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment