Commit e7b90068 authored by Your Name

Add PaddleOCR C++ sample

parent 905b1c93
# This file is used to ignore files which are generated
# ----------------------------------------------------------------------------
build/*
depend/*
.vscode/*
core.*
# qtcreator generated files
*.pro.user*
# VS
*.sdf
*.opensdf
*.ilk
*.pdb
*.exp
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
# Executables
*.exe
*.out
*.app
# xemacs temporary files
*.flc
# Vim temporary files
.*.swp
# others
*.avi
*.pyc
*.egg
#! /bin/sh
############### Ubuntu ###############
# Reference: https://docs.opencv.org/3.4.11/d7/d9f/tutorial_linux_install.html
# apt-get install build-essential -y
# apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev -y
# apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev -y # packages needed for image processing, optional
############### CentOS ###############
yum install gcc gcc-c++ gtk2-devel gimp-devel gimp-devel-tools gimp-help-browser zlib-devel libtiff-devel libjpeg-devel libpng-devel gstreamer-devel libavc1394-devel libraw1394-devel libdc1394-devel jasper-devel jasper-utils swig python libtool nasm -y
############################ Install dependencies online ###############################
#cd ./3rdParty
#pip install rbuild-master.tar.gz
############################ Install dependencies offline ###############################
# Install dependencies
cd ./3rdParty/rbuild_depend
pip install click-6.6-py2.py3-none-any.whl
pip install six-1.15.0-py2.py3-none-any.whl
pip install subprocess32-3.5.4.tar.gz
pip install cget-0.1.9.tar.gz
# Install rbuild
cd ../
pip install rbuild-master.tar.gz
# Minimum required CMake version
cmake_minimum_required(VERSION 3.5)
# Project name
project(MIGraphX_Samples)
# Compiler settings
set(CMAKE_CXX_COMPILER g++)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") # MIGraphX versions above 2.2 require C++17
set(CMAKE_BUILD_TYPE release)
# Include directories
set(INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Src/
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/
${CMAKE_CURRENT_SOURCE_DIR}/Src/PaddleOCR/VLPR/
$ENV{DTKROOT}/include/
${CMAKE_CURRENT_SOURCE_DIR}/depend/include/)
include_directories(${INCLUDE_PATH})
# Library search paths
set(LIBRARY_PATH ${CMAKE_CURRENT_SOURCE_DIR}/depend/lib64/
$ENV{DTKROOT}/lib/)
link_directories(${LIBRARY_PATH})
# Libraries to link
set(LIBRARY opencv_core
opencv_imgproc
opencv_imgcodecs
opencv_dnn
migraphx_ref
migraphx
migraphx_c
migraphx_device
migraphx_gpu
migraphx_onnx)
link_libraries(${LIBRARY})
# Source files
set(SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/Src/main.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Sample.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/PaddleOCR/VLPR/clipper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/PaddleOCR/VLPR/OcrDB.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/PaddleOCR/VLPR/utility.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/PaddleOCR/VLPR/OcrSVTR.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/PaddleOCR/VLPR/VLPR.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/CommonUtility.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Src/Utility/Filesystem.cpp)
# Executable target
add_executable(MIGraphX_Samples ${SOURCE_FILES})
......@@ -272,7 +272,7 @@ class process_pred(object):
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
result_list.append((text, round(np.mean(conf_list), 2)))
return result_list
def __call__(self, preds, label=None):
......@@ -291,8 +291,8 @@ class det_rec_functions(object):
def __init__(self, image):
self.img = image.copy()
self.det_file = './weights/db.onnx'
self.rec_file = './weights/svtr.onnx'
self.det_file = '../Resource/Models/PaddleOCR/VLPR/db.onnx'
self.rec_file = '../Resource/Models/PaddleOCR/VLPR/svtr.onnx'
# Parse the detection model
detInput = {"x":[1,3,2496,2496]}
......@@ -319,7 +319,7 @@ class det_rec_functions(object):
print("Success to compile SVTR")
self.infer_before_process_op, self.det_re_process_op = self.get_process()
self.postprocess_op = process_pred('./weights/ppocr_keys_v1.txt', 'ch', True)
self.postprocess_op = process_pred('../Resource/Models/PaddleOCR/VLPR/ppocr_keys_v1.txt', 'ch', True)
# Image preprocessing
def transform(self, data, ops=None):
......@@ -457,9 +457,6 @@ class det_rec_functions(object):
shape_part_list = np.expand_dims(shape_part_list, axis=0)
# MIGraphX inference
inputOfReshape = img_part.shape
inputShapeMap={self.inputName:[1,3,inputOfReshape[2],inputOfReshape[3]]}
migraphx.reshape2(self.modelDet, inputShapeMap)
resultDets = self.modelDet.run({self.modelDet.get_parameter_names()[0]: migraphx.argument(img_part)})
# Get the data of the first output node (migraphx.argument type)
resultDet = resultDets[0]
......@@ -503,10 +500,6 @@ class det_rec_functions(object):
img = img[np.newaxis, :]
# MIGraphX inference
inputOfReshape = img.shape
inputShapeMap={self.inputName:[1,3,inputOfReshape[2],inputOfReshape[3]]}
migraphx.reshape2(self.modelRec, inputShapeMap)
results = self.modelRec.run({self.modelRec.get_parameter_names()[0]: migraphx.argument(img)})
# Get the data of the first output node (migraphx.argument type)
result = results[0]
......@@ -536,7 +529,7 @@ class det_rec_functions(object):
if __name__=='__main__':
image = cv2.imread('./images/vlpr.jpg')
image = cv2.imread('../Resource/Images/vlpr.jpg')
start = time.time()
ocr_sys = det_rec_functions(image)
dt_boxes = ocr_sys.get_boxes()
......
shapely
pyclipper
numpy
opencv-contrib-python==4.6.0.66
pillow
......@@ -8,37 +8,93 @@
DBNet is a segmentation-based text detection method. Traditional segmentation approaches need a fixed, hand-set threshold; DBNet instead inserts the binarization operation into the segmentation network and optimizes it jointly, so the network learns to adaptively predict a threshold for every pixel and can detect text of arbitrary shape in natural scenes at the pixel level. SVTR is an end-to-end text recognition model: a single vision model handles both feature extraction and text transcription in one pass while keeping inference fast. The Baidu PaddleOCR open-source project provides pre-trained license plate recognition models; this sample runs inference with PaddleOCR's blue/green/yellow plate recognition model.
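The sample runs as a two-stage pipeline: DBNet first predicts a probability map from which the plate boxes are extracted, and each cropped box is then transcribed by SVTR. The sketch below outlines that flow with the MIGraphX Python API; it is a minimal illustration only, and the file paths, the fixed input shape, and the omitted post-processing are assumptions rather than the exact code of this sample.
```python
# Minimal detect-then-recognize sketch with the MIGraphX Python API (illustrative only).
# Paths, the fixed input shape and the skipped post-processing are assumptions.
import cv2
import numpy as np
import migraphx

# Parse and compile the DBNet detector with a fixed input shape
det = migraphx.parse_onnx("db.onnx", map_input_dims={"x": [1, 3, 2496, 2496]})
det.compile(migraphx.get_target("gpu"), offload_copy=True)

# Parse and compile the SVTR recognizer
rec = migraphx.parse_onnx("svtr.onnx")
rec.compile(migraphx.get_target("gpu"), offload_copy=True)

# Detection: scaled NCHW blob in, probability map out
# (the real sample also applies per-channel mean/std normalization)
img = cv2.imread("vlpr.jpg")
blob = cv2.dnn.blobFromImage(cv2.resize(img, (2496, 2496)), 1.0 / 255.0).astype(np.float32)
det_name = det.get_parameter_names()[0]
prob_map = det.run({det_name: migraphx.argument(blob)})[0]

# Box extraction (thresholding, unclip) and per-box recognition with `rec` would follow here,
# as implemented in PaddleOCR_infer_migraphx.py and the C++ sample.
```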
## Inference
## Build and Installation
### Environment Setup
A docker image for inference can be pulled from SourceFind; the image recommended by the PaddleOCR project is:
A docker image for inference can be pulled from [SourceFind](https://www.sourcefind.cn/#/image/dcu/custom); the recommended image for PaddleOCR license plate recognition inference is:
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
```
### Install OpenCV Dependencies
```
cd <path_to_migraphx_samples>
sh ./3rdParty/InstallOpenCVDependences.sh
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
### Modify CMakeLists.txt
- On Ubuntu, change the dependency library path in CMakeLists.txt:
  replace "${CMAKE_CURRENT_SOURCE_DIR}/depend/lib64/" with "${CMAKE_CURRENT_SOURCE_DIR}/depend/lib/"
- **MIGraphX 2.3.0 and later require C++17**
### Install OpenCV and Build the Project
```
rbuild build -d depend
```
The MIGraphX installation package can be downloaded from the [光合开发者社区](https://cancon.hpccube.com:65024/4/main/) developer community; install the Python dependencies:
### Set Environment Variables
Add the dependency library path to LD_LIBRARY_PATH by appending the following to ~/.bashrc:
**CentOS**:
```
pip install -r requirements.txt
export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib64/:$LD_LIBRARY_PATH
```
**Ubuntu**:
```
export LD_LIBRARY_PATH=<path_to_migraphx_samples>/depend/lib/:$LD_LIBRARY_PATH
```
Note: PaddleOCR license plate recognition uses dynamic-shape inference, which requires MIGraphX >= 3.0.0.
Then run:
### Run the Sample
```
source ~/.bashrc
```
## Inference
The PaddleOCR license plate recognition inference sample is PaddleOCR_infer_migraphx.py; run it with the following command:
### C++ Inference
After the PaddleOCR license plate recognition project has been built successfully, run the sample from the build directory with:
```
# Enable dynamic-shape mode
# Set the environment variable
export MIGRAPHX_DYNAMIC_SHAPE=1
# Run the sample
./MIGraphX_Samples 0
```
The PaddleOCR license plate recognition result is:
```
[皖AD19906, 0.999067]
```
### Python Inference
The project also provides a Python inference sample, which can be run as follows:
```
# Enter the Python sample directory
cd ./Python
# Install dependencies
pip install -r requirements.txt
# Run the program
python PaddleOCR_infer_migraphx.py
```
The license plate recognition result is:
The PaddleOCR license plate recognition result is:
```
[[('皖AD19906', 0.98606485)]]
......
<?xml version="1.0" encoding="GB2312"?>
<opencv_storage>
<!-- PaddleOCR license plate detection -->
<OcrDB>
<ModelPath>"../Resource/Models/PaddleOCR/VLPR/db.onnx"</ModelPath>
<BinaryThreshold>0.3</BinaryThreshold>
<BoxThreshold>0.5</BoxThreshold>
<UnclipRatio>1.6</UnclipRatio>
<LimitSideLen>2500</LimitSideLen>
<ScoreMode>"fast"</ScoreMode>
</OcrDB>
<!-- PaddleOCR license plate recognition -->
<OcrSVTR>
<ModelPath>"../Resource/Models/PaddleOCR/VLPR/svtr.onnx"</ModelPath>
<DictPath>"../Resource/Models/PaddleOCR/VLPR/ppocr_keys_v1.txt"</DictPath>
</OcrSVTR>
</opencv_storage>
#include <OcrDB.h>
#include <migraphx/onnx.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/reshape2.hpp>
#include <opencv2/dnn.hpp>
#include <CommonUtility.h>
#include <Filesystem.h>
#include <SimpleLog.h>
using namespace cv::dnn;
namespace migraphxSamples
{
DB::DB():logFile(NULL)
{
}
DB::~DB()
{
configurationFile.release();
}
ErrorCode DB::Initialize(InitializationParameterOfDB InitializationParameterOfDB)
{
// Common initialization (get the log file, load the configuration file, etc.)
ErrorCode errorCode = DoCommonInitialization(InitializationParameterOfDB);
if (errorCode!=SUCCESS)
{
LOG_ERROR(logFile, "fail to DoCommonInitialization\n");
return errorCode;
}
LOG_INFO(logFile, "success to DoCommonInitialization\n");
// Read parameters from the configuration file
FileNode netNode = configurationFile["OcrDB"];
string modelPath = initializationParameter.parentPath + (string)netNode["ModelPath"];
dbParameter.BinaryThreshold = (float)netNode["BinaryThreshold"];
dbParameter.BoxThreshold = (float)netNode["BoxThreshold"];
dbParameter.UnclipRatio = (float)netNode["UnclipRatio"];
dbParameter.LimitSideLen = (int)netNode["LimitSideLen"];
dbParameter.ScoreMode = (string)netNode["ScoreMode"];
// Load the model
if(Exists(modelPath)==false)
{
LOG_ERROR(logFile,"%s not exist!\n",modelPath.c_str());
return MODEL_NOT_EXIST;
}
migraphx::onnx_options onnx_options;
onnx_options.map_input_dims["x"]={1,3,2496,2496};
net = migraphx::parse_onnx(modelPath, onnx_options);
LOG_INFO(logFile,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
// Get the model input attributes
std::pair<std::string, migraphx::shape> inputAttribute=*(net.get_parameter_shapes().begin());
inputName=inputAttribute.first;
inputShape=inputAttribute.second;
inputSize=cv::Size(inputShape.lens()[3],inputShape.lens()[2]);
// Select the GPU target
migraphx::target gpuTarget = migraphx::gpu::target{};
// Compile the model
migraphx::compile_options options;
options.device_id=0; // select the GPU device, device 0 by default
options.offload_copy=true; // enable offload_copy
net.compile(gpuTarget,options);
LOG_INFO(logFile,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
// Run once by itself
migraphx::parameter_map inputData;
inputData[inputName]=migraphx::generate_argument(inputShape);
net.eval(inputData);
// log
LOG_INFO(logFile,"InputMaxSize:%dx%d\n",inputSize.width,inputSize.height);
LOG_INFO(logFile,"InputName:%s\n",inputName.c_str());
return SUCCESS;
}
ErrorCode DB::Infer(const cv::Mat &img, std::vector<cv::Mat> &imgList)
{
if(img.empty()||img.type()!=CV_8UC3)
{
LOG_ERROR(logFile, "image error!\n");
return IMAGE_ERROR;
}
cv::Mat srcImage;
cv::Mat resizeImg;
img.copyTo(srcImage);
int w = srcImage.cols;
int h = srcImage.rows;
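// Cap the longer side at LimitSideLen, then round width and height to multiples of 32 (DBNet input requirement)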
float ratio = 1.f;
int maxWH = std::max(h, w);
if (maxWH > dbParameter.LimitSideLen)
{
if (h > w)
{
ratio = float(dbParameter.LimitSideLen) / float(h);
}
else
{
ratio = float(dbParameter.LimitSideLen) / float(w);
}
}
int resizeH = int(float(h) * ratio);
int resizeW = int(float(w) * ratio);
resizeH = std::max(int(round(float(resizeH) / 32) * 32), 32);
resizeW = std::max(int(round(float(resizeW) / 32) * 32), 32);
cv::resize(srcImage, resizeImg, cv::Size(resizeW, resizeH));
float ratioH = float(resizeH) / float(h);
float ratioW = float(resizeW) / float(w);
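// Scale pixels to [0,1], then normalize each channel with ImageNet mean/std: (x - mean) / std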
resizeImg.convertTo(resizeImg, CV_32FC3, 1.0/255.0);
std::vector<cv::Mat> bgrChannels(3);
cv::split(resizeImg, bgrChannels);
std::vector<float> mean = {0.485f, 0.456f, 0.406f};
std::vector<float> scale = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
for (auto i = 0; i < bgrChannels.size(); i++)
{
bgrChannels[i].convertTo(bgrChannels[i], CV_32FC1, 1.0 * scale[i],
(0.0 - mean[i]) * scale[i]);
}
cv::merge(bgrChannels, resizeImg);
int rh = resizeImg.rows;
int rw = resizeImg.cols;
cv::Mat inputBlob;
inputBlob = cv::dnn::blobFromImage(resizeImg);
std::vector<std::size_t> inputShapeOfInfer={1,3,rh,rw};
// Input data
migraphx::parameter_map inputData;
inputData[inputName]= migraphx::argument{migraphx::shape(inputShape.type(),inputShapeOfInfer), (float*)inputBlob.data};
// Inference
std::vector<migraphx::argument> inferenceResults = net.eval(inputData);
// Get the inference result
migraphx::argument result = inferenceResults[0];
// Convert to a vector
migraphx::shape outputShape = result.get_shape();
int shape[]={outputShape.lens()[0],outputShape.lens()[1],outputShape.lens()[2],outputShape.lens()[3]};
int n2 = outputShape.lens()[2];
int n3 = outputShape.lens()[3];
int n = n2 * n3;
std::vector<float> out(n);
memcpy(out.data(),result.data(),sizeof(float)*outputShape.elements());
out.resize(n);
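// Build a float probability map and an 8-bit copy of the network output, then binarize with BinaryThreshold to obtain the text mask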
std::vector<float> pred(n, 0.0);
std::vector<unsigned char> cbuf(n, ' ');
for (int i = 0; i < n; i++)
{
pred[i] = (float)(out[i]);
cbuf[i] = (unsigned char)((out[i]) * 255);
}
cv::Mat cbufMap(n2, n3, CV_8UC1, (unsigned char *)cbuf.data());
cv::Mat predMap(n2, n3, CV_32F, (float *)pred.data());
const double threshold = dbParameter.BinaryThreshold * 255;
const double maxvalue = 255;
cv::Mat bitMap;
cv::threshold(cbufMap, bitMap, threshold, maxvalue, cv::THRESH_BINARY);
std::vector<std::vector<std::vector<int>>> boxes;
DBPostProcessor postProcessor;
boxes = postProcessor.BoxesFromBitmap(predMap, bitMap, dbParameter.BoxThreshold, dbParameter.UnclipRatio, dbParameter.ScoreMode);
boxes = postProcessor.FilterTagDetRes(boxes, ratioH, ratioW, srcImage);
std::vector<migraphxSamples::OCRPredictResult> ocrResults;
for (int i = 0; i < boxes.size(); i++)
{
OCRPredictResult res;
res.box = boxes[i];
ocrResults.push_back(res);
}
Utility::sorted_boxes(ocrResults);
for (int j = 0; j < ocrResults.size(); j++)
{
cv::Mat cropImg;
cropImg = Utility::GetRotateCropImage(img, ocrResults[j].box);
imgList.push_back(cropImg);
}
return SUCCESS;
}
void DBPostProcessor::GetContourArea(const std::vector<std::vector<float>> &box,
float unclip_ratio, float &distance) {
int pts_num = 4;
float area = 0.0f;
float dist = 0.0f;
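// Shoelace area and perimeter of the quad; the DB unclip offset distance is area * unclip_ratio / perimeter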
for (int i = 0; i < pts_num; i++) {
area += box[i][0] * box[(i + 1) % pts_num][1] -
box[i][1] * box[(i + 1) % pts_num][0];
dist += sqrtf((box[i][0] - box[(i + 1) % pts_num][0]) *
(box[i][0] - box[(i + 1) % pts_num][0]) +
(box[i][1] - box[(i + 1) % pts_num][1]) *
(box[i][1] - box[(i + 1) % pts_num][1]));
}
area = fabs(float(area / 2.0));
distance = area * unclip_ratio / dist;
}
cv::RotatedRect DBPostProcessor::UnClip(std::vector<std::vector<float>> box,
const float &unclip_ratio) {
float distance = 1.0;
GetContourArea(box, unclip_ratio, distance);
ClipperLib::ClipperOffset offset;
ClipperLib::Path p;
p << ClipperLib::IntPoint(int(box[0][0]), int(box[0][1]))
<< ClipperLib::IntPoint(int(box[1][0]), int(box[1][1]))
<< ClipperLib::IntPoint(int(box[2][0]), int(box[2][1]))
<< ClipperLib::IntPoint(int(box[3][0]), int(box[3][1]));
offset.AddPath(p, ClipperLib::jtRound, ClipperLib::etClosedPolygon);
ClipperLib::Paths soln;
offset.Execute(soln, distance);
std::vector<cv::Point2f> points;
for (int j = 0; j < soln.size(); j++) {
for (int i = 0; i < soln[soln.size() - 1].size(); i++) {
points.emplace_back(soln[j][i].X, soln[j][i].Y);
}
}
cv::RotatedRect res;
if (points.size() <= 0) {
res = cv::RotatedRect(cv::Point2f(0, 0), cv::Size2f(1, 1), 0);
} else {
res = cv::minAreaRect(points);
}
return res;
}
float **DBPostProcessor::Mat2Vec(cv::Mat mat) {
auto **array = new float *[mat.rows];
for (int i = 0; i < mat.rows; ++i)
array[i] = new float[mat.cols];
for (int i = 0; i < mat.rows; ++i) {
for (int j = 0; j < mat.cols; ++j) {
array[i][j] = mat.at<float>(i, j);
}
}
return array;
}
std::vector<std::vector<int>>
DBPostProcessor::OrderPointsClockwise(std::vector<std::vector<int>> pts) {
std::vector<std::vector<int>> box = pts;
std::sort(box.begin(), box.end(), XsortInt);
std::vector<std::vector<int>> leftmost = {box[0], box[1]};
std::vector<std::vector<int>> rightmost = {box[2], box[3]};
if (leftmost[0][1] > leftmost[1][1])
std::swap(leftmost[0], leftmost[1]);
if (rightmost[0][1] > rightmost[1][1])
std::swap(rightmost[0], rightmost[1]);
std::vector<std::vector<int>> rect = {leftmost[0], rightmost[0], rightmost[1],
leftmost[1]};
return rect;
}
std::vector<std::vector<float>> DBPostProcessor::Mat2Vector(cv::Mat mat) {
std::vector<std::vector<float>> img_vec;
std::vector<float> tmp;
for (int i = 0; i < mat.rows; ++i) {
tmp.clear();
for (int j = 0; j < mat.cols; ++j) {
tmp.push_back(mat.at<float>(i, j));
}
img_vec.push_back(tmp);
}
return img_vec;
}
bool DBPostProcessor::XsortFp32(std::vector<float> a, std::vector<float> b) {
if (a[0] != b[0])
return a[0] < b[0];
return false;
}
bool DBPostProcessor::XsortInt(std::vector<int> a, std::vector<int> b) {
if (a[0] != b[0])
return a[0] < b[0];
return false;
}
std::vector<std::vector<float>>
DBPostProcessor::GetMiniBoxes(cv::RotatedRect box, float &ssid) {
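// Order the rotated rect's corners as top-left, top-right, bottom-right, bottom-left; ssid is the rect's longer side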
ssid = std::max(box.size.width, box.size.height);
cv::Mat points;
cv::boxPoints(box, points);
auto array = Mat2Vector(points);
std::sort(array.begin(), array.end(), XsortFp32);
std::vector<float> idx1 = array[0], idx2 = array[1], idx3 = array[2],
idx4 = array[3];
if (array[3][1] <= array[2][1]) {
idx2 = array[3];
idx3 = array[2];
} else {
idx2 = array[2];
idx3 = array[3];
}
if (array[1][1] <= array[0][1]) {
idx1 = array[1];
idx4 = array[0];
} else {
idx1 = array[0];
idx4 = array[1];
}
array[0] = idx1;
array[1] = idx2;
array[2] = idx3;
array[3] = idx4;
return array;
}
float DBPostProcessor::PolygonScoreAcc(std::vector<cv::Point> contour,
cv::Mat pred) {
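// Score a candidate region as the mean of the probability map inside the polygon mask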
int width = pred.cols;
int height = pred.rows;
std::vector<float> box_x;
std::vector<float> box_y;
for (int i = 0; i < contour.size(); ++i) {
box_x.push_back(contour[i].x);
box_y.push_back(contour[i].y);
}
int xmin =
clamp(int(std::floor(*(std::min_element(box_x.begin(), box_x.end())))), 0,
width - 1);
int xmax =
clamp(int(std::ceil(*(std::max_element(box_x.begin(), box_x.end())))), 0,
width - 1);
int ymin =
clamp(int(std::floor(*(std::min_element(box_y.begin(), box_y.end())))), 0,
height - 1);
int ymax =
clamp(int(std::ceil(*(std::max_element(box_y.begin(), box_y.end())))), 0,
height - 1);
cv::Mat mask;
mask = cv::Mat::zeros(ymax - ymin + 1, xmax - xmin + 1, CV_8UC1);
cv::Point *rook_point = new cv::Point[contour.size()];
for (int i = 0; i < contour.size(); ++i) {
rook_point[i] = cv::Point(int(box_x[i]) - xmin, int(box_y[i]) - ymin);
}
const cv::Point *ppt[1] = {rook_point};
int npt[] = {int(contour.size())};
cv::fillPoly(mask, ppt, npt, 1, cv::Scalar(1));
cv::Mat croppedImg;
pred(cv::Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1))
.copyTo(croppedImg);
float score = cv::mean(croppedImg, mask)[0];
delete[] rook_point;
return score;
}
float DBPostProcessor::BoxScoreFast(std::vector<std::vector<float>> box_array,
cv::Mat pred) {
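// Faster scoring: mean of the probability map inside the quadrilateral's filled mask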
auto array = box_array;
int width = pred.cols;
int height = pred.rows;
float box_x[4] = {array[0][0], array[1][0], array[2][0], array[3][0]};
float box_y[4] = {array[0][1], array[1][1], array[2][1], array[3][1]};
int xmin = clamp(int(std::floor(*(std::min_element(box_x, box_x + 4)))), 0,
width - 1);
int xmax = clamp(int(std::ceil(*(std::max_element(box_x, box_x + 4)))), 0,
width - 1);
int ymin = clamp(int(std::floor(*(std::min_element(box_y, box_y + 4)))), 0,
height - 1);
int ymax = clamp(int(std::ceil(*(std::max_element(box_y, box_y + 4)))), 0,
height - 1);
cv::Mat mask;
mask = cv::Mat::zeros(ymax - ymin + 1, xmax - xmin + 1, CV_8UC1);
cv::Point root_point[4];
root_point[0] = cv::Point(int(array[0][0]) - xmin, int(array[0][1]) - ymin);
root_point[1] = cv::Point(int(array[1][0]) - xmin, int(array[1][1]) - ymin);
root_point[2] = cv::Point(int(array[2][0]) - xmin, int(array[2][1]) - ymin);
root_point[3] = cv::Point(int(array[3][0]) - xmin, int(array[3][1]) - ymin);
const cv::Point *ppt[1] = {root_point};
int npt[] = {4};
cv::fillPoly(mask, ppt, npt, 1, cv::Scalar(1));
cv::Mat croppedImg;
pred(cv::Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1))
.copyTo(croppedImg);
auto score = cv::mean(croppedImg, mask)[0];
return score;
}
std::vector<std::vector<std::vector<int>>> DBPostProcessor::BoxesFromBitmap(
const cv::Mat pred, const cv::Mat bitmap, const float &box_thresh,
const float &det_db_unclip_ratio, const std::string &det_db_score_mode) {
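// For each contour of the binary mask: fit a rotated rect, score it against the probability map, expand it via UnClip, and map it back to bitmap coordinates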
const int min_size = 3;
const int max_candidates = 2000;
int width = bitmap.cols;
int height = bitmap.rows;
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(bitmap, contours, hierarchy, cv::RETR_LIST,
cv::CHAIN_APPROX_SIMPLE);
int num_contours =
contours.size() >= max_candidates ? max_candidates : contours.size();
std::vector<std::vector<std::vector<int>>> boxes;
for (int _i = 0; _i < num_contours; _i++) {
if (contours[_i].size() <= 2) {
continue;
}
float ssid;
cv::RotatedRect box = cv::minAreaRect(contours[_i]);
auto array = GetMiniBoxes(box, ssid);
auto box_for_unclip = array;
// end get_mini_box
if (ssid < min_size) {
continue;
}
float score;
if (det_db_score_mode == "slow")
/* compute using polygon*/
score = PolygonScoreAcc(contours[_i], pred);
else
score = BoxScoreFast(array, pred);
if (score < box_thresh)
continue;
// start for unclip
cv::RotatedRect points = UnClip(box_for_unclip, det_db_unclip_ratio);
if (points.size.height < 1.001 && points.size.width < 1.001) {
continue;
}
// end for unclip
cv::RotatedRect clipbox = points;
auto cliparray = GetMiniBoxes(clipbox, ssid);
if (ssid < min_size + 2)
continue;
int dest_width = pred.cols;
int dest_height = pred.rows;
std::vector<std::vector<int>> intcliparray;
for (int num_pt = 0; num_pt < 4; num_pt++) {
std::vector<int> a{int(clampf(roundf(cliparray[num_pt][0] / float(width) *
float(dest_width)),
0, float(dest_width))),
int(clampf(roundf(cliparray[num_pt][1] /
float(height) * float(dest_height)),
0, float(dest_height)))};
intcliparray.push_back(a);
}
boxes.push_back(intcliparray);
} // end for
return boxes;
}
std::vector<std::vector<std::vector<int>>> DBPostProcessor::FilterTagDetRes(
std::vector<std::vector<std::vector<int>>> boxes, float ratio_h,
float ratio_w, cv::Mat srcimg) {
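// Undo the resize ratios to map boxes back to the original image, clamp them to the image bounds, and drop boxes that are too small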
int oriimg_h = srcimg.rows;
int oriimg_w = srcimg.cols;
std::vector<std::vector<std::vector<int>>> root_points;
for (int n = 0; n < boxes.size(); n++) {
boxes[n] = OrderPointsClockwise(boxes[n]);
for (int m = 0; m < boxes[0].size(); m++) {
boxes[n][m][0] /= ratio_w;
boxes[n][m][1] /= ratio_h;
boxes[n][m][0] = int(_min(_max(boxes[n][m][0], 0), oriimg_w - 1));
boxes[n][m][1] = int(_min(_max(boxes[n][m][1], 0), oriimg_h - 1));
}
}
for (int n = 0; n < boxes.size(); n++) {
int rect_width, rect_height;
rect_width = int(sqrt(pow(boxes[n][0][0] - boxes[n][1][0], 2) +
pow(boxes[n][0][1] - boxes[n][1][1], 2)));
rect_height = int(sqrt(pow(boxes[n][0][0] - boxes[n][3][0], 2) +
pow(boxes[n][0][1] - boxes[n][3][1], 2)));
if (rect_width <= 4 || rect_height <= 4)
continue;
root_points.push_back(boxes[n]);
}
return root_points;
}
ErrorCode DB::DoCommonInitialization(InitializationParameterOfDB InitializationParameterOfDB)
{
initializationParameter=InitializationParameterOfDB;
// Get the log file
logFile=LogManager::GetInstance()->GetLogFile(initializationParameter.logName);
// Load the configuration file
std::string configFilePath=initializationParameter.configFilePath;
if(!Exists(configFilePath))
{
LOG_ERROR(logFile, "no configuration file!\n");
return CONFIG_FILE_NOT_EXIST;
}
if(!configurationFile.open(configFilePath, FileStorage::READ))
{
LOG_ERROR(logFile, "fail to open configuration file\n");
return FAIL_TO_OPEN_CONFIG_FILE;
}
LOG_INFO(logFile, "succeed to open configuration file\n");
// Ensure the parent path ends with a separator
std::string &parentPath = initializationParameter.parentPath;
if (!parentPath.empty())
{
if(!IsPathSeparator(parentPath[parentPath.size() - 1]))
{
parentPath+=PATH_SEPARATOR;
}
}
return SUCCESS;
}
}
// PaddleOCR license plate detection
#ifndef __OCR_DB_H__
#define __OCR_DB_H__
#include <string>
#include <migraphx/program.hpp>
#include <opencv2/opencv.hpp>
#include <CommonDefinition.h>
#include <clipper.h>
#include <utility.h>
using namespace std;
using namespace cv;
using namespace migraphx;
namespace migraphxSamples
{
typedef struct _DBParameter
{
float BinaryThreshold;
float BoxThreshold;
float UnclipRatio;
int LimitSideLen;
string ScoreMode;
}DBParameter;
class DB
{
public:
DB();
~DB();
ErrorCode Initialize(InitializationParameterOfDB InitializationParameterOfDB);
ErrorCode Infer(const cv::Mat &img, std::vector<cv::Mat> &imgList);
private:
ErrorCode DoCommonInitialization(InitializationParameterOfDB InitializationParameterOfDB);
private:
cv::FileStorage configurationFile;
InitializationParameterOfDB initializationParameter;
FILE *logFile;
migraphx::program net;
cv::Size inputSize;
string inputName;
migraphx::shape inputShape;
DBParameter dbParameter;
};
class DBPostProcessor {
public:
void GetContourArea(const std::vector<std::vector<float>> &box,
float unclip_ratio, float &distance);
cv::RotatedRect UnClip(std::vector<std::vector<float>> box,
const float &unclip_ratio);
float **Mat2Vec(cv::Mat mat);
std::vector<std::vector<int>>
OrderPointsClockwise(std::vector<std::vector<int>> pts);
std::vector<std::vector<float>> GetMiniBoxes(cv::RotatedRect box,
float &ssid);
float BoxScoreFast(std::vector<std::vector<float>> box_array, cv::Mat pred);
float PolygonScoreAcc(std::vector<cv::Point> contour, cv::Mat pred);
std::vector<std::vector<std::vector<int>>>
BoxesFromBitmap(const cv::Mat pred, const cv::Mat bitmap,
const float &box_thresh, const float &det_db_unclip_ratio,
const std::string &det_db_score_mode);
std::vector<std::vector<std::vector<int>>>
FilterTagDetRes(std::vector<std::vector<std::vector<int>>> boxes,
float ratio_h, float ratio_w, cv::Mat srcimg);
private:
static bool XsortInt(std::vector<int> a, std::vector<int> b);
static bool XsortFp32(std::vector<float> a, std::vector<float> b);
std::vector<std::vector<float>> Mat2Vector(cv::Mat mat);
inline int _max(int a, int b) { return a >= b ? a : b; }
inline int _min(int a, int b) { return a >= b ? b : a; }
template <class T> inline T clamp(T x, T min, T max) {
if (x > max)
return max;
if (x < min)
return min;
return x;
}
inline float clampf(float x, float min, float max) {
if (x > max)
return max;
if (x < min)
return min;
return x;
}
};
}
#endif