Commit 28602b9a authored by shizhm

Refactor the yolov5 inference example

parent b306b620
...@@ -26,10 +26,7 @@ set(LIBRARY opencv_core
            opencv_imgproc
            opencv_imgcodecs
            opencv_dnn
            migraphx
            migraphx_gpu
            migraphx_onnx)
link_libraries(${LIBRARY})
......
# YOLOV5 Detector
YOLOV5 is one of the most widely used detection models in industry, and the official repository provides pretrained models in several sizes. This document describes how to build YOLOV5 inference on MIGraphX, covering both static inference and dynamic-shape inference; the same inference pipeline also applies to other YOLOV5 model variants.
## Model Overview
...@@ -12,7 +12,8 @@ YOLOV5 is a single-stage object detection algorithm that builds on YOLOV4...
The DetectorYOLOV5 node in the Resource/Configuration.xml file of the samples project holds the YOLOV5 detector parameters; they largely follow the official inference example. The parameters are:
- ModelPathDynamic: path to the dynamic yolov5 model
- ModelPathStatic: path to the static yolov5 model
- ClassNameFile: path to the COCO class-name file
- UseFP16: whether to use FP16 inference
- NumberOfClasses: number of detection classes
...@@ -21,7 +22,8 @@ The DetectorYOLOV5 node in Resource/Configuration.xml holds the YOLOV5 detector parameters
- ObjectThreshold: threshold used to decide whether an anchor contains an object
```
<ModelPathDynamic>"../Resource/Models/yolov5s_Nx3xNxN.onnx"</ModelPathDynamic>
<ModelPathStatic>"../Resource/Models/yolov5s.onnx"</ModelPathStatic>
<ClassNameFile>"../Resource/Models/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!--whether to use FP16-->
<NumberOfClasses>80</NumberOfClasses><!--number of classes (excluding background), COCO: 80, VOC: 20-->
```
...@@ -32,45 +34,34 @@ The DetectorYOLOV5 node in Resource/Configuration.xml holds the YOLOV5 detector parameters
## Model Initialization
Model initialization first loads the YOLOV5 ONNX model with the parse_onnx() function.
- Static inference: call parse_onnx directly to parse the static model.
```
ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializationParameterOfDetector, bool dynamic)
{
    ...
    // Load the model
    net = migraphx::parse_onnx(modelPath);
    LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());
    ...
}
```
- Dynamic-shape inference: the maximum input shape must be set when parsing; this example uses {1,3,800,800}.
```
ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializationParameterOfDetector, bool dynamic)
{
    ...
    migraphx::onnx_options onnx_options;
    onnx_options.map_input_dims["images"] = {1,3,800,800};  // maximum input shape
    net = migraphx::parse_onnx(modelPath, onnx_options);
    ...
}
```
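In both modes the rest of Initialize is shared: it reads the input name and shape from the parsed program via get_parameter_shapes(), optionally applies FP16 quantization, and compiles the program for the GPU target. An abridged sketch of that shared code, taken from the full DetectorYOLOV5::Initialize shown later in this commit:
```
// Query the input name and shape of the parsed program.
std::unordered_map<std::string, migraphx::shape> inputMap = net.get_parameter_shapes();
inputName  = inputMap.begin()->first;
inputShape = inputMap.begin()->second;
inputSize  = cv::Size(inputShape.lens()[3], inputShape.lens()[2]);  // W x H

// Compile for the GPU target, optionally quantizing to FP16 first.
migraphx::target gpuTarget = migraphx::gpu::target{};
if(useFP16)
{
    migraphx::quantize_fp16(net);
}
migraphx::compile_options options;
options.device_id = 0;        // GPU device index
options.offload_copy = true;  // let MIGraphX manage host<->device copies
net.compile(gpuTarget, options);
LOG_INFO(stdout,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());
```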
...@@ -79,25 +70,44 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
Before feeding data to the model, the image is preprocessed as follows:
- Convert the data layout to NCHW
- Normalize to [0.0, 1.0]
- Resize: for static inference the input is resized to [1,3,608,608]; for dynamic inference the input image size is left unchanged.
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection, bool dynamic)
{
    ...
    // Preprocess the input data
    cv::Mat inputBlob;
    std::vector<std::size_t> relInputShape;
    int height, width;
    if(dynamic)
    {
        width = srcImage.cols;
        height = srcImage.rows;
        relInputShape = {1,3,height,width};
        cv::dnn::blobFromImage(srcImage,                 // input image
                               inputBlob,                // output blob
                               1 / 255.0,                // normalize to [0, 1]
                               cv::Size(width, height),  // actual input size
                               cv::Scalar(0, 0, 0),      // no mean subtraction
                               true,                     // swap the R and B channels
                               false);                   // no center crop
    }
    else
    {
        cv::dnn::blobFromImage(srcImage,
                               inputBlob,
                               1 / 255.0,
                               inputSize,                // fixed model input size
                               cv::Scalar(0, 0, 0),
                               true,
                               false);
    }
    ...
}
```
...@@ -105,32 +115,30 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<std::size_
## Inference
After image preprocessing and the YOLOV5 detection parameters have been set up, inference is run with MIGraphX to obtain the model output. For static inference the shape of the input data inputData is the model's fixed input size; for dynamic inference it is the actual size of the input.
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection, bool dynamic)
{
    ...
    // Create the input data
    migraphx::parameter_map inputData;
    if(dynamic)
    {
        inputData[inputName]= migraphx::argument{migraphx::shape(inputShape.type(), relInputShape), (float*)inputBlob.data};
    }
    else
    {
        inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
    }
    // Run inference
    std::vector<migraphx::argument> inferenceResults = net.eval(inputData);
    ...
}
```
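The first element of inferenceResults holds the raw YOLOV5 output tensor ([batch, anchors, 4 + 1 + NumberOfClasses] for the standard YOLOV5 head). Before post-processing, the sample copies it into a 3-D cv::Mat; a sketch of that conversion, abridged from the part of Detect elided above:
```
// Wrap the first inference output into a cv::Mat for post-processing.
std::vector<cv::Mat> outs;
migraphx::argument result = inferenceResults[0];
migraphx::shape outputShape = result.get_shape();
int shape[] = {(int)outputShape.lens()[0], (int)outputShape.lens()[1], (int)outputShape.lens()[2]};
cv::Mat out(3, shape, CV_32F);
memcpy(out.data, result.data(), sizeof(float) * outputShape.elements());
outs.push_back(out);
```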
...@@ -140,7 +148,7 @@ The YOLOV5 MIGraphX inference result inferenceResults is a std::vector<migraphx::argument>...
- Step 2: filter by confidenceThreshold. For anchors that passed step 1, if the maximum class confidence maxClassScore is greater than this threshold, the anchor's box coordinates and predicted class are extracted; otherwise the anchor is skipped.
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection, bool dynamic)
{
    ...
}
```
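A minimal sketch of this two-stage filtering, assuming the output tensor has already been copied into outs[0] as above; the variable names here are illustrative and may differ from the sample's actual code:
```
// Each output row is [cx, cy, w, h, objectness, class scores...].
std::vector<cv::Rect> boxes;
std::vector<float> confidences;
std::vector<int> classIds;
int numAnchors = outs[0].size[1];
int rowLength  = 5 + yolov5Parameter.numberOfClasses;
float* data = (float*)outs[0].data;
for(int i = 0; i < numAnchors; ++i, data += rowLength)
{
    // Step 1: does this anchor contain an object?
    if(data[4] < yolov5Parameter.objectThreshold)
        continue;
    // Step 2: find the best class score and compare it with confidenceThreshold.
    cv::Mat classScores(1, yolov5Parameter.numberOfClasses, CV_32FC1, data + 5);
    cv::Point classIdPoint;
    double maxClassScore;
    cv::minMaxLoc(classScores, 0, &maxClassScore, 0, &classIdPoint);
    if(maxClassScore < yolov5Parameter.confidenceThreshold)
        continue;
    // Keep the box (cx, cy, w, h are in network-input coordinates).
    float cx = data[0], cy = data[1], w = data[2], h = data[3];
    boxes.emplace_back((int)(cx - w / 2), (int)(cy - h / 2), (int)w, (int)h);
    confidences.push_back((float)maxClassScore);
    classIds.push_back(classIdPoint.x);
}
```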
...@@ -196,7 +204,7 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<std::size_
To remove overlapping anchor boxes and produce the final YOLOV5 detections, non-maximum suppression is applied to the filtered anchors, and the surviving detections are stored in resultsOfDetection.
```
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection, bool dynamic)
{
    ...
}
```
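A sketch of the NMS step using OpenCV's cv::dnn::NMSBoxes, assuming boxes, confidences and classIds were filled by the filtering step above (names are illustrative):
```
// Non-maximum suppression: keep only the highest-scoring box among overlapping ones.
std::vector<int> indices;
cv::dnn::NMSBoxes(boxes, confidences,
                  yolov5Parameter.confidenceThreshold,  // score threshold
                  yolov5Parameter.nmsThreshold,         // IoU threshold
                  indices);
for(int idx : indices)
{
    ResultOfDetection detection;
    detection.boundingBox = boxes[idx];
    detection.confidence  = confidences[idx];
    detection.classID     = classIds[idx];
    detection.className   = classNames[classIds[idx]];
    resultsOfDetection.push_back(detection);
}
```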
......
...@@ -34,32 +34,37 @@ def prepare_input(self, image):
        return input_img
```
## Inference
To run YOLOV5 model inference, the model is first parsed and compiled. Static inference calls parse_onnx directly on the static model and reads the static input shape from it; dynamic-shape inference, by contrast, requires setting the maximum input shape of the model, [1,3,800,800] in this example.
```
class YOLOv5:
    def __init__(self, path, dynamic=False, obj_thres=0.5, conf_thres=0.25, iou_thres=0.5):
        self.objectThreshold = obj_thres
        self.confThreshold = conf_thres
        self.nmsThreshold = iou_thres
        self.isDynamic = dynamic
        # Load the class names detected by the model
        self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/coco.names', 'r').readlines()))
        # Parse the inference model
        if self.isDynamic:
            maxInput={"images":[1,3,800,800]}
            self.model = migraphx.parse_onnx(path, map_input_dims=maxInput)
            self.inputName = self.model.get_parameter_names()[0]
            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
            print("inputName:{0} \ninputMaxShape:{1}".format(self.inputName, inputShape))
        else:
            self.model = migraphx.parse_onnx(path)
            self.inputName = self.model.get_parameter_names()[0]
            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
            # Static inference input size
            self.inputWidth = inputShape[3]
            self.inputHeight = inputShape[2]
        # Compile the model
        self.model.compile(t=migraphx.get_target("gpu"), device_id=0)  # device_id: GPU device index, defaults to device 0
```
...@@ -71,9 +76,10 @@ class YOLOv5:
After initialization, inference is performed: a forward pass over the input data produces the model output result, and the detect function calls the user-defined process_output function to post-process result, yielding the box coordinates, class confidences and class IDs of the anchors that contain objects.
```
def detect(self, image, input_shape=None):
    if(self.isDynamic):
        self.inputWidth = input_shape[3]
        self.inputHeight = input_shape[2]
    # Preprocess the input image
    input_img = self.prepare_input(image)
```
......
...@@ -8,32 +8,40 @@ import migraphx

class YOLOv5:
    def __init__(self, path, dynamic=False, obj_thres=0.5, conf_thres=0.25, iou_thres=0.5):
        self.objectThreshold = obj_thres
        self.confThreshold = conf_thres
        self.nmsThreshold = iou_thres
        self.isDynamic = dynamic
        # Load the class names detected by the model
        self.classNames = list(map(lambda x: x.strip(), open('../Resource/Models/coco.names', 'r').readlines()))
        # Parse the inference model
        if self.isDynamic:
            maxInput={"images":[1,3,800,800]}
            self.model = migraphx.parse_onnx(path, map_input_dims=maxInput)
            self.inputName = self.model.get_parameter_names()[0]
            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
            print("inputName:{0} \ninputMaxShape:{1}".format(self.inputName, inputShape))
        else:
            self.model = migraphx.parse_onnx(path)
            self.inputName = self.model.get_parameter_names()[0]
            inputShape = self.model.get_parameter_shapes()[self.inputName].lens()
            print("inputName:{0} \ninputShape:{1}".format(self.inputName, inputShape))
            # Static inference input size
            self.inputWidth = inputShape[3]
            self.inputHeight = inputShape[2]
        # Compile the model
        self.model.compile(t=migraphx.get_target("gpu"), device_id=0)  # device_id: GPU device index, defaults to device 0
        print("Success to compile")

    def detect(self, image, input_shape=None):
        if(self.isDynamic):
            self.inputWidth = input_shape[3]
            self.inputHeight = input_shape[2]
        # Preprocess the input image
        input_img = self.prepare_input(image)
...@@ -85,13 +93,8 @@ class YOLOv5:
        return boxes[indices], scores[indices], class_ids[indices]

    def extract_boxes(self, predictions):
        boxes = predictions[:, :4]
        boxes = self.rescale_boxes(boxes)
        boxes_ = np.copy(boxes)
        boxes_[..., 0] = boxes[..., 0] - boxes[..., 2] * 0.5
        boxes_[..., 1] = boxes[..., 1] - boxes[..., 3] * 0.5
...@@ -124,28 +127,33 @@ def read_images(image_path):
        image_lists.append(image)
    return image_lists
def yolov5_Static(imgpath, modelpath, objectThreshold, confThreshold, nmsThreshold):
    yolov5_detector = YOLOv5(modelpath, False, obj_thres=objectThreshold, conf_thres=confThreshold,
                             iou_thres=nmsThreshold)
    srcimg = cv2.imread(imgpath, 1)

    boxes, scores, class_ids = yolov5_detector.detect(srcimg)

    dstimg = yolov5_detector.draw_detections(srcimg, boxes, scores, class_ids)
    # Save the detection result
    cv2.imwrite("./Result.jpg", dstimg)
    print("Success to save result")

def yolov5_dynamic(imgpath, modelpath, objectThreshold, confThreshold, nmsThreshold):
    # Set the dynamic input shapes
    input_shapes = []
    input_shapes.append([1,3,416,416])
    input_shapes.append([1,3,608,608])
    # Read the test images
    image_lists = read_images(imgpath)
    # Inference
    yolov5_detector = YOLOv5(modelpath, True, obj_thres=objectThreshold,
                             conf_thres=confThreshold, iou_thres=nmsThreshold)
    for i, image in enumerate(image_lists):
        print("Start to inference image{}".format(i))
        boxes, scores, class_ids = yolov5_detector.detect(image, input_shapes[i])
...@@ -156,6 +164,28 @@ if __name__ == '__main__':
        cv2.imwrite(result_name, dstimg)
    print("Success to save results")

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--imgPath', type=str, default='../Resource/Images/DynamicPics/image1.jpg', help="image path")
    parser.add_argument('--imgFolderPath', type=str, default='../Resource/Images/DynamicPics', help="image folder path")
    parser.add_argument('--staticModelPath', type=str, default='../Resource/Models/yolov5s.onnx', help="static onnx filepath")
    parser.add_argument('--dynamicModelPath', type=str, default='../Resource/Models/yolov5s_Nx3xNxN.onnx', help="dynamic onnx filepath")
    parser.add_argument('--objectThreshold', default=0.5, type=float, help='objectness threshold')
    parser.add_argument('--confThreshold', default=0.25, type=float, help='class confidence')
    parser.add_argument('--nmsThreshold', default=0.5, type=float, help='nms iou thresh')
    parser.add_argument("--staticInfer", action="store_true", default=False, help="perform static inference")
    parser.add_argument("--dynamicInfer", action="store_true", default=False, help="perform dynamic inference")
    args = parser.parse_args()
    # Static inference
    if args.staticInfer:
        yolov5_Static(args.imgPath, args.staticModelPath, args.objectThreshold, args.confThreshold, args.nmsThreshold)
    # Dynamic inference
    if args.dynamicInfer:
        yolov5_dynamic(args.imgFolderPath, args.dynamicModelPath, args.objectThreshold, args.confThreshold, args.nmsThreshold)
......
...@@ -43,7 +43,32 @@ pip install -r requirements.txt
### Running the Example
The YoloV5 inference example program is YoloV5_infer_migraphx.py. Run the example with the following commands:
```
# Enter the yolov5 migraphx project root directory
cd <path_to_yolov5_migraphx>
# Enter the Python directory
cd Python/
```
1. Static inference
```
python YoloV5_infer_migraphx.py \
    --imgPath <test image path> \
    --staticModelPath <onnx model path> \
    --objectThreshold <objectness threshold, default 0.5> \
    --confThreshold <confidence threshold, default 0.25> \
    --nmsThreshold <NMS threshold, default 0.5> \
    --staticInfer
```
When the program finishes, the visualized static-inference detection result Result.jpg is written to the current directory.

<img src="./Resource/Images/Result.jpg" alt="Result" style="zoom: 50%;" />

2. Dynamic inference
```
# Enable the environment variable
export MIGRAPHX_DYNAMIC_SHAPE=1
# Run the example
python YoloV5_infer_migraphx.py \
    --imgFolderPath <test image folder path> \
    --dynamicModelPath <onnx model path> \
    --objectThreshold <objectness threshold, default 0.5> \
    --confThreshold <confidence threshold, default 0.25> \
    --nmsThreshold <NMS threshold, default 0.5> \
    --dynamicInfer
```
When the program finishes, the visualized dynamic-inference detection results Result0.jpg and Result1.jpg are written to the current directory.

<img src="./Resource/Images/Result0.jpg" alt="Result_2" style="zoom: 50%;" />
...@@ -76,22 +101,8 @@ python YoloV5_infer_migraphx.py \
docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort1.14.0_migraphx3.0.0-dtk22.10.1
```
### Install OpenCV and Build the Project
```
rbuild build -d depend
```
...@@ -101,18 +112,10 @@ rbuild build -d depend
Add the dependency libraries to the LD_LIBRARY_PATH environment variable by appending the following line to ~/.bashrc:
```
export LD_LIBRARY_PATH=<path_to_yolov5_migraphx>/depend/lib64/:$LD_LIBRARY_PATH
```
Then run:
```
source ~/.bashrc
```
### Running the Example
After the YoloV5 example program builds successfully, run it as follows:
```
# Enter the yolov5 migraphx project root directory
cd <path_to_yolov5_migraphx>
# Enter the build directory
cd build/
```
1. Static inference
```
./YOLOV5 0
```
When the program finishes, the visualized static-inference detection result Result.jpg is written to the current directory.

<img src="./Resource/Images/Result.jpg" alt="Result" style="zoom:50%;" />

2. Dynamic inference
```
# Enable the environment variable
export MIGRAPHX_DYNAMIC_SHAPE=1
# Run the dynamic inference example
./YOLOV5 1
```
When the program finishes, the visualized dynamic-shape detection results Result0.jpg and Result1.jpg are written to the build directory.

<img src="./Resource/Images/Result0.jpg" alt="Result" style="zoom:50%;" />
......
...@@ -3,7 +3,8 @@
<!--YOLOV5 detector-->
<DetectorYOLOV5>
    <ModelPathDynamic>"../Resource/Models/yolov5s_Nx3xNxN.onnx"</ModelPathDynamic>
    <ModelPathStatic>"../Resource/Models/yolov5s.onnx"</ModelPathStatic>
    <ClassNameFile>"../Resource/Models/coco.names"</ClassNameFile>
    <UseFP16>0</UseFP16><!--whether to use FP16-->
    <NumberOfClasses>80</NumberOfClasses><!--number of classes (excluding background), COCO: 80, VOC: 20-->
......
...@@ -21,7 +21,7 @@ DetectorYOLOV5::~DetectorYOLOV5()
}

ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializationParameterOfDetector, bool dynamic)
{
    // Read the configuration file
    std::string configFilePath=initializationParameterOfDetector.configFilePath;
...@@ -39,7 +39,14 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
    // Read parameters from the configuration file
    cv::FileNode netNode = configurationFile["DetectorYOLOV5"];
    if(dynamic)
    {
        modelPath=(std::string)netNode["ModelPathDynamic"];
    }
    else
    {
        modelPath=(std::string)netNode["ModelPathStatic"];
    }
    std::string pathOfClassNameFile=(std::string)netNode["ClassNameFile"];
    yolov5Parameter.confidenceThreshold = (float)netNode["ConfidenceThreshold"];
    yolov5Parameter.nmsThreshold = (float)netNode["NMSThreshold"];
...@@ -47,27 +54,63 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
    yolov5Parameter.numberOfClasses=(int)netNode["NumberOfClasses"];
    useFP16=(bool)(int)netNode["UseFP16"];

    if(dynamic)
    {
        // Load the model
        if(Exists(modelPath)==false)
        {
            LOG_ERROR(stdout,"%s not exist!\n",modelPath.c_str());
            return MODEL_NOT_EXIST;
        }
        migraphx::onnx_options onnx_options;
        onnx_options.map_input_dims["images"]={1,3,800,800};  // maximum input shape
        net = migraphx::parse_onnx(modelPath, onnx_options);
        LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());

        // Read the model input attributes
        std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
        inputName=inputMap.begin()->first;
        inputShape=inputMap.begin()->second;
        int N=inputShape.lens()[0];
        int C=inputShape.lens()[1];
        int H=inputShape.lens()[2];
        int W=inputShape.lens()[3];
        inputSize=cv::Size(W,H);
        // log
        LOG_INFO(stdout,"InputMaxSize:%dx%d\n",inputSize.width,inputSize.height);
    }
    else
    {
        // Load the model
        if(Exists(modelPath)==false)
        {
            LOG_ERROR(stdout,"%s not exist!\n",modelPath.c_str());
            return MODEL_NOT_EXIST;
        }
        net = migraphx::parse_onnx(modelPath);
        LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());

        // Read the model input attributes
        std::unordered_map<std::string, migraphx::shape> inputMap=net.get_parameter_shapes();
        inputName=inputMap.begin()->first;
        inputShape=inputMap.begin()->second;
        int N=inputShape.lens()[0];
        int C=inputShape.lens()[1];
        int H=inputShape.lens()[2];
        int W=inputShape.lens()[3];
        inputSize=cv::Size(W,H);
        // log
        LOG_INFO(stdout,"InputSize:%dx%d\n",inputSize.width,inputSize.height);
    }
    LOG_INFO(stdout,"InputName:%s\n",inputName.c_str());
    LOG_INFO(stdout,"ConfidenceThreshold:%f\n",yolov5Parameter.confidenceThreshold);
    LOG_INFO(stdout,"NMSThreshold:%f\n",yolov5Parameter.nmsThreshold);
    LOG_INFO(stdout,"objectThreshold:%f\n",yolov5Parameter.objectThreshold);
    LOG_INFO(stdout,"NumberOfClasses:%d\n",yolov5Parameter.numberOfClasses);

    // Set the model to GPU mode
    migraphx::target gpuTarget = migraphx::gpu::target{};
...@@ -105,19 +148,11 @@ ErrorCode DetectorYOLOV5::Initialize(InitializationParameterOfDetector initializ
        classNames.resize(yolov5Parameter.numberOfClasses);
    }

    return SUCCESS;
}
ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection, bool dynamic)
{
    if(srcImage.empty()||srcImage.type()!=CV_8UC3)
    {
...@@ -125,21 +160,39 @@ ErrorCode DetectorYOLOV5::Detect(const cv::Mat &srcImage, std::vector<std::size_
        return IMAGE_ERROR;
    }

    // Preprocess the input data
    cv::Mat inputBlob;
    std::vector<std::size_t> relInputShape;
    int height, width;
    if(dynamic)
    {
        width = srcImage.cols;
        height = srcImage.rows;
        relInputShape = {1,3,height,width};
        // normalize to [0, 1] and swap the R and B channels, as in the README preprocessing
        inputBlob = cv::dnn::blobFromImage(srcImage, 1 / 255.0, cv::Size(width, height), cv::Scalar(0, 0, 0), true, false);
    }
    else
    {
        cv::dnn::blobFromImage(srcImage,
                               inputBlob,
                               1 / 255.0,
                               inputSize,
                               cv::Scalar(0, 0, 0),
                               true,
                               false);
    }
    // Create the input data
    migraphx::parameter_map inputData;
    if(dynamic)
    {
        inputData[inputName]= migraphx::argument{migraphx::shape(inputShape.type(), relInputShape), (float*)inputBlob.data};
    }
    else
    {
        inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
    }
    // Run inference
    std::vector<migraphx::argument> inferenceResults = net.eval(inputData);
......
...@@ -23,9 +23,9 @@ public:
    ~DetectorYOLOV5();

    ErrorCode Initialize(InitializationParameterOfDetector initializationParameterOfDetector, bool dynamic);

    ErrorCode Detect(const cv::Mat &srcImage, std::vector<ResultOfDetection> &resultsOfDetection, bool dynamic);

private:
    cv::FileStorage configurationFile;
...@@ -33,6 +33,7 @@ private:
    migraphx::program net;
    cv::Size inputSize;
    std::string inputName;
    std::string modelPath;
    migraphx::shape inputShape;
    bool useFP16;
......
...@@ -5,13 +5,106 @@
#include <Filesystem.h>
#include <YOLOV5.h>

void MIGraphXSamplesUsage(char* programName)
{
    printf("Usage : %s <index> \n", programName);
    printf("index:\n");
    printf("\t 0) YOLOV5 sample.\n");
    printf("\t 1) YOLOV5 Dynamic sample.\n");
}

void Sample_YOLOV5();
void Sample_YOLOV5_Dynamic();

int main(int argc, char *argv[])
{
    if (argc != 2)
    {
        MIGraphXSamplesUsage(argv[0]);
        return -1;
    }
    if (!strncmp(argv[1], "-h", 2))
    {
        MIGraphXSamplesUsage(argv[0]);
        return 0;
    }
    switch (*argv[1])
    {
        case '0':
        {
            Sample_YOLOV5();
            break;
        }
        case '1':
        {
            Sample_YOLOV5_Dynamic();
            break;
        }
        default :
        {
            MIGraphXSamplesUsage(argv[0]);
            break;
        }
    }
    return 0;
}

void Sample_YOLOV5()
{
    // Create the YOLOV5 detector
    migraphxSamples::DetectorYOLOV5 detector;
    migraphxSamples::InitializationParameterOfDetector initParamOfDetectorYOLOV5;
    initParamOfDetectorYOLOV5.configFilePath = CONFIG_FILE;
    migraphxSamples::ErrorCode errorCode=detector.Initialize(initParamOfDetectorYOLOV5, false);
    if(errorCode!=migraphxSamples::SUCCESS)
    {
        LOG_ERROR(stdout, "fail to initialize detector!\n");
        exit(-1);
    }
    LOG_INFO(stdout, "succeed to initialize detector\n");

    // Read the test image
    cv::Mat srcImage = cv::imread("../Resource/Images/DynamicPics/image1.jpg",1);

    // Inference
    std::vector<migraphxSamples::ResultOfDetection> predictions;
    double time1 = cv::getTickCount();
    detector.Detect(srcImage, predictions, false);
    double time2 = cv::getTickCount();
    double elapsedTime = (time2 - time1)*1000 / cv::getTickFrequency();
    LOG_INFO(stdout, "inference time:%f ms\n", elapsedTime);

    // Get the detection results
    LOG_INFO(stdout,"========== Detection Results ==========\n");
    for(int i=0;i<predictions.size();++i)
    {
        migraphxSamples::ResultOfDetection result=predictions[i];
        cv::rectangle(srcImage,result.boundingBox,cv::Scalar(0,255,255),2);
        std::string label = cv::format("%.2f", result.confidence);
        label = result.className + " " + label;
        int left = predictions[i].boundingBox.x;
        int top = predictions[i].boundingBox.y;
        int baseLine;
        cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
        top = max(top, labelSize.height);
        cv::putText(srcImage, label, cv::Point(left, top-10), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 255), 2);
        LOG_INFO(stdout,"box:%d %d %d %d,label:%d,confidence:%f\n",predictions[i].boundingBox.x,
                 predictions[i].boundingBox.y,predictions[i].boundingBox.width,predictions[i].boundingBox.height,predictions[i].classID,predictions[i].confidence);
    }
    cv::imwrite("Result.jpg",srcImage);
    LOG_INFO(stdout,"Detection results have been saved to ./Result.jpg\n");
}
void Sample_YOLOV5_Dynamic()
{
    // Create the YOLOV5 detector
    migraphxSamples::DetectorYOLOV5 detector;
    migraphxSamples::InitializationParameterOfDetector initParamOfDetectorYOLOV5;
    initParamOfDetectorYOLOV5.configFilePath = CONFIG_FILE;
    migraphxSamples::ErrorCode errorCode=detector.Initialize(initParamOfDetectorYOLOV5, true);
    if(errorCode!=migraphxSamples::SUCCESS)
    {
        LOG_ERROR(stdout, "fail to initialize detector!\n");
...@@ -26,7 +119,7 @@ int main()
    cv::glob(folder,imagePathList);
    for (int i = 0; i < imagePathList.size(); ++i)
    {
        cv::Mat srcImage=cv::imread(imagePathList[i], 1);
        srcImages.push_back(srcImage);
    }
...@@ -37,10 +130,13 @@ int main()
    for (int i = 0; i < srcImages.size(); ++i)
    {
        // Resize to generate test images of different sizes
        cv::resize(srcImages[i], srcImages[i], cv::Size(inputShapes[i][3], inputShapes[i][2]));
        // Inference
        std::vector<migraphxSamples::ResultOfDetection> predictions;
        double time1 = cv::getTickCount();
        detector.Detect(srcImages[i], predictions, true);
        double time2 = cv::getTickCount();
        double elapsedTime = (time2 - time1)*1000 / cv::getTickFrequency();
        LOG_INFO(stdout, "inference image%d time:%f ms\n", i, elapsedTime);
...@@ -68,6 +164,4 @@ int main()
        cv::imwrite(imgName, srcImages[i]);
        LOG_INFO(stdout,"Detection results have been saved to ./Result%d.jpg\n", i);
    }
}
\ No newline at end of file