// The original include list is not reproduced in this excerpt; the headers below
// cover what this translation unit uses. Helper utilities (LOG_INFO/LOG_ERROR,
// Exists, GetFileName, Image2BlobParams, blobFromImagesWithParams) and the
// ErrorCode, Prediction and InitializationParameterOfClassifier types, as well as
// the MIGraphX/OpenCV declarations, are assumed to come from "Classifier.h" and
// the headers it pulls in.
#include <cstdio>
#include <string>
#include <vector>
#include <unordered_map>
#include <iostream>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include "Classifier.h"

namespace migraphxSamples
{

Classifier::Classifier()
{
}

Classifier::~Classifier()
{
    configurationFile.release();
}

ErrorCode Classifier::Initialize(InitializationParameterOfClassifier initializationParameterOfClassifier)
{
    // Read the configuration file
    std::string configFilePath=initializationParameterOfClassifier.configFilePath;
    if(Exists(configFilePath)==false)
    {
        LOG_ERROR(stdout, "no configuration file!\n");
        return CONFIG_FILE_NOT_EXIST;
    }
    if(!configurationFile.open(configFilePath, cv::FileStorage::READ))
    {
        LOG_ERROR(stdout, "fail to open configuration file\n");
        return FAIL_TO_OPEN_CONFIG_FILE;
    }
    LOG_INFO(stdout, "succeed to open configuration file\n");

    // Read the parameters of this module from the configuration file
    cv::FileNode netNode = configurationFile["Classifier"];
    std::string modelPath=(std::string)netNode["ModelPath"];
    useInt8=(bool)(int)netNode["UseInt8"];
    useFP16=(bool)(int)netNode["UseFP16"];

    // Set the maximum input shape
    migraphx::onnx_options onnx_options;
    onnx_options.map_input_dims["data"]={1,3,224,224};

    // Load the model
    if(Exists(modelPath)==false)
    {
        LOG_ERROR(stdout,"%s not exist!\n",modelPath.c_str());
        return MODEL_NOT_EXIST;
    }
    net = migraphx::parse_onnx(modelPath, onnx_options);
    LOG_INFO(stdout,"succeed to load model: %s\n",GetFileName(modelPath).c_str());

    // Query the input/output nodes of the model
    std::cout<<"inputs:"<<std::endl;
    std::unordered_map<std::string, migraphx::shape> inputs=net.get_inputs();
    for(auto i:inputs)
    {
        std::cout<<i.first<<std::endl;
    }
    std::cout<<"outputs:"<<std::endl;
    std::unordered_map<std::string, migraphx::shape> outputs=net.get_outputs();
    for(auto i:outputs)
    {
        std::cout<<i.first<<std::endl;
    }
    inputName=inputs.begin()->first;
    inputShape=inputs.begin()->second;
    int N=inputShape.lens()[0];
    int C=inputShape.lens()[1];
    int H=inputShape.lens()[2];
    int W=inputShape.lens()[3];
    inputSize=cv::Size(W,H);

    // Run the model on the GPU
    migraphx::target gpuTarget = migraphx::gpu::target{};

    // Quantization
    if(useInt8)
    {
        // Create calibration data for quantization; several typical images from
        // the test set are recommended.
        cv::Mat srcImage=cv::imread("../Resource/Images/ImageNet_test.jpg",1);
        std::vector<cv::Mat> srcImages;
        for(int i=0;i<N;++i)
        {
            srcImages.push_back(srcImage);
        }

        // Preprocess the calibration images
        std::vector<cv::Mat> image;
        for(int i=0;i<srcImages.size();++i)
        {
            // Convert BGR to RGB and resize so that the shorter side becomes 256,
            // keeping the aspect ratio
            cv::Mat imgRGB;
            cv::cvtColor(srcImages[i], imgRGB, cv::COLOR_BGR2RGB);
            float ratio = 256.0f/std::min(imgRGB.rows, imgRGB.cols);
            cv::Mat shrink;
            if(imgRGB.rows > imgRGB.cols)
            {
                cv::resize(imgRGB, shrink, cv::Size(256, int(ratio * imgRGB.rows)), 0, 0);
            }
            else
            {
                cv::resize(imgRGB, shrink, cv::Size(int(ratio * imgRGB.cols), 256), 0, 0);
            }

            // Crop the central 224x224 window
            int start_x = shrink.cols/2 - 224/2;
            int start_y = shrink.rows/2 - 224/2;
            cv::Rect rect(start_x, start_y, 224, 224);
            cv::Mat images = shrink(rect);
            image.push_back(images);
        }

        // Normalize and convert to an NCHW blob
        cv::Mat inputBlob;
        Image2BlobParams image2BlobParams;
        image2BlobParams.scalefactor=cv::Scalar(1/58.395, 1/57.12, 1/57.375);
        image2BlobParams.mean=cv::Scalar(123.675, 116.28, 103.53);
        image2BlobParams.swapRB=false;
        blobFromImagesWithParams(image,inputBlob,image2BlobParams);

        std::unordered_map<std::string, migraphx::argument> inputData;
        inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data};
        std::vector<std::unordered_map<std::string, migraphx::argument>> calibrationData = {inputData};

        // INT8 quantization
        migraphx::quantize_int8(net, gpuTarget, calibrationData);
    }
    else if(useFP16)
    {
        migraphx::quantize_fp16(net);
    }

    // Compile the model
    migraphx::compile_options options;
    options.device_id=0;        // GPU device to use, device 0 by default
    options.offload_copy=true;
    net.compile(gpuTarget,options);
    LOG_INFO(stdout,"succeed to compile model: %s\n",GetFileName(modelPath).c_str());

    // Warm up
    std::unordered_map<std::string, migraphx::argument> inputData;
    inputData[inputName]=migraphx::argument{inputShape};
    net.eval(inputData);

    // Log the effective settings
    LOG_INFO(stdout,"InputSize:%dx%d\n",inputSize.width,inputSize.height);
    LOG_INFO(stdout,"InputName:%s\n",inputName.c_str());
    LOG_INFO(stdout,"UseInt8:%d\n",(int)useInt8);
    LOG_INFO(stdout,"UseFP16:%d\n",(int)useFP16);

    return SUCCESS;
}

ErrorCode Classifier::Classify(const std::vector<cv::Mat> &srcImages, std::vector<std::vector<Prediction>> &predictions)
{
    if(srcImages.size()==0||srcImages[0].empty()||srcImages[0].depth()!=CV_8U)
    {
LOG_ERROR(stdout, "image error!\n"); return IMAGE_ERROR; } // 数据预处理 std::vector image; for(int i =0;i imgRGB.cols) { cv::resize(imgRGB, shrink, cv::Size(256, int(ratio * imgRGB.rows)), 0, 0); } else { cv::resize(imgRGB, shrink, cv::Size(int(ratio * imgRGB.cols), 256), 0, 0); } // 裁剪中心窗口为224*224 int start_x = shrink.cols/2 - 224/2; int start_y = shrink.rows/2 - 224/2; cv::Rect rect(start_x, start_y, 224, 224); cv::Mat images = shrink(rect); image.push_back(images); } // normalize并转换为NCHW cv::Mat inputBlob; Image2BlobParams image2BlobParams; image2BlobParams.scalefactor=cv::Scalar(1/58.395, 1/57.12, 1/57.375); image2BlobParams.mean=cv::Scalar(123.675, 116.28, 103.53); image2BlobParams.swapRB=false; blobFromImagesWithParams(image,inputBlob,image2BlobParams); // 创建输入数据 std::unordered_map inputData; inputData[inputName]= migraphx::argument{inputShape, (float*)inputBlob.data}; // 推理 std::vector results = net.eval(inputData); // 获取输出节点的属性 migraphx::argument result = results[0]; // 获取第一个输出节点的数据 migraphx::shape outputShape=result.get_shape(); // 输出节点的shape std::vector outputSize=outputShape.lens();// 每一维大小,维度顺序为(N,C,H,W) int numberOfOutput=outputShape.elements();// 输出节点元素的个数 float *logits=(float *)result.data();// 输出节点数据指针 // 获取每张图像的预测结果 int numberOfClasses=numberOfOutput/srcImages.size(); for(int i=0;i logit; for(int j=0;j resultOfPredictions; for(int j=0;j