Commit 3ca34c48 authored by Your Name's avatar Your Name
Browse files

修改配置文件

parent f56dcc58
<?xml version="1.0" encoding="GB2312"?>
<opencv_storage>
<!-- Classifier (digit classification, MNIST ONNX model) -->
<Classifier>
<ModelPath>"../Resource/Models/Classifier/mnist-12.onnx"</ModelPath>
<Scale>0.003922</Scale><!-- input scaling factor (approx. 1/255) -->
<MeanValue1>0.0</MeanValue1><!-- per-channel mean values subtracted from the input -->
<MeanValue2>0.0</MeanValue2>
<MeanValue3>0.0</MeanValue3>
<SwapRB>0</SwapRB>
<Crop>0</Crop>
<UseInt8>0</UseInt8><!-- use int8 inference (not supported) -->
<UseFP16>0</UseFP16><!-- use FP16 inference -->
<AddSoftmax>1</AddSoftmax><!-- append a Softmax computation (set to 0 if the ONNX model already contains softmax) -->
</Classifier>
<!-- Super-resolution reconstruction (ESPCN) -->
<Espcn>
<ModelPath>"../Resource/Models/Super_Resolution/super.onnx"</ModelPath>
</Espcn>
<!-- U-Net segmentation model -->
<Unet>
<ModelPath>"../Resource/Models/Segmentation/unet_13_256.onnx"</ModelPath>
</Unet>
<!-- BERT (question answering, SQuAD ONNX model) -->
<Bert>
<ModelPath>"../Resource/Models/NLP/Bert/bertsquad-10.onnx"</ModelPath>
</Bert>
<!-- GPT-2 (text generation) -->
<GPT2>
<ModelPath>"../Resource/Models/NLP/GPT2/GPT2_shici.onnx"</ModelPath>
</GPT2>
<!-- SSD detector (face detection ONNX model) -->
<DetectorSSD>
<ModelPath>"../Resource/Models/Detector/SSD/yufacedetectnet-open-v2.onnx"</ModelPath>
<Scale>1.0</Scale><!-- input scaling factor -->
<MeanValue1>0</MeanValue1><!-- per-channel mean values, BGR order -->
<MeanValue2>0</MeanValue2>
<MeanValue3>0</MeanValue3>
<SwapRB>0</SwapRB>
<Crop>0</Crop>
<UseInt8>0</UseInt8><!-- use int8 inference (not supported) -->
<UseFP16>0</UseFP16><!-- use FP16 inference -->
<!-- ////////////////// SSD network structure parameters ////////////////// -->
<!-- number of priorbox layers -->
<PriorBoxLayerNumber>4</PriorBoxLayerNumber>
<!-- minSize and maxSize of each priorbox layer (must match the order of the output detection layers; all per-priorbox-layer parameters below must keep the same order) -->
<MinSize11>10</MinSize11>
<MinSize12>16</MinSize12>
<MinSize13>24</MinSize13>
<MinSize21>32</MinSize21>
<MinSize22>48</MinSize22>
<MinSize31>64</MinSize31>
<MinSize32>96</MinSize32>
<MinSize41>128</MinSize41>
<MinSize42>192</MinSize42>
<MinSize43>256</MinSize43>
<!-- Flip and Clip of each priorbox layer (expressed as 0/1) -->
<Flip1>0</Flip1>
<Flip2>0</Flip2>
<Flip3>0</Flip3>
<Flip4>0</Flip4>
<Clip1>0</Clip1>
<Clip2>0</Clip2>
<Clip3>0</Clip3>
<Clip4>0</Clip4>
<!-- Aspect ratios of each priorbox layer (excluding 1, and ignoring flip: e.g. for ratios 0.3333 and 0.25 with flip=true, write only 0.3333 and 0.25; omit this section entirely if the only aspect ratio is 1) -->
<!-- <AspectRatio11>0.3333</AspectRatio11>
<AspectRatio12>0.25</AspectRatio12>
<AspectRatio21>0.3333</AspectRatio21>
<AspectRatio22>0.25</AspectRatio22>
<AspectRatio31>0.3333</AspectRatio31>
<AspectRatio32>0.25</AspectRatio32>
<AspectRatio41>0.3333</AspectRatio41>
<AspectRatio42>0.25</AspectRatio42> -->
<!-- step of each priorbox layer -->
<PriorBoxStepWidth1>8</PriorBoxStepWidth1><!-- step width of the first priorbox layer -->
<PriorBoxStepWidth2>16</PriorBoxStepWidth2>
<PriorBoxStepWidth3>32</PriorBoxStepWidth3>
<PriorBoxStepWidth4>64</PriorBoxStepWidth4>
<PriorBoxStepHeight1>8</PriorBoxStepHeight1><!-- step height of the first priorbox layer -->
<PriorBoxStepHeight2>16</PriorBoxStepHeight2>
<PriorBoxStepHeight3>32</PriorBoxStepHeight3>
<PriorBoxStepHeight4>64</PriorBoxStepHeight4>
<!-- offset used inside the priorbox layers -->
<Offset>0.5</Offset>
<!-- DetectionOutput parameters -->
<ClassNumber>2</ClassNumber>
<TopK>400</TopK>
<KeepTopK>200</KeepTopK>
<NMSThreshold>0.3</NMSThreshold>
<ConfidenceThreshold>0.9</ConfidenceThreshold>
</DetectorSSD>
<!-- RetinaFace detector (face detection ONNX model) -->
<DetectorRetinaFace>
<ModelPath>"../Resource/Models/Detector/RetinaFace/mobilenet0.25_Final.onnx"</ModelPath>
<Scale>1.0</Scale><!-- input scaling factor -->
<MeanValue1>104</MeanValue1><!-- per-channel mean values, BGR order -->
<MeanValue2>117</MeanValue2>
<MeanValue3>123</MeanValue3>
<SwapRB>0</SwapRB>
<Crop>0</Crop>
<UseInt8>0</UseInt8><!-- use int8 inference (not supported) -->
<UseFP16>0</UseFP16><!-- use FP16 inference -->
<!-- ////////////////// RetinaFace detector parameters ////////////////// -->
<!-- number of priorbox layers -->
<PriorBoxLayerNumber>3</PriorBoxLayerNumber>
<!-- minSize and maxSize of each priorbox layer (must match the order of the output detection layers; all per-priorbox-layer parameters below must keep the same order) -->
<MinSize11>16</MinSize11>
<MinSize12>32</MinSize12>
<MinSize21>64</MinSize21>
<MinSize22>128</MinSize22>
<MinSize31>256</MinSize31>
<MinSize32>512</MinSize32>
<!-- Flip and Clip of each priorbox layer (expressed as 0/1) -->
<Flip1>0</Flip1>
<Flip2>0</Flip2>
<Flip3>0</Flip3>
<Clip1>0</Clip1>
<Clip2>0</Clip2>
<Clip3>0</Clip3>
<!-- Aspect ratios of each priorbox layer (RetinaFace only uses anchors with aspect ratio 1, so no ratios need to be set here) -->
<!-- <AspectRatio11>0.3333</AspectRatio11>
<AspectRatio12>0.25</AspectRatio12>
<AspectRatio21>0.3333</AspectRatio21>
<AspectRatio22>0.25</AspectRatio22>
<AspectRatio31>0.3333</AspectRatio31>
<AspectRatio32>0.25</AspectRatio32>
<AspectRatio41>0.3333</AspectRatio41>
<AspectRatio42>0.25</AspectRatio42> -->
<!-- step of each priorbox layer -->
<PriorBoxStepWidth1>8</PriorBoxStepWidth1><!-- step width of the first priorbox layer -->
<PriorBoxStepWidth2>16</PriorBoxStepWidth2>
<PriorBoxStepWidth3>32</PriorBoxStepWidth3>
<PriorBoxStepHeight1>8</PriorBoxStepHeight1><!-- step height of the first priorbox layer -->
<PriorBoxStepHeight2>16</PriorBoxStepHeight2>
<PriorBoxStepHeight3>32</PriorBoxStepHeight3>
<!-- offset used inside the priorbox layers -->
<Offset>0.5</Offset>
<!-- DetectionOutput parameters -->
<ClassNumber>2</ClassNumber>
<TopK>400</TopK>
<KeepTopK>200</KeepTopK>
<NMSThreshold>0.3</NMSThreshold>
<ConfidenceThreshold>0.9</ConfidenceThreshold>
</DetectorRetinaFace>
<!-- YOLOv3 detector -->
<DetectorYOLOV3>
<ModelPath>"../Resource/Models/Detector/YOLOV3/yolov3-tiny.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/Detector/YOLOV3/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!-- use FP16 inference -->
<NumberOfClasses>80</NumberOfClasses><!-- number of classes (background excluded): COCO: 80, VOC: 20 -->
<ConfidenceThreshold>0.2</ConfidenceThreshold>
<NMSThreshold>0.4</NMSThreshold>
<ObjectThreshold>0.4</ObjectThreshold>
</DetectorYOLOV3>
<!-- YOLOv5 detector -->
<DetectorYOLOV5>
<ModelPath>"../Resource/Models/Detector/YOLOV5/yolov5s.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/Detector/YOLOV5/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!-- use FP16 inference -->
<NumberOfClasses>80</NumberOfClasses><!-- number of classes (background excluded): COCO: 80, VOC: 20 -->
<ConfidenceThreshold>0.25</ConfidenceThreshold>
<NMSThreshold>0.5</NMSThreshold>
<ObjectThreshold>0.5</ObjectThreshold>
</DetectorYOLOV5>
<!-- MTCNN detector (only the PNet stage is configured here) -->
<DetectorMTCNN>
<PNet>
<ModelPath>"../Resource/Models/Detector/MTCNN/PNet.onnx"</ModelPath>
<MaxHeight>512</MaxHeight>
<MaxWidth>512</MaxWidth>
<ConfidenceThreshold>0.90</ConfidenceThreshold>
<UseFP16>0</UseFP16><!-- use FP16 inference -->
</PNet>
</DetectorMTCNN>
<!-- YOLOv7 detector -->
<DetectorYOLOV7>
<ModelPath>"../Resource/Models/Detector/YOLOV7/yolov7-tiny.onnx"</ModelPath>
<ClassNameFile>"../Resource/Models/Detector/YOLOV7/coco.names"</ClassNameFile>
<UseFP16>0</UseFP16><!-- use FP16 inference -->
<NumberOfClasses>80</NumberOfClasses><!-- number of classes (background excluded): COCO: 80, VOC: 20 -->
<ConfidenceThreshold>0.25</ConfidenceThreshold>
<NMSThreshold>0.5</NMSThreshold>
<ObjectThreshold>0.5</ObjectThreshold>
</DetectorYOLOV7>
<!-- CRNN dynamic-input text recognition -->
<CrnnDynamic>
<ModelPath>"../Resource/Models/Ocr/CRNN/crnn_dynamic.onnx"</ModelPath>
</CrnnDynamic>
<!-- PaddleOCR license-plate text detection (DB model) -->
<OcrDB>
<ModelPath>"../Resource/Models/PaddleOCR/VLPR/db.onnx"</ModelPath>
<BinaryThreshold>0.3</BinaryThreshold>
<BoxThreshold>0.5</BoxThreshold>
<UnclipRatio>1.6</UnclipRatio>
<LimitSideLen>2500</LimitSideLen>
<ScoreMode>"fast"</ScoreMode>
</OcrDB>
<!-- PaddleOCR license-plate text recognition (SVTR model) -->
<OcrSVTR>
<ModelPath>"../Resource/Models/PaddleOCR/VLPR/svtr.onnx"</ModelPath>
<DictPath>"../Resource/Models/PaddleOCR/VLPR/ppocr_keys_v1.txt"</DictPath>
</OcrSVTR>
</opencv_storage>
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment