Unverified Commit 5e4d1891 authored by Double_V, committed by GitHub

Merge branch 'dygraph' into sdmgr

parents 0cc1b5dc e323c8bd
Global:
  use_gpu: True
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 5
  save_model_dir: ./output/rec/srn_new
  save_epoch_step: 3
  # evaluation is run every 5000 iterations
  eval_batch_step: [0, 5000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words/ch/word_1.jpg
  # for data or label process
  character_dict_path:
  max_text_length: 25
  num_heads: 8
  infer_mode: False
  use_space_char: False
  save_res_path: ./output/rec/predicts_srn.txt
Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  clip_norm: 10.0
  lr:
    learning_rate: 0.0001

Architecture:
  model_type: rec
  algorithm: SRN
  in_channels: 1
  Transform:
  Backbone:
    name: ResNetFPN
  Head:
    name: SRNHead
    max_text_length: 25
    num_heads: 8
    num_encoder_TUs: 2
    num_decoder_TUs: 4
    hidden_dims: 512

Loss:
  name: SRNLoss

PostProcess:
  name: SRNLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - SRNLabelEncode: # Class handling label
      - SRNRecResizeImg:
          image_shape: [1, 64, 256]
      - KeepKeys:
          keep_keys: ['image',
                      'label',
                      'length',
                      'encoder_word_pos',
                      'gsrm_word_pos',
                      'gsrm_slf_attn_bias1',
                      'gsrm_slf_attn_bias2'] # dataloader will return list in this order
  loader:
    shuffle: False
    batch_size_per_card: 64
    drop_last: False
    num_workers: 4

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - SRNLabelEncode: # Class handling label
      - SRNRecResizeImg:
          image_shape: [1, 64, 256]
      - KeepKeys:
          keep_keys: ['image',
                      'label',
                      'length',
                      'encoder_word_pos',
                      'gsrm_word_pos',
                      'gsrm_slf_attn_bias1',
                      'gsrm_slf_attn_bias2']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 32
    num_workers: 4
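For reference, a minimal sketch of how this config is driven through the PaddleOCR tools referenced later in this commit (`tools/train.py`, `tools/eval.py`, `tools/export_model.py`). The checkpoint and output paths below are illustrative assumptions, not values taken from the config:

```shell
# train SRN with this config (dataset assumed to exist under ./train_data/ic15_data)
python3 tools/train.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml
# evaluate a trained checkpoint (checkpoint path assumed)
python3 tools/eval.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml \
    -o Global.checkpoints=./output/rec/srn_new/best_accuracy
# export an inference model (save dir assumed)
python3 tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml \
    -o Global.checkpoints=./output/rec/srn_new/best_accuracy Global.save_inference_dir=./inference/rec_srn/
```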
===========================train_params===========================
model_name:rec_r50_fpn_vd_none_srn
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.checkpoints:
norm_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
train_model:./inference/rec_r50_vd_srn_train/best_accuracy
infer_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
infer_quant:False
inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="1,64,256" --rec_algorithm="SRN" --use_space_char=False
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
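The params file above is consumed by the TIPC harness in two steps: `prepare.sh` downloads the data and pretrained models for the requested mode, and `test_train_inference_python.sh` runs the chain the file describes. A minimal sketch, assuming the file is saved as `test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt` (the exact filename is not shown above):

```shell
# prepare data and models for the lite_train_lite_infer mode (params file path assumed)
bash test_tipc/prepare.sh ./test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt 'lite_train_lite_infer'
# run the train -> export -> inference chain described by the params file
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt 'lite_train_lite_infer'
# logs are written to ./test_tipc/output/, e.g. results_python.log and python_infer_*.log
```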
# Jetson basic training and inference functional test
The main entry for the basic training and inference functional test on Jetson is `test_inference_inference.sh`. Since the CPU on Jetson is relatively weak, only the GPU and TensorRT inference parts of TIPC need to be tested on Jetson.
## 1. Summary of test results
...@@ -40,21 +40,21 @@
### 2.2 Functional test
First run `prepare.sh` to prepare the data and models, then run `test_inference_inference.sh` for the test; log files in the `python_infer_*.log` format are finally generated under the `test_tipc/output` directory.
`test_inference_inference.sh` has only one mode, `whole_infer`; on Jetson only this inference mode needs to be tested:
```
- Mode 3: whole_infer, no training, prediction on the full dataset; it walks through open-source model evaluation and dynamic-to-static export, and checks the prediction time and accuracy of the inference model;
```
```shell
bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
# Usage 1:
bash test_tipc/test_inference_inference.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
# Usage 2: run inference on a specified GPU card; the third argument is the GPU card id
bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
```
After running the corresponding command, the run logs are saved automatically under the `test_tipc/output` folder. For example, in `whole_infer` mode the export + inference chain is run, so the `test_tipc/output` folder contains the following files:
```
test_tipc/output/
|- results_python.log    # log of the status of each executed command
......
...@@ -25,7 +25,7 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
# pretrain lite train data
wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate
if [[ ${model_name} =~ "PPOCRv2_det" ]];then
wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar --no-check-certificate
cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../
fi
...@@ -45,12 +45,12 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../
cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../
fi
if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ] || [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then
wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../
fi
if [ ${model_name} == "det_mv3_db_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate
...@@ -61,6 +61,10 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "ch_ppocr_mobile_v2.0_rec_FPGM" ]; then
wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar --no-check-certificate
cd ./pretrain_models/ && tar xf ch_ppocr_mobile_v2.0_rec_train.tar && cd ../
fi
elif [ ${MODE} = "whole_train_whole_infer" ];then elif [ ${MODE} = "whole_train_whole_infer" ];then
wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
...@@ -74,15 +78,15 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then ...@@ -74,15 +78,15 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then
cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../ cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../
fi fi
if [ ${model_name} == "en_server_pgnetA" ]; then if [ ${model_name} == "en_server_pgnetA" ]; then
wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/total_text.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../ cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../
cd ./train_data && tar xf total_text.tar && ln -s total_text && cd ../ cd ./train_data && tar xf total_text.tar && ln -s total_text_lite total_text && cd ../
fi fi
if [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then if [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then
wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/total_text.tar --no-check-certificate wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
cd ./train_data && tar xf total_text.tar && ln -s total_text && cd ../ cd ./train_data && tar xf total_text.tar && ln -s total_text_lite total_text && cd ../
fi fi
elif [ ${MODE} = "lite_train_whole_infer" ];then elif [ ${MODE} = "lite_train_whole_infer" ];then
wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
...@@ -99,68 +103,145 @@ elif [ ${MODE} = "lite_train_whole_infer" ];then ...@@ -99,68 +103,145 @@ elif [ ${MODE} = "lite_train_whole_infer" ];then
fi fi
elif [ ${MODE} = "whole_infer" ];then elif [ ${MODE} = "whole_infer" ];then
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate
cd ./inference && tar xf rec_inference.tar && tar xf ch_det_data_50.tar && cd ../
if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_train" eval_model_name="ch_ppocr_mobile_v2.0_det_train"
rm -rf ./train_data/icdar2015 rm -rf ./train_data/icdar2015
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate
cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && cd ../ cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && cd ../
elif [ ${model_name} = "ch_ppocr_mobile_v2.0_det_PACT" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_prune_infer"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar --no-check-certificate
cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
elif [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then elif [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
cd ./inference && tar xf ch_ppocr_server_v2.0_det_train.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ch_ppocr_server_v2.0_det_train.tar && tar xf ch_det_data_50.tar && cd ../
elif [ ${model_name} = "ch_ppocr_mobile_v2.0" ]; then elif [ ${model_name} = "ch_ppocr_mobile_v2.0" ]; then
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate
cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../
elif [ ${model_name} = "ch_ppocr_server_v2.0" ]; then elif [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate
cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../
elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ]; then elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_PACT" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_rec_infer" eval_model_name="ch_ppocr_mobile_v2.0_rec_slim_infer"
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && cd ../
cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../ elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_FPGM" ]; then
elif [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then eval_model_name="ch_PP-OCRv2_rec_infer"
eval_model_name="ch_ppocr_server_v2.0_rec_infer" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && cd ../
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate
cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../
fi fi
if [ ${model_name} = "ch_PPOCRv2_det" ]; then if [[ ${model_name} =~ "ch_PPOCRv2_det" ]]; then
eval_model_name="ch_PP-OCRv2_det_infer" eval_model_name="ch_PP-OCRv2_det_infer"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate
cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
fi fi
if [ ${model_name} = "ch_PPOCRv2_det" ]; then if [[ ${model_name} =~ "PPOCRv2_ocr_rec" ]]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate eval_model_name="ch_PP-OCRv2_rec_infer"
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/e2e_server_pgnetA_infer.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate
cd ./inference && tar xf e2e_server_pgnetA_infer.tar && tar xf ch_det_data_50.tar && cd ../ wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar --no-check-certificate
fi cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_PP-OCRv2_rec_slim_quant_infer.tar && cd ../
fi
if [ ${model_name} == "en_server_pgnetA" ]; then if [ ${model_name} == "en_server_pgnetA" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
cd ./inference && tar xf en_server_pgnetA.tar && cd ../ cd ./inference && tar xf en_server_pgnetA.tar && tar xf ch_det_data_50.tar && cd ../
fi fi
if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ]; then if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_r50_vd_sast_icdar15_v2.0_train.tar && cd ../ cd ./inference/ && tar xf det_r50_vd_sast_icdar15_v2.0_train.tar && tar xf ch_det_data_50.tar && cd ../
fi
if [ ${model_name} == "rec_mv3_none_none_ctc_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_none_ctc_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_mv3_none_none_ctc_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_r34_vd_none_none_ctc_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_none_ctc_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_r34_vd_none_none_ctc_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_mv3_none_bilstm_ctc_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_mv3_none_bilstm_ctc_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_r34_vd_none_bilstm_ctc_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_r34_vd_none_bilstm_ctc_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_mv3_tps_bilstm_ctc_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_mv3_tps_bilstm_ctc_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_r34_vd_tps_bilstm_ctc_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "ch_ppocr_server_v2.0_rec" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/ch_ppocr_server_v2.0_rec_train.tar --no-check-certificate
cd ./inference/ && tar xf ch_ppocr_server_v2.0_rec_train.tar && cd ../
fi
if [ ${model_name} == "ch_ppocr_mobile_v2.0_rec" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar --no-check-certificate
cd ./inference/ && tar xf ch_ppocr_mobile_v2.0_rec_train.tar && cd ../
fi
if [ ${model_name} == "rec_mtb_nrtr" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_mtb_nrtr_train.tar && cd ../
fi
if [ ${model_name} == "rec_mv3_tps_bilstm_att_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_mv3_tps_bilstm_att_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_r34_vd_tps_bilstm_att_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_r34_vd_tps_bilstm_att_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "rec_r31_sar" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_r31_sar_train.tar && cd ../
fi
if [ ${model_name} == "rec_r50_fpn_vd_none_srn" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar --no-check-certificate
cd ./inference/ && tar xf rec_r50_vd_srn_train.tar && cd ../
fi
if [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_r50_vd_sast_totaltext_v2.0_train.tar && cd ../
fi
if [ ${model_name} == "det_mv3_db_v2.0" ]; then if [ ${model_name} == "det_mv3_db_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_mv3_db_v2.0_train.tar && cd ../ cd ./inference/ && tar xf det_mv3_db_v2.0_train.tar && tar xf ch_det_data_50.tar && cd ../
fi fi
if [ ${model_name} == "det_r50_db_v2.0" ]; then if [ ${model_name} == "det_r50_db_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && cd ../ cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && tar xf ch_det_data_50.tar && cd ../
fi
if [ ${model_name} == "det_mv3_pse_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_mv3_pse_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_mv3_pse_v2.0_train.tar & cd ../
fi
if [ ${model_name} == "det_r50_vd_pse_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_vd_pse_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_r50_vd_pse_v2.0_train.tar & cd ../
fi
if [ ${model_name} == "det_mv3_east_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_mv3_east_v2.0_train.tar & cd ../
fi
if [ ${model_name} == "det_r50_vd_east_v2.0" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar --no-check-certificate
cd ./inference/ && tar xf det_r50_vd_east_v2.0_train.tar & cd ../
fi fi
fi
if [ ${MODE} = "klquant_whole_infer" ]; then if [ ${MODE} = "klquant_whole_infer" ]; then
if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar --no-check-certificate
cd ./train_data/ && tar xf icdar2015_lite.tar
ln -s ./icdar2015_lite ./icdar2015 && cd ../
if [ ${model_name} = "ch_ppocr_mobile_v2.0_det_KL" ]; then
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
...@@ -171,6 +252,13 @@ if [ ${MODE} = "klquant_whole_infer" ]; then ...@@ -171,6 +252,13 @@ if [ ${MODE} = "klquant_whole_infer" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate
cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
fi fi
if [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_KL" ]; then
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate
wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate
wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate
cd ./train_data/ && tar xf ic15_data.tar && cd ../
cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../
fi
fi
if [ ${MODE} = "cpp_infer" ];then if [ ${MODE} = "cpp_infer" ];then
...@@ -192,8 +280,11 @@ fi ...@@ -192,8 +280,11 @@ fi
if [ ${MODE} = "serving_infer" ];then if [ ${MODE} = "serving_infer" ];then
# prepare serving env # prepare serving env
python_name=$(func_parser_value "${lines[2]}") python_name_list=$(func_parser_value "${lines[2]}")
wget https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl IFS='|'
array=(${python_name_list})
python_name=${array[0]}
wget -nc https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl ${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
${python_name} -m pip install paddle_serving_client==0.6.1 ${python_name} -m pip install paddle_serving_client==0.6.1
${python_name} -m pip install paddle-serving-app==0.6.3 ${python_name} -m pip install paddle-serving-app==0.6.3
......
# PaddlePaddle Training and Inference Pipeline (TIPC)
## 1. Introduction
In addition to basic model training and prediction, PaddlePaddle provides high-performance inference deployment tools for multiple devices and platforms. This document provides the full training-and-inference pipeline (Training and Inference Pipeline Criterion, TIPC) information and test tools for every model in PaddleOCR, so that users can look up how far each model's training, inference and deployment have been verified and run the tests with one command.
<div align="center">
<img src="docs/guide.png" width="1000">
......
...@@ -64,10 +64,11 @@ function func_cpp_inference(){
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_mkldnn=$(func_set_params "${cpp_use_mkldnn_key}" "${use_mkldnn}")
set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
......
#!/bin/bash
source test_tipc/common_func.sh
#source test_tipc/test_train_inference_python.sh
FILENAME=$1
# MODE be one of ['whole_infer']
MODE=$2
dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)
# parser params
IFS=$'\n'
...@@ -35,18 +35,101 @@ precision_list=$(func_parser_value "${lines[12]}")
infer_model_key=$(func_parser_key "${lines[13]}")
image_dir_key=$(func_parser_key "${lines[14]}")
infer_img_dir=$(func_parser_value "${lines[14]}")
rec_model_key=$(func_parser_key "${lines[15]}")
rec_model_value=$(func_parser_value "${lines[15]}")
benchmark_key=$(func_parser_key "${lines[16]}")
benchmark_value=$(func_parser_value "${lines[16]}")
infer_key1=$(func_parser_key "${lines[17]}")
infer_value1=$(func_parser_value "${lines[17]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
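The parsing above relies on helpers from `test_tipc/common_func.sh`: each line of the params file is a `key:value` pair, and `func_set_params` turns a key/value pair into a command-line flag, returning a blank when either side is null. A minimal sketch of the assumed behaviour (not the verbatim source):

```shell
# assumed behaviour of the common_func.sh helpers used above
function func_parser_key(){
    IFS=":"
    array=($1)
    echo ${array[0]}    # text before the first colon
}
function func_parser_value(){
    IFS=":"
    array=($1)
    echo ${array[1]}    # text after the first colon
}
function func_set_params(){
    key=$1
    value=$2
    # drop flags whose key or value is "null"/empty so the assembled command stays valid
    if [ -z "${key}" ] || [ "${key}" = "null" ] || [ -z "${value}" ] || [ "${value}" = "null" ]; then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
```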
function func_inference(){
IFS='|'
_python=$1
_script=$2
_model_dir=$3
_log_path=$4
_img_dir=$5
_flag_quant=$6
# inference
for use_gpu in ${use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
continue
fi
for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do
for precision in ${precision_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
continue
fi # skip when enable fp16 but disable mkldnn
if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
continue
fi # skip when quant model inference but precision is not int8
set_precision=$(func_set_params "${precision_key}" "${precision}")
_save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
done
elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
for use_trt in ${use_trt_list[*]}; do
for precision in ${precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
continue
fi
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${precision_key}" "${precision}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
if [ ${MODE} = "whole_infer" ]; then if [ ${MODE} = "whole_infer" ]; then
GPUID=$3 GPUID=$3
if [ ${#GPUID} -le 0 ];then if [ ${#GPUID} -le 0 ];then
...@@ -68,7 +151,6 @@ if [ ${MODE} = "whole_infer" ]; then ...@@ -68,7 +151,6 @@ if [ ${MODE} = "whole_infer" ]; then
set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
echo ${infer_run_exports[Count]} echo ${infer_run_exports[Count]}
echo $export_cmd
eval $export_cmd eval $export_cmd
status_export=$? status_export=$?
status_check $status_export "${export_cmd}" "${status_log}" status_check $status_export "${export_cmd}" "${status_log}"
...@@ -85,3 +167,4 @@ if [ ${MODE} = "whole_infer" ]; then ...@@ -85,3 +167,4 @@ if [ ${MODE} = "whole_infer" ]; then
done done
fi fi
...@@ -10,7 +10,7 @@ lines=(${dataline})
# parser serving
model_name=$(func_parser_value "${lines[1]}")
python_list=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
...@@ -54,14 +54,15 @@ function func_serving(){
set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
python_list=(${python_list})
trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval $trans_model_cmd
cd ${serving_dir_value}
echo $PWD
unset https_proxy
unset http_proxy
for python in ${python_list[*]}; do
if [ ${python} = "cpp" ]; then
for use_gpu in ${web_use_gpu_list[*]}; do
if [ ${use_gpu} = "null" ]; then
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
...@@ -91,9 +92,6 @@ function func_serving(){
echo ${ues_gpu}
if [ ${use_gpu} = "null" ]; then
for use_mkldnn in ${web_use_mkldnn_list[*]}; do
for threads in ${web_cpu_threads_list[*]}; do
set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
...@@ -124,6 +122,9 @@ function func_serving(){
continue
fi
set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
if [ ${use_trt} = True ]; then
device_type=2
fi
set_precision=$(func_set_params "${web_precision_key}" "${precision}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
eval $web_service_cmd
......
...@@ -90,36 +90,39 @@ infer_value1=$(func_parser_value "${lines[50]}")
# parser klquant_infer
if [ ${MODE} = "klquant_whole_infer" ]; then
dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
export_weight=$(func_parser_key "${lines[3]}")
save_infer_key=$(func_parser_key "${lines[4]}")
# parser inference model
infer_model_dir_list=$(func_parser_value "${lines[5]}")
infer_export_list=$(func_parser_value "${lines[6]}")
infer_is_quant=$(func_parser_value "${lines[7]}")
# parser inference
inference_py=$(func_parser_value "${lines[8]}")
use_gpu_key=$(func_parser_key "${lines[9]}")
use_gpu_list=$(func_parser_value "${lines[9]}")
use_mkldnn_key=$(func_parser_key "${lines[10]}")
use_mkldnn_list=$(func_parser_value "${lines[10]}")
cpu_threads_key=$(func_parser_key "${lines[11]}")
cpu_threads_list=$(func_parser_value "${lines[11]}")
batch_size_key=$(func_parser_key "${lines[12]}")
batch_size_list=$(func_parser_value "${lines[12]}")
use_trt_key=$(func_parser_key "${lines[13]}")
use_trt_list=$(func_parser_value "${lines[13]}")
precision_key=$(func_parser_key "${lines[14]}")
precision_list=$(func_parser_value "${lines[14]}")
infer_model_key=$(func_parser_key "${lines[15]}")
image_dir_key=$(func_parser_key "${lines[16]}")
infer_img_dir=$(func_parser_value "${lines[16]}")
save_log_key=$(func_parser_key "${lines[17]}")
save_log_value=$(func_parser_value "${lines[17]}")
benchmark_key=$(func_parser_key "${lines[18]}")
benchmark_value=$(func_parser_value "${lines[18]}")
infer_key1=$(func_parser_key "${lines[19]}")
infer_value1=$(func_parser_value "${lines[19]}")
fi
LOG_PATH="./test_tipc/output"
...@@ -157,10 +160,12 @@ function func_inference(){
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
...@@ -189,8 +194,9 @@ function func_inference(){
set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${precision_key}" "${precision}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
...@@ -235,7 +241,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
fi
#run inference
is_quant=${infer_quant_flag[Count]}
if [ ${MODE} = "klquant_whole_infer" ]; then
is_quant="True"
fi
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
...@@ -316,10 +322,6 @@ else
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
fi
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
...@@ -335,10 +337,7 @@ else
status_check $? "${cmd}" "${status_log}"
set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
# run eval
if [ ${eval_py} != "null" ]; then
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
......
...@@ -145,8 +145,6 @@ def main(args):
for ino in range(len(img_list)):
logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
cls_res[ino]))
if __name__ == "__main__":
......
...@@ -195,6 +195,7 @@ def create_predictor(args, mode, logger):
max_batch_size=args.max_batch_size,
min_subgraph_size=args.min_subgraph_size)
# skip the minimum trt subgraph
use_dynamic_shape = True
if mode == "det": if mode == "det":
min_input_shape = { min_input_shape = {
"x": [1, 3, 50, 50], "x": [1, 3, 50, 50],
...@@ -211,7 +212,7 @@ def create_predictor(args, mode, logger): ...@@ -211,7 +212,7 @@ def create_predictor(args, mode, logger):
"nearest_interp_v2_0.tmp_0": [1, 256, 2, 2] "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
} }
max_input_shape = { max_input_shape = {
"x": [1, 3, 1280, 1280], "x": [1, 3, 1536, 1536],
"conv2d_92.tmp_0": [1, 120, 400, 400], "conv2d_92.tmp_0": [1, 120, 400, 400],
"conv2d_91.tmp_0": [1, 24, 200, 200], "conv2d_91.tmp_0": [1, 24, 200, 200],
"conv2d_59.tmp_0": [1, 96, 400, 400], "conv2d_59.tmp_0": [1, 96, 400, 400],
...@@ -260,19 +261,20 @@ def create_predictor(args, mode, logger): ...@@ -260,19 +261,20 @@ def create_predictor(args, mode, logger):
max_input_shape.update(max_pact_shape) max_input_shape.update(max_pact_shape)
opt_input_shape.update(opt_pact_shape) opt_input_shape.update(opt_pact_shape)
elif mode == "rec": elif mode == "rec":
if args.rec_algorithm != "CRNN":
use_dynamic_shape = False
min_input_shape = {"x": [1, 3, 32, 10]} min_input_shape = {"x": [1, 3, 32, 10]}
max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]} max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1536]}
opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]} opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]}
elif mode == "cls": elif mode == "cls":
min_input_shape = {"x": [1, 3, 48, 10]} min_input_shape = {"x": [1, 3, 48, 10]}
max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]} max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]}
opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]} opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]}
else: else:
min_input_shape = {"x": [1, 3, 10, 10]} use_dynamic_shape = False
max_input_shape = {"x": [1, 3, 512, 512]} if use_dynamic_shape:
opt_input_shape = {"x": [1, 3, 256, 256]} config.set_trt_dynamic_shape_info(
config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape, min_input_shape, max_input_shape, opt_input_shape)
opt_input_shape)
else: else:
config.disable_gpu() config.disable_gpu()
...@@ -311,7 +313,10 @@ def create_predictor(args, mode, logger): ...@@ -311,7 +313,10 @@ def create_predictor(args, mode, logger):
def get_infer_gpuid(): def get_infer_gpuid():
cmd = "env | grep CUDA_VISIBLE_DEVICES" if not paddle.fluid.core.is_compiled_with_rocm():
cmd = "env | grep CUDA_VISIBLE_DEVICES"
else:
cmd = "env | grep HIP_VISIBLE_DEVICES"
env_cuda = os.popen(cmd).readlines()
if len(env_cuda) == 0:
return 0
......
...@@ -53,6 +53,7 @@ def draw_det_res(dt_boxes, config, img, img_name, save_path):
logger.info("The detected Image saved in {}".format(save_path))
@paddle.no_grad()
def main():
global_config = config['Global']
......