wangsen / paddle_dbnet · Commit 3c906d41

merge dygraph

Authored Sep 27, 2021 by tink2123
Parents: 8308f332, 6a41a37a

Changes: 97 · Showing 17 changed files with 492 additions and 21 deletions (+492 -21)
tests/configs/rec_icdar15_r34_train.yml          +99   -0
tests/ocr_det_params.txt                          +3   -3
tests/ocr_kl_quant_params.txt                    +51   -0
tests/ocr_ppocr_mobile_params.txt                 +1   -1
tests/ocr_ppocr_server_params.txt                +66   -0
tests/ocr_rec_params.txt                         +16   -1
tests/ocr_rec_server_params.txt                  +81   -0
tests/prepare.sh                                 +19   -5
tests/readme.md                                   +6   -2
tests/results/det_results_gpu_trt_fp16_cpp.txt   +50   -0
tests/results/det_results_gpu_trt_fp32_cpp.txt   +50   -0
tests/test.sh                                     +4   -2
tools/export_model.py                             +2   -0
tools/infer/predict_det.py                       +12   -2
tools/infer/predict_rec.py                       +22   -3
tools/infer/utility.py                            +7   -0
tools/program.py                                  +3   -2
tests/configs/rec_icdar15_r34_train.yml (new file, 0 → 100644)

Global:
  use_gpu: true
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/ic15/
  save_epoch_step: 3
  # evaluation is run every 2000 iterations
  eval_batch_step: [0, 2000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir: ./
  use_visualdl: False
  infer_img: doc/imgs_words_en/word_10.png
  # for data or label process
  character_dict_path: ppocr/utils/en_dict.txt
  character_type: EN
  max_text_length: 25
  infer_mode: False
  use_space_char: False
  save_res_path: ./output/rec/predicts_ic15.txt

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.0005
  regularizer:
    name: 'L2'
    factor: 0

Architecture:
  model_type: rec
  algorithm: CRNN
  Transform:
  Backbone:
    name: ResNet
    layers: 34
  Neck:
    name: SequenceEncoder
    encoder_type: rnn
    hidden_size: 256
  Head:
    name: CTCHead
    fc_decay: 0

Loss:
  name: CTCLoss

PostProcess:
  name: CTCLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 256
    drop_last: True
    num_workers: 8
    use_shared_memory: False

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - CTCLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 4
    use_shared_memory: False
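For context, the recognition test chain drives this config with commands like the following. This is a sketch assembled from the entries in tests/ocr_rec_server_params.txt below; the -o overrides shown here are illustrative, not the exact ones test.sh injects.

# train (test.sh supplies overrides such as epoch_num and save_model_dir)
python3.7 tools/train.py -c tests/configs/rec_icdar15_r34_train.yml -o Global.epoch_num=2 Global.save_model_dir=./output/
# evaluate the trained weights
python3.7 tools/eval.py -c tests/configs/rec_icdar15_r34_train.yml -o Global.checkpoints=./output/latest
# export an inference model
python3.7 tools/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o Global.pretrained_model=./output/latest Global.save_inference_dir=./output/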
tests/ocr_det_params.txt

@@ -12,7 +12,7 @@ train_model_name:latest
 train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
 null:null
 ##
-trainer:norm_train|pact_train
+trainer:norm_train|pact_train|fpgm_train
 norm_train:tools/train.py -c tests/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 pact_train:deploy/slim/quantization/quant.py -c tests/configs/det_mv3_db.yml -o
 fpgm_train:deploy/slim/prune/sensitivity_anal.py -c tests/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
@@ -21,7 +21,7 @@ null:null
 null:null
 ##
 ===========================eval_params===========================
-eval:tools/eval.py -c tests/configs/det_mv3_db.yml -o
+eval:null
 null:null
 ##
 ===========================infer_params===========================
@@ -35,7 +35,7 @@ export1:null
 export2:null
 ##
 train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
-infer_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
+infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
 infer_quant:False
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
tests/ocr_kl_quant_params.txt (new file, 0 → 100644)

===========================train_params===========================
model_name:ocr_system
python:python3.7
gpu_list:null
Global.use_gpu:null
Global.auto_cast:null
Global.epoch_num:null
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:null
Global.pretrained_model:null
train_model_name:null
train_infer_img_dir:null
null:null
##
trainer:
norm_train:null
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:null
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
kl_quant:deploy/slim/quantization/quant_kl.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:TrueFalse
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
--save_log_path:null
--benchmark:True
null:null
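These params files follow a simple key:value convention that tests/test.sh splits on the first colon (for example via the func_parser_value helper visible in the prepare.sh diff below). A minimal bash sketch of that convention, not the repository's exact implementation:

# Hypothetical helpers for illustration only; the real ones live in tests/test.sh.
parse_key()   { echo "${1%%:*}"; }   # text before the first ':'
parse_value() { echo "${1#*:}"; }    # text after the first ':'

line="inference:tools/infer/predict_det.py"
parse_key "$line"     # -> inference
parse_value "$line"   # -> tools/infer/predict_det.py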
tests/ocr_ppocr_mobile_params.txt

 ===========================train_params===========================
-model_name:ocr_system
+model_name:ocr_system_mobile
 python:python3.7
 gpu_list:null
 Global.use_gpu:null
 ...
tests/ocr_ppocr_server_params.txt (new file, 0 → 100644)

===========================train_params===========================
model_name:ocr_system_server
python:python3.7
gpu_list:null
Global.use_gpu:null
Global.auto_cast:null
Global.epoch_num:null
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:null
Global.pretrained_model:null
train_model_name:null
train_infer_img_dir:null
null:null
##
trainer:
norm_train:null
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:null
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
infer_export:null
infer_quant:False
inference:tools/infer/predict_system.py
--use_gpu:True
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
--save_log_path:null
--benchmark:True
--rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr system
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
--rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/
--benchmark:True
\ No newline at end of file
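From these fields test.sh assembles a system-inference command roughly like the one below. This is a sketch; the exact flag set, value combinations, and ordering produced by test.sh may differ.

python3.7 tools/infer/predict_system.py --use_gpu=True --enable_mkldnn=True --cpu_threads=6 \
    --det_model_dir=./inference/ch_ppocr_server_v2.0_det_infer/ \
    --rec_model_dir=./inference/ch_ppocr_server_v2.0_rec_infer/ \
    --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True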
tests/ocr_rec_params.txt

@@ -64,3 +64,18 @@ inference:./deploy/cpp_infer/build/ppocr rec
 --image_dir:./inference/rec_inference/
 null:null
 --benchmark:True
+===========================serving_params===========================
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
+op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.use_mkldnn:True|False
+op.rec.local_service_conf.thread_num:1|6
+op.rec.local_service_conf.use_trt:False|True
+op.rec.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
\ No newline at end of file
tests/ocr_rec_server_params.txt (new file, 0 → 100644)

===========================train_params===========================
model_name:ocr_server_rec
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_infer=2|whole_train_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train|pact_train
norm_train:tools/train.py -c tests/configs/rec_icdar15_r34_train.yml -o
pact_train:deploy/slim/quantization/quant.py -c tests/configs/rec_icdar15_r34_train.yml -o
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c tests/configs/rec_icdar15_r34_train.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o
quant_export:deploy/slim/quantization/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/
infer_export:null
infer_quant:False
inference:tools/infer/predict_rec.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|fp16|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr rec
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--rec_model_dir:
--image_dir:./inference/rec_inference/
null:null
--benchmark:True
===========================serving_params===========================
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
op.rec.local_service_conf.devices:null|0
op.rec.local_service_conf.use_mkldnn:True|False
op.rec.local_service_conf.thread_num:1|6
op.rec.local_service_conf.use_trt:False|True
op.rec.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
\ No newline at end of file
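The serving_params block maps onto a Paddle Serving conversion and pipeline test roughly like the following. This is a sketch built directly from the fields above; test.sh substitutes the devices/mkldnn/trt/precision values per run.

python3.7 -m paddle_serving_client.convert --dirname ./inference/ch_ppocr_server_v2.0_rec_infer/ \
    --model_filename inference.pdmodel --params_filename inference.pdiparams \
    --serving_server ./deploy/pdserving/ppocr_rec_server_2.0_serving/ \
    --serving_client ./deploy/pdserving/ppocr_rec_server_2.0_client/
cd ./deploy/pdserving
# start the recognition web service (in one terminal)
python3.7 web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
# send test requests against it (in another terminal)
python3.7 pipeline_http_client.py --image_dir=../../doc/imgs_words_en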
tests/prepare.sh

@@ -75,17 +75,28 @@ elif [ ${MODE} = "infer" ];then
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
        cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
-    elif [ ${model_name} = "ocr_system" ];then
+    elif [ ${model_name} = "ocr_system_mobile" ];then
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
        cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../
-    else
+    elif [ ${model_name} = "ocr_system_server" ];then
+       wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
+       wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
+       wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
+       cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../
+    elif [ ${model_name} = "ocr_rec" ];then
        rm -rf ./train_data/ic15_data
        eval_model_name="ch_ppocr_mobile_v2.0_rec_infer"
        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar
        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
        cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../
+    elif [ ${model_name} = "ocr_server_rec" ];then
+       rm -rf ./train_data/ic15_data
+       eval_model_name="ch_ppocr_server_v2.0_rec_infer"
+       wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar
+       wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
+       cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../
     fi
 elif [ ${MODE} = "cpp_infer" ];then
     if [ ${model_name} = "ocr_det" ];then
@@ -107,12 +118,15 @@ fi
 if [ ${MODE} = "serving_infer" ];then
     # prepare serving env
     python_name=$(func_parser_value "${lines[2]}")
-    ${python_name} -m pip install install paddle-serving-server-gpu==0.6.1.post101
+    wget https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
+    ${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
    ${python_name} -m pip install paddle_serving_client==0.6.1
-    ${python_name} -m pip install paddle-serving-app==0.6.1
+    ${python_name} -m pip install paddle-serving-app==0.6.3
    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
-    cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && cd ../
+    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
+    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
+    cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar
+    cd ../
 fi
 if [ ${MODE} = "cpp_infer" ];then
 ...
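As with the other chains, prepare.sh takes a params file plus a mode string and downloads the matching models and data before test.sh runs. An illustrative pairing for the new server system chain (the mode names follow tests/readme.md and the MODE branches above; other params files and modes work the same way):

bash tests/prepare.sh ./tests/ocr_ppocr_server_params.txt 'infer'
bash tests/test.sh ./tests/ocr_ppocr_server_params.txt 'infer'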
tests/readme.md

-# Introduction
+# Introduction to the train-to-inference-deployment tool chain test method
 test.sh is used together with the params.txt files to run the full training-to-prediction pipeline test for the lightweight OCR detection and recognition models.
@@ -36,7 +36,7 @@ test.sh has four run modes; each mode uses different data, used respectively for
 - Mode 1: lite_train_infer, which trains on a small amount of data to quickly verify that the training-to-prediction pipeline runs end to end, without checking accuracy or speed;
 ```shell
-bash test/prepare.sh ./tests/ocr_det_params.txt 'lite_train_infer'
+bash tests/prepare.sh ./tests/ocr_det_params.txt 'lite_train_infer'
 bash tests/test.sh ./tests/ocr_det_params.txt 'lite_train_infer'
 ```
@@ -66,3 +66,7 @@ bash tests/test.sh ./tests/ocr_det_params.txt 'whole_train_infer'
 bash tests/prepare.sh ./tests/ocr_det_params.txt 'cpp_infer'
 bash tests/test.sh ./tests/ocr_det_params.txt 'cpp_infer'
 ```
+# Log output
+Log files with a .log suffix are finally generated under the ```tests/output``` directory.
tests/results/det_results_gpu_trt_fp16_cpp.txt (new file, 0 → 100644; diff collapsed, contents not shown)

tests/results/det_results_gpu_trt_fp32_cpp.txt (new file, 0 → 100644; diff collapsed, contents not shown)
tests/test.sh

@@ -321,7 +321,7 @@ function func_serving(){
                if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                    continue
                fi
-                if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]]; then
+                if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                    continue
                fi
                _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
@@ -433,7 +433,9 @@ if [ ${MODE} = "infer" ]; then
        save_infer_dir=$(dirname $infer_model)
        set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
        set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
-        export_cmd="${python} ${norm_export} ${set_export_weight} ${set_save_infer_key}"
+        export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
+        echo ${infer_run_exports[Count]}
+        echo $export_cmd
        eval $export_cmd
        status_export=$?
        status_check $status_export "${export_cmd}" "${status_log}"
 ...
tools/export_model.py

@@ -60,6 +60,8 @@ def export_single_model(model, arch_config, save_path, logger):
                 "When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training"
             )
             infer_shape[-1] = 100
+        if arch_config["algorithm"] == "NRTR":
+            infer_shape = [1, 32, 100]
     elif arch_config["model_type"] == "table":
         infer_shape = [3, 488, 488]
     model = to_static(
 ...
tools/infer/predict_det.py

@@ -89,6 +89,14 @@ class TextDetector(object):
             postprocess_params["sample_pts_num"] = 2
             postprocess_params["expand_scale"] = 1.0
             postprocess_params["shrink_ratio_of_width"] = 0.3
+        elif self.det_algorithm == "PSE":
+            postprocess_params['name'] = 'PSEPostProcess'
+            postprocess_params["thresh"] = args.det_pse_thresh
+            postprocess_params["box_thresh"] = args.det_pse_box_thresh
+            postprocess_params["min_area"] = args.det_pse_min_area
+            postprocess_params["box_type"] = args.det_pse_box_type
+            postprocess_params["scale"] = args.det_pse_scale
+            self.det_pse_box_type = args.det_pse_box_type
         else:
             logger.info("unknown det_algorithm:{}".format(self.det_algorithm))
             sys.exit(0)
@@ -209,7 +217,7 @@ class TextDetector(object):
             preds['f_score'] = outputs[1]
             preds['f_tco'] = outputs[2]
             preds['f_tvo'] = outputs[3]
-        elif self.det_algorithm == 'DB':
+        elif self.det_algorithm in ['DB', 'PSE']:
             preds['maps'] = outputs[0]
         else:
             raise NotImplementedError
@@ -217,7 +225,9 @@ class TextDetector(object):
         #self.predictor.try_shrink_memory()
         post_result = self.postprocess_op(preds, shape_list)
         dt_boxes = post_result[0]['points']
-        if self.det_algorithm == "SAST" and self.det_sast_polygon:
+        if (self.det_algorithm == "SAST" and self.det_sast_polygon) or (
+                self.det_algorithm == "PSE" and
+                self.det_pse_box_type == 'poly'):
             dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_im.shape)
         else:
             dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)
 ...
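With the new arguments wired through tools/infer/utility.py (see below), a PSE detection run would look roughly like this; the model directory is a hypothetical placeholder, not something downloaded by prepare.sh, and the flag values simply spell out the new defaults.

# NOTE: ./inference/det_pse_infer/ is a hypothetical exported PSE model directory
python3.7 tools/infer/predict_det.py --det_algorithm=PSE \
    --det_model_dir=./inference/det_pse_infer/ \
    --image_dir=./inference/ch_det_data_50/all-sum-510/ \
    --det_pse_thresh=0 --det_pse_box_thresh=0.85 --det_pse_min_area=16 \
    --det_pse_box_type=box --det_pse_scale=1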
tools/infer/predict_rec.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 import os
 import sys
+from PIL import Image
 __dir__ = os.path.dirname(os.path.abspath(__file__))
 sys.path.append(__dir__)
 sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
@@ -61,6 +61,13 @@ class TextRecognizer(object):
                 "character_dict_path": args.rec_char_dict_path,
                 "use_space_char": args.use_space_char
             }
+        elif self.rec_algorithm == 'NRTR':
+            postprocess_params = {
+                'name': 'NRTRLabelDecode',
+                "character_type": args.rec_char_type,
+                "character_dict_path": args.rec_char_dict_path,
+                "use_space_char": args.use_space_char
+            }
         self.postprocess_op = build_post_process(postprocess_params)
         self.predictor, self.input_tensor, self.output_tensors, self.config = \
             utility.create_predictor(args, 'rec', logger)
@@ -87,6 +94,16 @@ class TextRecognizer(object):
     def resize_norm_img(self, img, max_wh_ratio):
         imgC, imgH, imgW = self.rec_image_shape
+        if self.rec_algorithm == 'NRTR':
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+            # return padding_im
+            image_pil = Image.fromarray(np.uint8(img))
+            img = image_pil.resize([100, 32], Image.ANTIALIAS)
+            img = np.array(img)
+            norm_img = np.expand_dims(img, -1)
+            norm_img = norm_img.transpose((2, 0, 1))
+            return norm_img.astype(np.float32) / 128. - 1.
+
         assert imgC == img.shape[2]
         max_wh_ratio = max(max_wh_ratio, imgW / imgH)
         imgW = int((32 * max_wh_ratio))
@@ -252,13 +269,15 @@ class TextRecognizer(object):
             else:
                 self.input_tensor.copy_from_cpu(norm_img_batch)
                 self.predictor.run()
                 outputs = []
                 for output_tensor in self.output_tensors:
                     output = output_tensor.copy_to_cpu()
                     outputs.append(output)
                 if self.benchmark:
                     self.autolog.times.stamp()
-                preds = outputs[0]
+                if len(outputs) != 1:
+                    preds = outputs
+                else:
+                    preds = outputs[0]
             rec_result = self.postprocess_op(preds)
             for rno in range(len(rec_result)):
 ...
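Together with the NRTR export shape added to tools/export_model.py and the NRTRLabelDecode branch above, this enables NRTR text recognition at predict time, invoked roughly as follows. Both the model directory and the dictionary path below are illustrative placeholders (this commit adds neither); the flags themselves are existing predict_rec.py arguments.

# NOTE: rec_model_dir and rec_char_dict_path are placeholders for an exported NRTR model and its dictionary
python3.7 tools/infer/predict_rec.py --rec_algorithm=NRTR \
    --rec_model_dir=./inference/rec_nrtr_infer/ \
    --rec_image_shape="1,32,100" \
    --rec_char_dict_path=./ppocr/utils/en_dict.txt \
    --image_dir=./inference/rec_inference/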
tools/infer/utility.py

@@ -63,6 +63,13 @@ def init_args():
     parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
     parser.add_argument("--det_sast_polygon", type=str2bool, default=False)
+    # PSE parmas
+    parser.add_argument("--det_pse_thresh", type=float, default=0)
+    parser.add_argument("--det_pse_box_thresh", type=float, default=0.85)
+    parser.add_argument("--det_pse_min_area", type=float, default=16)
+    parser.add_argument("--det_pse_box_type", type=str, default='box')
+    parser.add_argument("--det_pse_scale", type=int, default=1)
+
     # params for text recognizer
     parser.add_argument("--rec_algorithm", type=str, default='CRNN')
     parser.add_argument("--rec_model_dir", type=str)
 ...
tools/program.py

@@ -353,7 +353,7 @@ def eval(model,
          valid_dataloader,
          post_process_class,
          eval_class,
-         model_type,
+         model_type=None,
          use_srn=False,
          use_sar=False):
     model.eval()
@@ -404,7 +404,8 @@ def preprocess(is_train=False):
     alg = config['Architecture']['algorithm']
     assert alg in [
         'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN',
-        'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'ASTER'
+        'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE',
+        'ASTER'
     ]
     device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_gpu else 'cpu'
 ...