wangsen / paddle_dbnet · Commits

Commit 32fdd08b, authored Dec 01, 2021 by LDOUBLEV
Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into dygraph
Parents: b77f9ec0, b1d26ded
Changes: 62 · Showing 20 changed files with 1003 additions and 46 deletions. The changed-file listing is paginated (pages 1–4).

- test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml (+103 −0)
- test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt (+52 −0)
- test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt (+1 −1)
- test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt (+1 −1)
- test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml (+103 −0)
- test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt (+52 −0)
- test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt (+1 −1)
- test_tipc/configs/rec_r31_sar/rec_r31_sar.yml (+98 −0)
- test_tipc/configs/rec_r31_sar/train_infer_python.txt (+52 −0)
- test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt (+1 −1)
- test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt (+1 −1)
- test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml (+102 −0)
- test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt (+52 −0)
- test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt (+1 −1)
- test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml (+108 −0)
- test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt (+52 −0)
- test_tipc/docs/jeston_test_train_inference_python.md (+5 −5)
- test_tipc/prepare.sh (+16 −7)
- test_tipc/test_inference_python.sh (+169 −0)
- test_tipc/test_train_inference_python.sh (+33 −28)
test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml (new file, mode 100644)

```yaml
Global:
  use_gpu: True
  epoch_num: 21
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/nrtr/
  save_epoch_step: 1
  # evaluation is run every 2000 iterations
  eval_batch_step: [0, 2000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words_en/word_10.png
  # for data or label process
  character_dict_path: ppocr/utils/EN_symbol_dict.txt
  max_text_length: 25
  infer_mode: False
  use_space_char: False
  save_res_path: ./output/rec/predicts_nrtr.txt

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.99
  clip_norm: 5.0
  lr:
    name: Cosine
    learning_rate: 0.0005
    warmup_epoch: 2
  regularizer:
    name: 'L2'
    factor: 0.

Architecture:
  model_type: rec
  algorithm: NRTR
  in_channels: 1
  Transform:
  Backbone:
    name: MTB
    cnn_num: 2
  Head:
    name: Transformer
    d_model: 512
    num_encoder_layers: 6
    beam_size: -1 # When Beam size is greater than 0, it means to use beam search when evaluation.

Loss:
  name: NRTRLoss
  smoothing: True

PostProcess:
  name: NRTRLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - NRTRLabelEncode: # Class handling label
      - NRTRRecResizeImg:
          image_shape: [100, 32]
          resize_type: PIL # PIL or OpenCV
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 512
    drop_last: True
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - NRTRLabelEncode: # Class handling label
      - NRTRRecResizeImg:
          image_shape: [100, 32]
          resize_type: PIL # PIL or OpenCV
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 1
    use_shared_memory: False
```
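The companion train_infer_python.txt below drives this config through tools/train.py and tools/eval.py. As a quick standalone sanity check, the same entry points can be called directly; a minimal sketch, where the `-o` override values are illustrative and not part of this commit:

```shell
# Hypothetical smoke test of the new NRTR config; entry points and override keys
# come from the TIPC config below, the shortened values are only for a quick run.
python3 tools/train.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml \
    -o Global.epoch_num=1 Train.loader.batch_size_per_card=32
python3 tools/eval.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml \
    -o Global.checkpoints=./output/rec/nrtr/latest
```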
test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt (new file, mode 100644)

===========================train_params===========================
model_name:rec_mtb_nrtr
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:null
infer_export:tools/export_model.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o
infer_quant:False
inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/EN_symbol_dict.txt --rec_image_shape="1,32,100" --rec_algorithm="NRTR"
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
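For orientation, a TIPC config like this is normally fed to the prepare and test driver scripts; a sketch of the assumed invocation (the mode names come from the file above, and the flow mirrors the Jeston doc further down this page):

```shell
# Assumed TIPC flow for the new rec_mtb_nrtr config.
bash test_tipc/prepare.sh test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt 'lite_train_lite_infer'
bash test_tipc/test_train_inference_python.sh test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt 'lite_train_lite_infer'
```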
test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt

```diff
@@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic
 --cpu_threads:1|6
 --rec_batch_num:1|6
 --use_tensorrt:True|False
---precision:fp32|fp16|int8
+--precision:fp32|int8
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 --save_log_path:./test/output/
```
test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt

```diff
@@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic
 --cpu_threads:1|6
 --rec_batch_num:1|6
 --use_tensorrt:True|False
---precision:fp32|fp16|int8
+--precision:fp32|int8
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 --save_log_path:./test/output/
```
test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml (new file, mode 100644)

```yaml
Global:
  use_gpu: True
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/rec_mv3_tps_bilstm_att/
  save_epoch_step: 3
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [0, 2000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words/ch/word_1.jpg
  # for data or label process
  character_dict_path:
  max_text_length: 25
  infer_mode: False
  use_space_char: False
  save_res_path: ./output/rec/predicts_mv3_tps_bilstm_att.txt

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.0005
  regularizer:
    name: 'L2'
    factor: 0.00001

Architecture:
  model_type: rec
  algorithm: RARE
  Transform:
    name: TPS
    num_fiducial: 20
    loc_lr: 0.1
    model_name: small
  Backbone:
    name: MobileNetV3
    scale: 0.5
    model_name: large
  Neck:
    name: SequenceEncoder
    encoder_type: rnn
    hidden_size: 96
  Head:
    name: AttentionHead
    hidden_size: 96

Loss:
  name: AttentionLoss

PostProcess:
  name: AttnLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - AttnLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 256
    drop_last: True
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - AttnLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 1
```
test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt (new file, mode 100644)

===========================train_params===========================
model_name:rec_mv3_tps_bilstm_att_v2.0
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:null
infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o
infer_quant:False
inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE"
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt

```diff
@@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic
 --cpu_threads:1|6
 --rec_batch_num:1|6
 --use_tensorrt:True|False
---precision:fp32|fp16|int8
+--precision:fp32|int8
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 --save_log_path:./test/output/
```
test_tipc/configs/rec_r31_sar/rec_r31_sar.yml (new file, mode 100644)

```yaml
Global:
  use_gpu: true
  epoch_num: 5
  log_smooth_window: 20
  print_batch_step: 20
  save_model_dir: ./sar_rec
  save_epoch_step: 1
  # evaluation is run every 2000 iterations
  eval_batch_step: [0, 2000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img:
  # for data or label process
  character_dict_path: ppocr/utils/dict90.txt
  max_text_length: 30
  infer_mode: False
  use_space_char: False
  rm_symbol: True
  save_res_path: ./output/rec/predicts_sar.txt

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    name: Piecewise
    decay_epochs: [3, 4]
    values: [0.001, 0.0001, 0.00001]
  regularizer:
    name: 'L2'
    factor: 0

Architecture:
  model_type: rec
  algorithm: SAR
  Transform:
  Backbone:
    name: ResNet31
  Head:
    name: SARHead

Loss:
  name: SARLoss

PostProcess:
  name: SARLabelDecode

Metric:
  name: RecMetric

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - SARLabelEncode: # Class handling label
      - SARRecResizeImg:
          image_shape: [3, 48, 48, 160] # h:48 w:[48,160]
          width_downsample_ratio: 0.25
      - KeepKeys:
          keep_keys: ['image', 'label', 'valid_ratio'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 64
    drop_last: True
    num_workers: 8
    use_shared_memory: False

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - SARLabelEncode: # Class handling label
      - SARRecResizeImg:
          image_shape: [3, 48, 48, 160]
          width_downsample_ratio: 0.25
      - KeepKeys:
          keep_keys: ['image', 'label', 'valid_ratio'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 64
    num_workers: 4
    use_shared_memory: False
```
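The SAR entry uses a four-element image shape (height 48, width clamped to [48, 160]). A hedged sketch of checking an exported SAR model with the exact inference flags from the TIPC config below; the `--rec_model_dir` value is only a placeholder for wherever the model was exported:

```shell
# Assumed standalone check of SAR inference; flags mirror the `inference:` line in train_infer_python.txt below.
python3 tools/infer/predict_rec.py --rec_algorithm="SAR" --rec_image_shape="3,48,48,160" \
    --rec_char_dict_path=./ppocr/utils/dict90.txt \
    --rec_model_dir=./output/ --image_dir=./inference/rec_inference
```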
test_tipc/configs/rec_r31_sar/train_infer_python.txt (new file, mode 100644)

===========================train_params===========================
model_name:rec_r31_sar
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:null
infer_export:tools/export_model.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o
infer_quant:False
inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/dict90.txt --rec_image_shape="3,48,48,160" --rec_algorithm="SAR"
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|fp16|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt

```diff
@@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic
 --cpu_threads:1|6
 --rec_batch_num:1|6
 --use_tensorrt:True|False
---precision:fp32|fp16|int8
+--precision:fp32|int8
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 --save_log_path:./test/output/
```
test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt

```diff
@@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic
 --cpu_threads:1|6
 --rec_batch_num:1|6
 --use_tensorrt:True|False
---precision:fp32|fp16|int8
+--precision:fp32|int8
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 --save_log_path:./test/output/
```
test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml (new file, mode 100644)

```yaml
Global:
  use_gpu: True
  epoch_num: 400
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: ./output/rec/b3_rare_r34_none_gru/
  save_epoch_step: 3
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [0, 2000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words/ch/word_1.jpg
  # for data or label process
  character_dict_path:
  max_text_length: 25
  infer_mode: False
  use_space_char: False
  save_res_path: ./output/rec/predicts_b3_rare_r34_none_gru.txt

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  lr:
    learning_rate: 0.0005
  regularizer:
    name: 'L2'
    factor: 0.00000

Architecture:
  model_type: rec
  algorithm: RARE
  Transform:
    name: TPS
    num_fiducial: 20
    loc_lr: 0.1
    model_name: large
  Backbone:
    name: ResNet
    layers: 34
  Neck:
    name: SequenceEncoder
    encoder_type: rnn
    hidden_size: 256 #96
  Head:
    name: AttentionHead # AttentionHead
    hidden_size: 256 #
    l2_decay: 0.00001

Loss:
  name: AttentionLoss

PostProcess:
  name: AttnLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - AttnLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 256
    drop_last: True
    num_workers: 8

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - AttnLabelEncode: # Class handling label
      - RecResizeImg:
          image_shape: [3, 32, 100]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 8
```
test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt (new file, mode 100644)

===========================train_params===========================
model_name:rec_r34_vd_tps_bilstm_att_v2.0
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:null
infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o
infer_quant:False
inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE"
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt

```diff
@@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic
 --cpu_threads:1|6
 --rec_batch_num:1|6
 --use_tensorrt:True|False
---precision:fp32|fp16|int8
+--precision:fp32|int8
 --rec_model_dir:
 --image_dir:./inference/rec_inference
 --save_log_path:./test/output/
```
test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml (new file, mode 100644)

```yaml
Global:
  use_gpu: True
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 5
  save_model_dir: ./output/rec/srn_new
  save_epoch_step: 3
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [0, 5000]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  save_inference_dir:
  use_visualdl: False
  infer_img: doc/imgs_words/ch/word_1.jpg
  # for data or label process
  character_dict_path:
  max_text_length: 25
  num_heads: 8
  infer_mode: False
  use_space_char: False
  save_res_path: ./output/rec/predicts_srn.txt

Optimizer:
  name: Adam
  beta1: 0.9
  beta2: 0.999
  clip_norm: 10.0
  lr:
    learning_rate: 0.0001

Architecture:
  model_type: rec
  algorithm: SRN
  in_channels: 1
  Transform:
  Backbone:
    name: ResNetFPN
  Head:
    name: SRNHead
    max_text_length: 25
    num_heads: 8
    num_encoder_TUs: 2
    num_decoder_TUs: 4
    hidden_dims: 512

Loss:
  name: SRNLoss

PostProcess:
  name: SRNLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc

Train:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data/
    label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - SRNLabelEncode: # Class handling label
      - SRNRecResizeImg:
          image_shape: [1, 64, 256]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length', 'encoder_word_pos', 'gsrm_word_pos', 'gsrm_slf_attn_bias1', 'gsrm_slf_attn_bias2'] # dataloader will return list in this order
  loader:
    shuffle: False
    batch_size_per_card: 64
    drop_last: False
    num_workers: 4

Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ./train_data/ic15_data
    label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - SRNLabelEncode: # Class handling label
      - SRNRecResizeImg:
          image_shape: [1, 64, 256]
      - KeepKeys:
          keep_keys: ['image', 'label', 'length', 'encoder_word_pos', 'gsrm_word_pos', 'gsrm_slf_attn_bias1', 'gsrm_slf_attn_bias2']
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 32
    num_workers: 4
```
test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt (new file, mode 100644)

===========================train_params===========================
model_name:rec_r50_fpn_vd_none_srn
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train
norm_train:tools/train.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:null
infer_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o
infer_quant:False
inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="1,64,256" --rec_algorithm="SRN" --use_space_char=False
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
test_tipc/docs/jeston_test_train_inference_python.md

````diff
 # Basic training and inference function test on Jeston

-The main entry point for the basic training and inference test on Jeston is `test_train_inference_python.sh`. Since the Jeston CPU is weak, Jeston only needs to cover the GPU and TensorRT inference parts of TIPC.
+The main entry point for the basic training and inference test on Jeston is `test_inference_inference.sh`. Since the Jeston CPU is weak, Jeston only needs to cover the GPU and TensorRT inference parts of TIPC.

 ## 1. Summary of test conclusions
@@ -40,21 +40,21 @@
 ### 2.2 Function test

-First run `prepare.sh` to prepare the data and models, then run `test_train_inference_python.sh` for the test; log files in the `python_infer_*.log` format are finally generated under ```test_tipc/output```.
+First run `prepare.sh` to prepare the data and models, then run `test_inference_inference.sh` for the test; log files in the `python_infer_*.log` format are finally generated under ```test_tipc/output```.

-`test_train_inference_python.sh` contains 5 [run modes](./test_train_inference_python.md); on Jeston, only the inference mode needs to be tested:
+`test_inference_inference.sh` has only one mode, `whole_infer`; on Jeston, only the inference mode needs to be tested:

 ```
 - Mode 3: whole_infer, no training, full-data prediction; walks through open-source model evaluation and dynamic-to-static export, and checks the inference model's prediction time and accuracy;
 ```

 ```shell
 bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 1:
-bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
+bash test_tipc/test_inference_inference.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 2: run prediction on a specified GPU card; the third argument is the GPU card id
 bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
 ```

-After running the command, run logs are saved automatically under the `test_tipc/output` folder. For example, in `lite_train_lite_infer` mode the training + inference chain is run, so the `test_tipc/output` folder contains the following files:
+After running the command, run logs are saved automatically under the `test_tipc/output` folder. For example, in `whole_infer` mode the training + inference chain is run, so the `test_tipc/output` folder contains the following files:
 ```
 test_tipc/output/
 |- results_python.log    # log of the status of the executed commands
 ...
````
test_tipc/prepare.sh

```diff
@@ -45,7 +45,7 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
     wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
     cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../
-    cd ./train_data && tar xf total_text_lite.tar && ln -s total_text && cd ../
+    cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../
 fi
 if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ] || [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then
     wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
@@ -61,6 +61,10 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
     wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar --no-check-certificate
     cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && cd ../
 fi
+if [ ${model_name} == "ch_ppocr_mobile_v2.0_rec_FPGM" ]; then
+    wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar --no-check-certificate
+    cd ./pretrain_models/ && tar xf ch_ppocr_mobile_v2.0_rec_train.tar && cd ../
+fi
 elif [ ${MODE} = "whole_train_whole_infer" ];then
     wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
@@ -137,11 +141,6 @@ elif [ ${MODE} = "whole_infer" ];then
     wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate
     cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
 fi
-if [ ${model_name} = "ch_PPOCRv2_det" ]; then
-    wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
-    wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/e2e_server_pgnetA_infer.tar --no-check-certificate
-    cd ./inference && tar xf e2e_server_pgnetA_infer.tar && tar xf ch_det_data_50.tar && cd ../
-fi
 if [ ${model_name} == "en_server_pgnetA" ]; then
     wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
     cd ./inference && tar xf en_server_pgnetA.tar && cd ../
@@ -160,7 +159,10 @@ elif [ ${MODE} = "whole_infer" ];then
     fi
 fi
 if [ ${MODE} = "klquant_whole_infer" ]; then
-    if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then
+    wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar --no-check-certificate
+    cd ./train_data/ && tar xf icdar2015_lite.tar ln -s ./icdar2015_lite ./icdar2015 && cd ../
+    if [ ${model_name} = "ch_ppocr_mobile_v2.0_det_KL" ]; then
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
         cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
@@ -171,6 +173,13 @@ if [ ${MODE} = "klquant_whole_infer" ]; then
     wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate
     cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
     fi
+    if [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_KL" ]; then
+        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate
+        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate
+        cd ./train_data/ && tar xf ic15_data.tar && cd ../
+        cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../
+    fi
 fi
 if [ ${MODE} = "cpp_infer" ];then
```
test_tipc/test_inference_jeston.sh → test_tipc/test_inference_python.sh (renamed)

```diff
 #!/bin/bash
 source test_tipc/common_func.sh
-source test_tipc/test_train_inference_python.sh
+# source test_tipc/test_train_inference_python.sh

 FILENAME=$1
 # MODE be one of ['whole_infer']
 MODE=$2

-dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)

 # parser params
 IFS=$'\n'
@@ -35,18 +35,100 @@ precision_list=$(func_parser_value "${lines[12]}")
 infer_model_key=$(func_parser_key "${lines[13]}")
 image_dir_key=$(func_parser_key "${lines[14]}")
 infer_img_dir=$(func_parser_value "${lines[14]}")
-save_log_key=$(func_parser_key "${lines[15]}")
+rec_model_key=$(func_parser_key "${lines[15]}")
+rec_model_value=$(func_parser_value "${lines[15]}")
 benchmark_key=$(func_parser_key "${lines[16]}")
 benchmark_value=$(func_parser_value "${lines[16]}")
 infer_key1=$(func_parser_key "${lines[17]}")
 infer_value1=$(func_parser_value "${lines[17]}")

 LOG_PATH="./test_tipc/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"

+function func_inference(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    _log_path=$4
+    _img_dir=$5
+    _flag_quant=$6
+    # inference
+    for use_gpu in ${use_gpu_list[*]}; do
+        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+            for use_mkldnn in ${use_mkldnn_list[*]}; do
+                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
+                    continue
+                fi
+                for threads in ${cpu_threads_list[*]}; do
+                    for batch_size in ${batch_size_list[*]}; do
+                        for precision in ${precision_list[*]}; do
+                            if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
+                                continue
+                            fi # skip when enable fp16 but disable mkldnn
+                            if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+                                continue
+                            fi # skip when quant model inference but precision is not int8
+                            set_precision=$(func_set_params "${precision_key}" "${precision}")
+                            _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+                            set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                            set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                            set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                            set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
+                            set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                            set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
+                            set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                            eval $command
+                            last_status=${PIPESTATUS[0]}
+                            eval "cat ${_save_log_path}"
+                            status_check $last_status "${command}" "${status_log}"
+                        done
+                    done
+                done
+            done
+        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
+            for use_trt in ${use_trt_list[*]}; do
+                for precision in ${precision_list[*]}; do
+                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                        continue
+                    fi
+                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                        continue
+                    fi
+                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
+                        continue
+                    fi
+                    for batch_size in ${batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+                        set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
+                        set_precision=$(func_set_params "${precision_key}" "${precision}")
+                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                        set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
+                        set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}"
+                    done
+                done
+            done
+        else
+            echo "Does not support hardware other than CPU and GPU Currently!"
+        fi
+    done
+}

 if [ ${MODE} = "whole_infer" ]; then
     GPUID=$3
     if [ ${#GPUID} -le 0 ];then
@@ -68,7 +150,6 @@ if [ ${MODE} = "whole_infer" ]; then
         set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
         export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
         echo ${infer_run_exports[Count]}
-        echo $export_cmd
         eval $export_cmd
         status_export=$?
         status_check $status_export "${export_cmd}" "${status_log}"
@@ -85,3 +166,4 @@ if [ ${MODE} = "whole_infer" ]; then
     done
 fi
```
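The renamed driver keeps the two-argument command line of the old script; a hedged usage sketch, following the pattern shown in the Jeston doc earlier on this page:

```shell
# Assumed invocation of the renamed script; the config path and mode are taken from the Jeston doc above.
bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
bash test_tipc/test_inference_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
```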
test_tipc/test_train_inference_python.sh

```diff
@@ -90,36 +90,39 @@ infer_value1=$(func_parser_value "${lines[50]}")
 # parser klquant_infer
 if [ ${MODE} = "klquant_whole_infer" ]; then
-    dataline=$(awk 'NR==1 NR==17{print}' $FILENAME)
+    dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
     lines=(${dataline})
     model_name=$(func_parser_value "${lines[1]}")
     python=$(func_parser_value "${lines[2]}")
+    export_weight=$(func_parser_key "${lines[3]}")
+    save_infer_key=$(func_parser_key "${lines[4]}")
     # parser inference model
-    infer_model_dir_list=$(func_parser_value "${lines[3]}")
-    infer_export_list=$(func_parser_value "${lines[4]}")
-    infer_is_quant=$(func_parser_value "${lines[5]}")
+    infer_model_dir_list=$(func_parser_value "${lines[5]}")
+    infer_export_list=$(func_parser_value "${lines[6]}")
+    infer_is_quant=$(func_parser_value "${lines[7]}")
     # parser inference
-    inference_py=$(func_parser_value "${lines[6]}")
-    use_gpu_key=$(func_parser_key "${lines[7]}")
-    use_gpu_list=$(func_parser_value "${lines[7]}")
-    use_mkldnn_key=$(func_parser_key "${lines[8]}")
-    use_mkldnn_list=$(func_parser_value "${lines[8]}")
-    cpu_threads_key=$(func_parser_key "${lines[9]}")
-    cpu_threads_list=$(func_parser_value "${lines[9]}")
-    batch_size_key=$(func_parser_key "${lines[10]}")
-    batch_size_list=$(func_parser_value "${lines[10]}")
-    use_trt_key=$(func_parser_key "${lines[11]}")
-    use_trt_list=$(func_parser_value "${lines[11]}")
-    precision_key=$(func_parser_key "${lines[12]}")
-    precision_list=$(func_parser_value "${lines[12]}")
-    infer_model_key=$(func_parser_key "${lines[13]}")
-    image_dir_key=$(func_parser_key "${lines[14]}")
-    infer_img_dir=$(func_parser_value "${lines[14]}")
-    save_log_key=$(func_parser_key "${lines[15]}")
-    benchmark_key=$(func_parser_key "${lines[16]}")
-    benchmark_value=$(func_parser_value "${lines[16]}")
-    infer_key1=$(func_parser_key "${lines[17]}")
-    infer_value1=$(func_parser_value "${lines[17]}")
+    inference_py=$(func_parser_value "${lines[8]}")
+    use_gpu_key=$(func_parser_key "${lines[9]}")
+    use_gpu_list=$(func_parser_value "${lines[9]}")
+    use_mkldnn_key=$(func_parser_key "${lines[10]}")
+    use_mkldnn_list=$(func_parser_value "${lines[10]}")
+    cpu_threads_key=$(func_parser_key "${lines[11]}")
+    cpu_threads_list=$(func_parser_value "${lines[11]}")
+    batch_size_key=$(func_parser_key "${lines[12]}")
+    batch_size_list=$(func_parser_value "${lines[12]}")
+    use_trt_key=$(func_parser_key "${lines[13]}")
+    use_trt_list=$(func_parser_value "${lines[13]}")
+    precision_key=$(func_parser_key "${lines[14]}")
+    precision_list=$(func_parser_value "${lines[14]}")
+    infer_model_key=$(func_parser_key "${lines[15]}")
+    image_dir_key=$(func_parser_key "${lines[16]}")
+    infer_img_dir=$(func_parser_value "${lines[16]}")
+    save_log_key=$(func_parser_key "${lines[17]}")
+    save_log_value=$(func_parser_value "${lines[17]}")
+    benchmark_key=$(func_parser_key "${lines[18]}")
+    benchmark_value=$(func_parser_value "${lines[18]}")
+    infer_key1=$(func_parser_key "${lines[19]}")
+    infer_value1=$(func_parser_value "${lines[19]}")
 fi

 LOG_PATH="./test_tipc/output"
@@ -159,8 +162,9 @@ function func_inference(){
                         set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                         set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
                         set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                        set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                         set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
                         eval $command
                         last_status=${PIPESTATUS[0]}
                         eval "cat ${_save_log_path}"
@@ -189,8 +193,9 @@ function func_inference(){
                     set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
                     set_precision=$(func_set_params "${precision_key}" "${precision}")
                     set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                    set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                     set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-                    command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                    command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
@@ -235,7 +240,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
-        if [ ${MODE} = "klquant_infer" ]; then
+        if [ ${MODE} = "klquant_whole_infer" ]; then
             is_quant="True"
         fi
         func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
```
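The two-line shift in the klquant parser indices only makes sense in light of how each `key:value` line of the config is split; a minimal sketch of what the helpers sourced from test_tipc/common_func.sh presumably look like (not part of this diff):

```shell
# Assumed shape of the parser helpers from test_tipc/common_func.sh: split a "key:value" line on ':'.
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[0]}    # e.g. "--precision" from "--precision:fp32|int8"
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}    # e.g. "fp32|int8"
}
```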