wangsen / paddle_dbnet
Commit 55d71902, authored Nov 12, 2021 by Leif
Merge remote-tracking branch 'origin/dygraph' into dygraph
Parents: 1d03fd33, 10b7e706
Changes: 77

Showing 20 changed files with 368 additions and 207 deletions (+368 -207)
ppocr/modeling/heads/table_att_head.py  +24 -16
ppocr/utils/network.py  +12 -10
ppocr/utils/save_load.py  +18 -58
test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml  +0 -0
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt  +16 -0
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt  +18 -0
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt  +12 -0
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt  +14 -0
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  +18 -0
test_tipc/configs/ppocr_det_mobile/train_infer_python.txt  +51 -0
test_tipc/configs/ppocr_det_mobile/train_linux_cpu_normal_normal_infer_python_mac.txt  +0 -0
test_tipc/configs/ppocr_det_mobile/train_linux_dcu_normal_normal_infer_python_dcu.txt  +0 -0
test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt  +51 -0
test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt  +51 -0
test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_normal_infer_python_windows.txt  +0 -0
test_tipc/configs/ppocr_det_mobile_params.txt  +0 -123
test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml  +0 -0
test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt  +14 -0
test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  +18 -0
test_tipc/configs/ppocr_det_server/train_infer_python.txt  +51 -0
ppocr/modeling/heads/table_att_head.py

@@ -23,14 +23,22 @@ import numpy as np
 class TableAttentionHead(nn.Layer):
-    def __init__(self, in_channels, hidden_size, loc_type, in_max_len=488, **kwargs):
+    def __init__(self,
+                 in_channels,
+                 hidden_size,
+                 loc_type,
+                 in_max_len=488,
+                 max_text_length=100,
+                 max_elem_length=800,
+                 max_cell_num=500,
+                 **kwargs):
         super(TableAttentionHead, self).__init__()
         self.input_size = in_channels[-1]
         self.hidden_size = hidden_size
         self.elem_num = 30
-        self.max_text_length = 100
-        self.max_elem_length = 500
-        self.max_cell_num = 500
+        self.max_text_length = max_text_length
+        self.max_elem_length = max_elem_length
+        self.max_cell_num = max_cell_num
         self.structure_attention_cell = AttentionGRUCell(
             self.input_size, hidden_size, self.elem_num, use_gru=False)
@@ -42,11 +50,11 @@ class TableAttentionHead(nn.Layer):
             self.loc_generator = nn.Linear(hidden_size, 4)
         else:
             if self.in_max_len == 640:
                 self.loc_fea_trans = nn.Linear(400, self.max_elem_length + 1)
             elif self.in_max_len == 800:
                 self.loc_fea_trans = nn.Linear(625, self.max_elem_length + 1)
             else:
                 self.loc_fea_trans = nn.Linear(256, self.max_elem_length + 1)
             self.loc_generator = nn.Linear(self.input_size + hidden_size, 4)

     def _char_to_onehot(self, input_char, onehot_dim):
@@ -69,7 +77,7 @@ class TableAttentionHead(nn.Layer):
         output_hiddens = []
         if self.training and targets is not None:
             structure = targets[0]
             for i in range(self.max_elem_length + 1):
                 elem_onehots = self._char_to_onehot(
                     structure[:, i], onehot_dim=self.elem_num)
                 (outputs, hidden), alpha = self.structure_attention_cell(
@@ -96,7 +104,7 @@ class TableAttentionHead(nn.Layer):
             alpha = None
             max_elem_length = paddle.to_tensor(self.max_elem_length)
             i = 0
             while i < max_elem_length + 1:
                 elem_onehots = self._char_to_onehot(
                     temp_elem, onehot_dim=self.elem_num)
                 (outputs, hidden), alpha = self.structure_attention_cell(
@@ -119,7 +127,7 @@ class TableAttentionHead(nn.Layer):
         loc_concat = paddle.concat([output, loc_fea], axis=2)
         loc_preds = self.loc_generator(loc_concat)
         loc_preds = F.sigmoid(loc_preds)
         return {'structure_probs': structure_probs, 'loc_preds': loc_preds}

 class AttentionGRUCell(nn.Layer):
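For context, a minimal, hypothetical sketch of how the newly exposed constructor arguments could be passed when building the head directly. In PaddleOCR the head is normally built from the YAML Head config; the in_channels, hidden_size and loc_type values below are placeholders, not taken from this commit.

from ppocr.modeling.heads.table_att_head import TableAttentionHead

# Placeholder values for illustration; max_text_length, max_elem_length and
# max_cell_num are the arguments added by this commit (previously the table
# head hard-coded max_text_length=100 and max_elem_length=500).
head = TableAttentionHead(
    in_channels=[96],     # only the last element is used as the attention input size
    hidden_size=256,
    loc_type=2,
    in_max_len=488,
    max_text_length=100,
    max_elem_length=800,
    max_cell_num=500)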
ppocr/utils/network.py

@@ -24,15 +24,17 @@ from ppocr.utils.logging import get_logger
 def download_with_progressbar(url, save_path):
     logger = get_logger()
     response = requests.get(url, stream=True)
-    total_size_in_bytes = int(response.headers.get('content-length', 0))
-    block_size = 1024  # 1 Kibibyte
-    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
-    with open(save_path, 'wb') as file:
-        for data in response.iter_content(block_size):
-            progress_bar.update(len(data))
-            file.write(data)
-    progress_bar.close()
-    if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes:
+    if response.status_code == 200:
+        total_size_in_bytes = int(response.headers.get('content-length', 1))
+        block_size = 1024  # 1 Kibibyte
+        progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
+        with open(save_path, 'wb') as file:
+            for data in response.iter_content(block_size):
+                progress_bar.update(len(data))
+                file.write(data)
+        progress_bar.close()
+    else:
         logger.error("Something went wrong while downloading models")
         sys.exit(0)
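A small usage sketch of the updated helper; the URL and destination file below are placeholders, not part of the commit. With this change a non-200 response aborts with an error up front instead of comparing byte counts after the download.

from ppocr.utils.network import download_with_progressbar

# Placeholder URL and save path, for illustration only.
download_with_progressbar(
    url="https://example.com/ch_ppocr_mobile_v2.0_det_infer.tar",
    save_path="./ch_ppocr_mobile_v2.0_det_infer.tar")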
ppocr/utils/save_load.py

@@ -25,7 +25,7 @@ import paddle
 from ppocr.utils.logging import get_logger
-__all__ = ['init_model', 'save_model', 'load_dygraph_params']
+__all__ = ['load_model']

 def _mkdir_if_not_exist(path, logger):
@@ -44,7 +44,7 @@ def _mkdir_if_not_exist(path, logger):
         raise OSError('Failed to mkdir {}'.format(path))

-def init_model(config, model, optimizer=None, lr_scheduler=None):
+def load_model(config, model, optimizer=None):
     """
     load model from checkpoint or pretrained_model
     """
@@ -54,15 +54,14 @@ def init_model(config, model, optimizer=None, lr_scheduler=None):
     pretrained_model = global_config.get('pretrained_model')
     best_model_dict = {}
     if checkpoints:
-        assert os.path.exists(checkpoints + ".pdparams"), \
-            "Given dir {}.pdparams not exist.".format(checkpoints)
+        if checkpoints.endswith('.pdparams'):
+            checkpoints = checkpoints.replace('.pdparams', '')
         assert os.path.exists(checkpoints + ".pdopt"), \
-            "Given dir {}.pdopt not exist.".format(checkpoints)
-        para_dict = paddle.load(checkpoints + '.pdparams')
-        opti_dict = paddle.load(checkpoints + '.pdopt')
-        model.set_state_dict(para_dict)
+            f"The {checkpoints}.pdopt does not exists!"
+        load_pretrained_params(model, checkpoints)
+        optim_dict = paddle.load(checkpoints + '.pdopt')
         if optimizer is not None:
-            optimizer.set_state_dict(opti_dict)
+            optimizer.set_state_dict(optim_dict)
         if os.path.exists(checkpoints + '.states'):
             with open(checkpoints + '.states', 'rb') as f:
@@ -73,70 +72,31 @@ def init_model(config, model, optimizer=None, lr_scheduler=None):
             best_model_dict['start_epoch'] = states_dict['epoch'] + 1
         logger.info("resume from {}".format(checkpoints))
     elif pretrained_model:
-        if not isinstance(pretrained_model, list):
-            pretrained_model = [pretrained_model]
-        for pretrained in pretrained_model:
-            if not (os.path.isdir(pretrained) or
-                    os.path.exists(pretrained + '.pdparams')):
-                raise ValueError("Model pretrain path {} does not "
-                                 "exists.".format(pretrained))
-            param_state_dict = paddle.load(pretrained + '.pdparams')
-            model.set_state_dict(param_state_dict)
-        logger.info("load pretrained model from {}".format(pretrained_model))
+        load_pretrained_params(model, pretrained_model)
     else:
         logger.info('train from scratch')
     return best_model_dict

-def load_dygraph_params(config, model, logger, optimizer):
-    ckp = config['Global']['checkpoints']
-    if ckp and os.path.exists(ckp + ".pdparams"):
-        pre_best_model_dict = init_model(config, model, optimizer)
-        return pre_best_model_dict
-    else:
-        pm = config['Global']['pretrained_model']
-        if pm is None:
-            return {}
-        if not os.path.exists(pm) and not os.path.exists(pm + ".pdparams"):
-            logger.info(f"The pretrained_model {pm} does not exists!")
-            return {}
-        pm = pm if pm.endswith('.pdparams') else pm + '.pdparams'
-        params = paddle.load(pm)
-        state_dict = model.state_dict()
-        new_state_dict = {}
-        for k1, k2 in zip(state_dict.keys(), params.keys()):
-            if list(state_dict[k1].shape) == list(params[k2].shape):
-                new_state_dict[k1] = params[k2]
-            else:
-                logger.info(
-                    f"The shape of model params {k1} {state_dict[k1].shape} not matched with loaded params {k2} {params[k2].shape} !"
-                )
-        model.set_state_dict(new_state_dict)
-        logger.info(f"loaded pretrained_model successful from {pm}")
-        return {}

 def load_pretrained_params(model, path):
-    if path is None:
-        return False
-    if path.endswith('pdparams'):
-        path = path.replace('.pdparams', '')
-    assert os.path.exists(path + ".pdparams"), \
-        f"The {path}.pdparams does not exists!"
-    params = paddle.load(path + '.pdparams')
+    logger = get_logger()
+    if not os.path.exists(path) and not os.path.exists(path + ".pdparams"):
+        print(f"The pretrained_model {path} does not exists!")
+        return False
+    path = path if path.endswith('.pdparams') else path + '.pdparams'
+    params = paddle.load(path)
     state_dict = model.state_dict()
     new_state_dict = {}
     for k1, k2 in zip(state_dict.keys(), params.keys()):
         if list(state_dict[k1].shape) == list(params[k2].shape):
             new_state_dict[k1] = params[k2]
         else:
-            print(
+            logger.info(
                 f"The shape of model params {k1} {state_dict[k1].shape} not matched with loaded params {k2} {params[k2].shape} !"
            )
     model.set_state_dict(new_state_dict)
-    print(f"load pretrain successful from {path}")
+    logger.info(f"load pretrain successful from {path}")
     return model
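A minimal sketch of how calling code would migrate to the consolidated loader after this change. The wrapper names below (init_training, load_weights_only) and the config/model/optimizer objects are assumed for illustration; only load_model and load_pretrained_params come from the commit.

from ppocr.utils.save_load import load_model, load_pretrained_params

def init_training(config, model, optimizer=None):
    # Single entry point after this commit: resumes from Global.checkpoints or
    # loads Global.pretrained_model, replacing init_model/load_dygraph_params.
    best_model_dict = load_model(config, model, optimizer)
    return best_model_dict

def load_weights_only(model, path):
    # Weight-only loading (e.g. before export); `path` may omit ".pdparams".
    return load_pretrained_params(model, path)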
test_tipc/configs/det_mv3_db.yml → test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml (file moved)
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt (new file, mode 100644)
===========================cpp_infer_params===========================
model_name:ocr_det
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
\ No newline at end of file
test_tipc/configs/jeston_ppocr_det_mobile_params.txt → test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt

-===========================train_params===========================
+===========================infer_params===========================
 model_name:ocr_det
 python:python
-gpu_list:null
-Global.use_gpu:null
-Global.auto_cast:null
-Global.epoch_num:null
-Global.save_model_dir:null
-Train.loader.batch_size_per_card:null
-Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:null
-null:null
-##
-trainer:null
-norm_train:null
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:null
-null:null
-##
-===========================infer_params===========================
-Global.save_inference_dir:./output/
-Global.pretrained_model:null
-norm_export:null
-quant_export:null
-fpgm_export:null
-distill_export:null
-export1:null
-export2:null
-inference_dir:null
-train_model:./inference/ch_ppocr_mobile_v2.0_det_infer
+infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer
 infer_export:null
 infer_quant:False
 inference:tools/infer/predict_det.py
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt (new file, mode 100644)
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
runtime_device:ARM_CPU
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--system_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (new file, mode 100644)
===========================paddle2onnx_params===========================
model_name:ocr_det_mobile
python:python3.7
2onnx: paddle2onnx
--model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./inference/det_mobile_onnx/model.onnx
--opset_version:10
--enable_onnx_checker:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
\ No newline at end of file
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (new file, mode 100644)
===========================serving_params===========================
model_name:ocr_det_mobile
python:python3.7|cpp
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_rpc_client.py|pipeline_http_client.py
--image_dir:../../doc/imgs
\ No newline at end of file
test_tipc/configs/ppocr_sys_mobile_params.txt → test_tipc/configs/ppocr_det_mobile/train_infer_python.txt

 ===========================train_params===========================
-model_name:ocr_system_mobile
+model_name:ocr_det
 python:python3.7
-gpu_list:null
+gpu_list:0|0,1
-Global.use_gpu:null
+Global.use_gpu:True|True
 Global.auto_cast:null
-Global.epoch_num:null
+Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
 Global.save_model_dir:./output/
-Train.loader.batch_size_per_card:null
+Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
 Global.pretrained_model:null
-train_model_name:null
+train_model_name:latest
-train_infer_img_dir:null
+train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
 null:null
 ##
-trainer:
+trainer:norm_train|pact_train|fpgm_train
-norm_train:null
+norm_train:tools/train.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
-pact_train:null
+pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o
-fpgm_train:null
+fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
 distill_train:null
 null:null
 null:null
@@ -27,41 +27,25 @@ null:null
 ===========================infer_params===========================
 Global.save_inference_dir:./output/
 Global.pretrained_model:
-norm_export:null
+norm_export:tools/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o
-quant_export:null
+quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o
-fpgm_export:null
+fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o
 distill_export:null
 export1:null
 export2:null
-##
+inference_dir:null
-infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
+train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
-infer_export:null
+infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
 infer_quant:False
-inference:tools/infer/predict_system.py
+inference:tools/infer/predict_det.py
---use_gpu:True
---enable_mkldnn:True|False
---cpu_threads:1|6
---rec_batch_num:1
---use_tensorrt:False|True
---precision:fp32|fp16|int8
---det_model_dir:
---image_dir:./inference/ch_det_data_50/all-sum-510/
---save_log_path:null
---benchmark:True
---rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
-===========================cpp_infer_params===========================
-use_opencv:True
-infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
-infer_quant:False
-inference:./deploy/cpp_infer/build/ppocr system
 --use_gpu:True|False
 --enable_mkldnn:True|False
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16
+--precision:fp32|fp16|int8
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
---rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+null:null
 --benchmark:True
+null:null
\ No newline at end of file
test_tipc/configs/mac_ppocr_det_mobile_params.txt → test_tipc/configs/ppocr_det_mobile/train_linux_cpu_normal_normal_infer_python_mac.txt (file moved)
test_tipc/configs/ppocr_det_mobile/train_linux_dcu_normal_normal_infer_python_dcu.txt (new file, mode 100644)
test_tipc/configs/fleet_ppocr_det_mobile_params.txt → test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt

 ===========================train_params===========================
 model_name:ocr_det
 python:python3.7
-gpu_list:xx.xx.xx.xx,xx.xx.xx.xx;0,1
+gpu_list:xx.xx.xx.xx,yy.yy.yy.yy;0,1
-Global.use_gpu:True|True
+Global.use_gpu:True
-Global.auto_cast:null|amp
+Global.auto_cast:fp32|amp
 Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
 Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
@@ -49,62 +49,3 @@ inference:tools/infer/predict_det.py
 null:null
 --benchmark:True
 null:null
Removed from the end of this file (the cpp_infer_params, serving_params, kl_quant_params and lite_params sections below):
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py --image_dir=../../doc/imgs
===========================kl_quant_params===========================
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
null:null
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
--cpu_threads:1|4
--batch_size:1
--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
test_tipc/configs/amp_ppocr_det_mobile_params.txt → test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt

@@ -49,62 +49,3 @@ inference:tools/infer/predict_det.py
 null:null
 --benchmark:True
 null:null
Removed from the end of this file (the cpp_infer_params, serving_params, kl_quant_params and lite_params sections below):
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py --image_dir=../../doc/imgs
===========================kl_quant_params===========================
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
null:null
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
--cpu_threads:1|4
--batch_size:1
--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
test_tipc/configs/win_ppocr_det_mobile_params.txt → test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_normal_infer_python_windows.txt (file moved)
test_tipc/configs/ppocr_det_mobile_params.txt (deleted, 100644 → 0); former contents:
===========================train_params===========================
model_name:ocr_det
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train|pact_train|fpgm_train
norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o
distill_export:null
export1:null
export2:null
inference_dir:null
train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:False
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_rpc_client.py|pipeline_http_client.py
--image_dir:../../doc/imgs
===========================kl_quant_params===========================
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
null:null
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
--cpu_threads:1|4
--batch_size:1
--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
===========================paddle2onnx_params===========================
2onnx: paddle2onnx
--model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./inference/det_mobile_onnx/model.onnx
--opset_version:10
--enable_onnx_checker:True
inference:tools/infer/predict_det.py
--use_gpu:False
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
\ No newline at end of file
test_tipc/configs/det_r50_vd_db.yml → test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml (file moved)
test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (new file, mode 100644)
===========================paddle2onnx_params===========================
model_name:ocr_det_server
python:python3.7
2onnx: paddle2onnx
--model_dir:./inference/ch_ppocr_server_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./inference/det_server_onnx/model.onnx
--opset_version:10
--enable_onnx_checker:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--det_model_dir:
--image_dir:./inference/det_inference
\ No newline at end of file
test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (new file, mode 100644)
===========================serving_params===========================
model_name:ocr_det_server
python:python3.7|cpp
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_server_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_server_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_rpc_client.py|pipeline_http_client.py
--image_dir:../../doc/imgs_words_en
\ No newline at end of file
test_tipc/configs/ppocr_det_server_params.txt → test_tipc/configs/ppocr_det_server/train_infer_python.txt

@@ -13,21 +13,21 @@ train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
 null:null
 ##
 trainer:norm_train|pact_train|fpgm_export
-norm_train:tools/train.py -c tests/configs/det_r50_vd_db.yml -o
+norm_train:tools/train.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o
-quant_export:deploy/slim/quantization/export_model.py -c tests/configs/det_r50_vd_db.yml -o
+quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o
-fpgm_export:deploy/slim/prune/export_prune_model.py -c tests/configs/det_r50_vd_db.yml -o
+fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o
 distill_train:null
 null:null
 null:null
 ##
 ===========================eval_params===========================
-eval:tools/eval.py -c tests/configs/det_r50_vd_db.yml -o
+eval:tools/eval.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o
 null:null
 ##
 ===========================infer_params===========================
 Global.save_inference_dir:./output/
 Global.pretrained_model:
-norm_export:tools/export_model.py -c tests/configs/det_r50_vd_db.yml -o
+norm_export:tools/export_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o
 quant_export:null
 fpgm_export:null
 distill_export:null
@@ -49,36 +49,3 @@ inference:tools/infer/predict_det.py
 --save_log_path:null
 --benchmark:True
 null:null
\ No newline at end of file
Removed from the end of this file (the cpp_infer_params and serving_params sections below):
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det_server
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_server_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py|pipeline_rpc_client.py
--image_dir=../../doc/imgs