Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
wangsen
paddle_dbnet
Commits
45a4aba4
Unverified
Commit
45a4aba4
authored
Oct 12, 2021
by
Double_V
Committed by
GitHub
Oct 12, 2021
Browse files
Merge branch 'dygraph' into sdmgr
parents
98162be4
033cc4cf
Changes
25
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
220 additions
and
52 deletions
+220
-52
tests/results/ppocr_det_mobile_results_fp32.txt
tests/results/ppocr_det_mobile_results_fp32.txt
+0
-0
tests/results/ppocr_det_mobile_results_fp32_cpp.txt
tests/results/ppocr_det_mobile_results_fp32_cpp.txt
+0
-0
tests/test.sh
tests/test.sh
+92
-52
tools/export_center.py
tools/export_center.py
+77
-0
tools/program.py
tools/program.py
+51
-0
No files found.
tests/results/
det
_results_
gpu_
fp32.txt
→
tests/results/
ppocr_det_mobile
_results_fp32.txt
View file @
45a4aba4
File moved
tests/results/
det_results_gpu_trt
_fp32_cpp.txt
→
tests/results/
ppocr_det_mobile_results
_fp32_cpp.txt
View file @
45a4aba4
File moved
tests/test.sh
View file @
45a4aba4
#!/bin/bash
#!/bin/bash
FILENAME
=
$1
FILENAME
=
$1
# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer']
# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer'
, 'serving_infer', 'klquant_infer'
]
MODE
=
$2
MODE
=
$2
if
[
${
MODE
}
=
"cpp_infer"
]
;
then
dataline
=
$(
cat
${
FILENAME
}
)
dataline
=
$(
awk
'NR==67, NR==81{print}'
$FILENAME
)
elif
[
${
MODE
}
=
"serving_infer"
]
;
then
dataline
=
$(
awk
'NR==52, NR==66{print}'
$FILENAME
)
elif
[
${
MODE
}
=
"klquant_infer"
]
;
then
dataline
=
$(
awk
'NR==82, NR==98{print}'
$FILENAME
)
else
dataline
=
$(
awk
'NR==1, NR==51{print}'
$FILENAME
)
fi
# parser params
# parser params
IFS
=
$'
\n
'
IFS
=
$'
\n
'
...
@@ -144,61 +151,93 @@ benchmark_key=$(func_parser_key "${lines[49]}")
...
@@ -144,61 +151,93 @@ benchmark_key=$(func_parser_key "${lines[49]}")
benchmark_value
=
$(
func_parser_value
"
${
lines
[49]
}
"
)
benchmark_value
=
$(
func_parser_value
"
${
lines
[49]
}
"
)
infer_key1
=
$(
func_parser_key
"
${
lines
[50]
}
"
)
infer_key1
=
$(
func_parser_key
"
${
lines
[50]
}
"
)
infer_value1
=
$(
func_parser_value
"
${
lines
[50]
}
"
)
infer_value1
=
$(
func_parser_value
"
${
lines
[50]
}
"
)
# parser serving
trans_model_py
=
$(
func_parser_value
"
${
lines
[67]
}
"
)
infer_model_dir_key
=
$(
func_parser_key
"
${
lines
[68]
}
"
)
infer_model_dir_value
=
$(
func_parser_value
"
${
lines
[68]
}
"
)
model_filename_key
=
$(
func_parser_key
"
${
lines
[69]
}
"
)
model_filename_value
=
$(
func_parser_value
"
${
lines
[69]
}
"
)
params_filename_key
=
$(
func_parser_key
"
${
lines
[70]
}
"
)
params_filename_value
=
$(
func_parser_value
"
${
lines
[70]
}
"
)
serving_server_key
=
$(
func_parser_key
"
${
lines
[71]
}
"
)
serving_server_value
=
$(
func_parser_value
"
${
lines
[71]
}
"
)
serving_client_key
=
$(
func_parser_key
"
${
lines
[72]
}
"
)
serving_client_value
=
$(
func_parser_value
"
${
lines
[72]
}
"
)
serving_dir_value
=
$(
func_parser_value
"
${
lines
[73]
}
"
)
web_service_py
=
$(
func_parser_value
"
${
lines
[74]
}
"
)
web_use_gpu_key
=
$(
func_parser_key
"
${
lines
[75]
}
"
)
web_use_gpu_list
=
$(
func_parser_value
"
${
lines
[75]
}
"
)
web_use_mkldnn_key
=
$(
func_parser_key
"
${
lines
[76]
}
"
)
web_use_mkldnn_list
=
$(
func_parser_value
"
${
lines
[76]
}
"
)
web_cpu_threads_key
=
$(
func_parser_key
"
${
lines
[77]
}
"
)
web_cpu_threads_list
=
$(
func_parser_value
"
${
lines
[77]
}
"
)
web_use_trt_key
=
$(
func_parser_key
"
${
lines
[78]
}
"
)
web_use_trt_list
=
$(
func_parser_value
"
${
lines
[78]
}
"
)
web_precision_key
=
$(
func_parser_key
"
${
lines
[79]
}
"
)
web_precision_list
=
$(
func_parser_value
"
${
lines
[79]
}
"
)
pipeline_py
=
$(
func_parser_value
"
${
lines
[80]
}
"
)
# parser serving
if
[
${
MODE
}
=
"klquant_infer"
]
;
then
# parser inference model
infer_model_dir_list
=
$(
func_parser_value
"
${
lines
[1]
}
"
)
infer_export_list
=
$(
func_parser_value
"
${
lines
[2]
}
"
)
infer_is_quant
=
$(
func_parser_value
"
${
lines
[3]
}
"
)
# parser inference
inference_py
=
$(
func_parser_value
"
${
lines
[4]
}
"
)
use_gpu_key
=
$(
func_parser_key
"
${
lines
[5]
}
"
)
use_gpu_list
=
$(
func_parser_value
"
${
lines
[5]
}
"
)
use_mkldnn_key
=
$(
func_parser_key
"
${
lines
[6]
}
"
)
use_mkldnn_list
=
$(
func_parser_value
"
${
lines
[6]
}
"
)
cpu_threads_key
=
$(
func_parser_key
"
${
lines
[7]
}
"
)
cpu_threads_list
=
$(
func_parser_value
"
${
lines
[7]
}
"
)
batch_size_key
=
$(
func_parser_key
"
${
lines
[8]
}
"
)
batch_size_list
=
$(
func_parser_value
"
${
lines
[8]
}
"
)
use_trt_key
=
$(
func_parser_key
"
${
lines
[9]
}
"
)
use_trt_list
=
$(
func_parser_value
"
${
lines
[9]
}
"
)
precision_key
=
$(
func_parser_key
"
${
lines
[10]
}
"
)
precision_list
=
$(
func_parser_value
"
${
lines
[10]
}
"
)
infer_model_key
=
$(
func_parser_key
"
${
lines
[11]
}
"
)
image_dir_key
=
$(
func_parser_key
"
${
lines
[12]
}
"
)
infer_img_dir
=
$(
func_parser_value
"
${
lines
[12]
}
"
)
save_log_key
=
$(
func_parser_key
"
${
lines
[13]
}
"
)
benchmark_key
=
$(
func_parser_key
"
${
lines
[14]
}
"
)
benchmark_value
=
$(
func_parser_value
"
${
lines
[14]
}
"
)
infer_key1
=
$(
func_parser_key
"
${
lines
[15]
}
"
)
infer_value1
=
$(
func_parser_value
"
${
lines
[15]
}
"
)
fi
# parser serving
if
[
${
MODE
}
=
"server_infer"
]
;
then
trans_model_py
=
$(
func_parser_value
"
${
lines
[1]
}
"
)
infer_model_dir_key
=
$(
func_parser_key
"
${
lines
[2]
}
"
)
infer_model_dir_value
=
$(
func_parser_value
"
${
lines
[2]
}
"
)
model_filename_key
=
$(
func_parser_key
"
${
lines
[3]
}
"
)
model_filename_value
=
$(
func_parser_value
"
${
lines
[3]
}
"
)
params_filename_key
=
$(
func_parser_key
"
${
lines
[4]
}
"
)
params_filename_value
=
$(
func_parser_value
"
${
lines
[4]
}
"
)
serving_server_key
=
$(
func_parser_key
"
${
lines
[5]
}
"
)
serving_server_value
=
$(
func_parser_value
"
${
lines
[5]
}
"
)
serving_client_key
=
$(
func_parser_key
"
${
lines
[6]
}
"
)
serving_client_value
=
$(
func_parser_value
"
${
lines
[6]
}
"
)
serving_dir_value
=
$(
func_parser_value
"
${
lines
[7]
}
"
)
web_service_py
=
$(
func_parser_value
"
${
lines
[8]
}
"
)
web_use_gpu_key
=
$(
func_parser_key
"
${
lines
[9]
}
"
)
web_use_gpu_list
=
$(
func_parser_value
"
${
lines
[9]
}
"
)
web_use_mkldnn_key
=
$(
func_parser_key
"
${
lines
[10]
}
"
)
web_use_mkldnn_list
=
$(
func_parser_value
"
${
lines
[10]
}
"
)
web_cpu_threads_key
=
$(
func_parser_key
"
${
lines
[11]
}
"
)
web_cpu_threads_list
=
$(
func_parser_value
"
${
lines
[11]
}
"
)
web_use_trt_key
=
$(
func_parser_key
"
${
lines
[12]
}
"
)
web_use_trt_list
=
$(
func_parser_value
"
${
lines
[12]
}
"
)
web_precision_key
=
$(
func_parser_key
"
${
lines
[13]
}
"
)
web_precision_list
=
$(
func_parser_value
"
${
lines
[13]
}
"
)
pipeline_py
=
$(
func_parser_value
"
${
lines
[14]
}
"
)
fi
if
[
${
MODE
}
=
"cpp_infer"
]
;
then
if
[
${
MODE
}
=
"cpp_infer"
]
;
then
# parser cpp inference model
# parser cpp inference model
cpp_infer_model_dir_list
=
$(
func_parser_value
"
${
lines
[
53
]
}
"
)
cpp_infer_model_dir_list
=
$(
func_parser_value
"
${
lines
[
1
]
}
"
)
cpp_infer_is_quant
=
$(
func_parser_value
"
${
lines
[
54
]
}
"
)
cpp_infer_is_quant
=
$(
func_parser_value
"
${
lines
[
2
]
}
"
)
# parser cpp inference
# parser cpp inference
inference_cmd
=
$(
func_parser_value
"
${
lines
[
55
]
}
"
)
inference_cmd
=
$(
func_parser_value
"
${
lines
[
3
]
}
"
)
cpp_use_gpu_key
=
$(
func_parser_key
"
${
lines
[
56
]
}
"
)
cpp_use_gpu_key
=
$(
func_parser_key
"
${
lines
[
4
]
}
"
)
cpp_use_gpu_list
=
$(
func_parser_value
"
${
lines
[
56
]
}
"
)
cpp_use_gpu_list
=
$(
func_parser_value
"
${
lines
[
4
]
}
"
)
cpp_use_mkldnn_key
=
$(
func_parser_key
"
${
lines
[5
7
]
}
"
)
cpp_use_mkldnn_key
=
$(
func_parser_key
"
${
lines
[5]
}
"
)
cpp_use_mkldnn_list
=
$(
func_parser_value
"
${
lines
[5
7
]
}
"
)
cpp_use_mkldnn_list
=
$(
func_parser_value
"
${
lines
[5]
}
"
)
cpp_cpu_threads_key
=
$(
func_parser_key
"
${
lines
[
58
]
}
"
)
cpp_cpu_threads_key
=
$(
func_parser_key
"
${
lines
[
6
]
}
"
)
cpp_cpu_threads_list
=
$(
func_parser_value
"
${
lines
[
58
]
}
"
)
cpp_cpu_threads_list
=
$(
func_parser_value
"
${
lines
[
6
]
}
"
)
cpp_batch_size_key
=
$(
func_parser_key
"
${
lines
[
59
]
}
"
)
cpp_batch_size_key
=
$(
func_parser_key
"
${
lines
[
7
]
}
"
)
cpp_batch_size_list
=
$(
func_parser_value
"
${
lines
[
59
]
}
"
)
cpp_batch_size_list
=
$(
func_parser_value
"
${
lines
[
7
]
}
"
)
cpp_use_trt_key
=
$(
func_parser_key
"
${
lines
[
60
]
}
"
)
cpp_use_trt_key
=
$(
func_parser_key
"
${
lines
[
8
]
}
"
)
cpp_use_trt_list
=
$(
func_parser_value
"
${
lines
[
60
]
}
"
)
cpp_use_trt_list
=
$(
func_parser_value
"
${
lines
[
8
]
}
"
)
cpp_precision_key
=
$(
func_parser_key
"
${
lines
[
61
]
}
"
)
cpp_precision_key
=
$(
func_parser_key
"
${
lines
[
9
]
}
"
)
cpp_precision_list
=
$(
func_parser_value
"
${
lines
[
61
]
}
"
)
cpp_precision_list
=
$(
func_parser_value
"
${
lines
[
9
]
}
"
)
cpp_infer_model_key
=
$(
func_parser_key
"
${
lines
[
62
]
}
"
)
cpp_infer_model_key
=
$(
func_parser_key
"
${
lines
[
10
]
}
"
)
cpp_image_dir_key
=
$(
func_parser_key
"
${
lines
[
63
]
}
"
)
cpp_image_dir_key
=
$(
func_parser_key
"
${
lines
[
11
]
}
"
)
cpp_infer_img_dir
=
$(
func_parser_value
"
${
lines
[
63
]
}
"
)
cpp_infer_img_dir
=
$(
func_parser_value
"
${
lines
[
12
]
}
"
)
cpp_infer_key1
=
$(
func_parser_key
"
${
lines
[
64
]
}
"
)
cpp_infer_key1
=
$(
func_parser_key
"
${
lines
[
13
]
}
"
)
cpp_infer_value1
=
$(
func_parser_value
"
${
lines
[
64
]
}
"
)
cpp_infer_value1
=
$(
func_parser_value
"
${
lines
[
13
]
}
"
)
cpp_benchmark_key
=
$(
func_parser_key
"
${
lines
[
65
]
}
"
)
cpp_benchmark_key
=
$(
func_parser_key
"
${
lines
[
14
]
}
"
)
cpp_benchmark_value
=
$(
func_parser_value
"
${
lines
[
65
]
}
"
)
cpp_benchmark_value
=
$(
func_parser_value
"
${
lines
[
14
]
}
"
)
fi
fi
LOG_PATH
=
"./tests/output"
LOG_PATH
=
"./tests/output"
mkdir
-p
${
LOG_PATH
}
mkdir
-p
${
LOG_PATH
}
status_log
=
"
${
LOG_PATH
}
/results.log"
status_log
=
"
${
LOG_PATH
}
/results.log"
...
@@ -414,7 +453,7 @@ function func_cpp_inference(){
...
@@ -414,7 +453,7 @@ function func_cpp_inference(){
done
done
}
}
if
[
${
MODE
}
=
"infer"
]
;
then
if
[
${
MODE
}
=
"infer"
]
||
[
${
MODE
}
=
"klquant_infer"
]
;
then
GPUID
=
$3
GPUID
=
$3
if
[
${#
GPUID
}
-le
0
]
;
then
if
[
${#
GPUID
}
-le
0
]
;
then
env
=
" "
env
=
" "
...
@@ -447,7 +486,6 @@ if [ ${MODE} = "infer" ]; then
...
@@ -447,7 +486,6 @@ if [ ${MODE} = "infer" ]; then
func_inference
"
${
python
}
"
"
${
inference_py
}
"
"
${
save_infer_dir
}
"
"
${
LOG_PATH
}
"
"
${
infer_img_dir
}
"
${
is_quant
}
func_inference
"
${
python
}
"
"
${
inference_py
}
"
"
${
save_infer_dir
}
"
"
${
LOG_PATH
}
"
"
${
infer_img_dir
}
"
${
is_quant
}
Count
=
$((
$Count
+
1
))
Count
=
$((
$Count
+
1
))
done
done
elif
[
${
MODE
}
=
"cpp_infer"
]
;
then
elif
[
${
MODE
}
=
"cpp_infer"
]
;
then
GPUID
=
$3
GPUID
=
$3
if
[
${#
GPUID
}
-le
0
]
;
then
if
[
${#
GPUID
}
-le
0
]
;
then
...
@@ -481,6 +519,8 @@ elif [ ${MODE} = "serving_infer" ]; then
...
@@ -481,6 +519,8 @@ elif [ ${MODE} = "serving_infer" ]; then
#run serving
#run serving
func_serving
"
${
web_service_cmd
}
"
func_serving
"
${
web_service_cmd
}
"
else
else
IFS
=
"|"
IFS
=
"|"
export
Count
=
0
export
Count
=
0
...
...
tools/export_center.py
0 → 100644
View file @
45a4aba4
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
os
import
sys
import
pickle
__dir__
=
os
.
path
.
dirname
(
os
.
path
.
abspath
(
__file__
))
sys
.
path
.
append
(
__dir__
)
sys
.
path
.
append
(
os
.
path
.
abspath
(
os
.
path
.
join
(
__dir__
,
'..'
)))
from
ppocr.data
import
build_dataloader
from
ppocr.modeling.architectures
import
build_model
from
ppocr.postprocess
import
build_post_process
from
ppocr.utils.save_load
import
init_model
,
load_dygraph_params
from
ppocr.utils.utility
import
print_dict
import
tools.program
as
program
def main():
    """Export per-character feature centers computed on the training data.

    Reuses the Train dataset settings for the Eval dataloader, builds the
    recognition model with ``return_feats`` enabled, averages head features
    per character via ``program.get_center``, and pickles the result to
    ``train_center.pkl``.
    """
    global_config = config['Global']

    # Build dataloader: point the Eval loader at the *training* data,
    # since the centers are computed from the samples the model saw.
    train_ds = config['Train']['dataset']
    eval_ds = config['Eval']['dataset']
    for field in ('name', 'data_dir', 'label_file_list'):
        eval_ds[field] = train_ds[field]
    eval_dataloader = build_dataloader(config, 'Eval', device, logger)

    # Build the post-process (decoder) for the recognition output.
    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)

    # Build model.  For rec algorithms the head's output channel count is
    # derived from the character set of the post-process class.
    if hasattr(post_process_class, 'character'):
        config['Architecture']["Head"]['out_channels'] = len(
            getattr(post_process_class, 'character'))

    # Ask the head to return intermediate features alongside logits.
    config['Architecture']["Head"]["return_feats"] = True

    model = build_model(config['Architecture'])

    best_model_dict = load_dygraph_params(config, model, logger, None)
    if len(best_model_dict):
        logger.info('metric in ckpt ***************')
        for k, v in best_model_dict.items():
            logger.info('{}:{}'.format(k, v))

    # Accumulate per-character feature centers over the train data.
    char_center = program.get_center(model, eval_dataloader,
                                     post_process_class)

    # Serialize the centers to disk for later use.
    with open("train_center.pkl", 'wb') as f:
        pickle.dump(char_center, f)
    return
if __name__ == '__main__':
    # Parse CLI/config options and set up the run environment (device,
    # logger, VisualDL writer) via the shared program.preprocess helper,
    # then run the center-export entry point.  These names are read as
    # module globals by main().
    config, device, logger, vdl_writer = program.preprocess()
    main()
tools/program.py
View file @
45a4aba4
...
@@ -404,6 +404,57 @@ def eval(model,
...
@@ -404,6 +404,57 @@ def eval(model,
return
metric
return
metric
def update_center(char_center, post_result, preds):
    """Update running per-character feature centers with one batch.

    Args:
        char_center: dict mapping character index -> ``[mean_feature, count]``.
        post_result: ``(result, label)`` pair from the post-process class;
            only samples whose decoded text matches the label contribute.
        preds: ``(feats, logits)`` tensors returned by the model head
            (``return_feats`` mode).

    Returns:
        The updated ``char_center`` dict (also mutated in place).
    """
    result, label = post_result
    feats, logits = preds
    # Greedy decode: per-timestep argmax over the character dimension.
    logits = paddle.argmax(logits, axis=-1)
    feats = feats.numpy()
    logits = logits.numpy()

    for idx_sample in range(len(label)):
        # Only accumulate features from correctly recognised samples.
        if result[idx_sample][0] == label[idx_sample][0]:
            feat = feats[idx_sample]
            logit = logits[idx_sample]
            for idx_time in range(len(logit)):
                index = logit[idx_time]
                # Idiom fix: membership test directly on the dict instead
                # of the O(1)-but-noisy `in char_center.keys()`.
                if index in char_center:
                    # Incremental mean: new = (mean * n + x) / (n + 1).
                    char_center[index][0] = (
                        char_center[index][0] * char_center[index][1] +
                        feat[idx_time]) / (char_center[index][1] + 1)
                    char_center[index][1] += 1
                else:
                    char_center[index] = [feat[idx_time], 1]
    return char_center
def get_center(model, eval_dataloader, post_process_class):
    """Compute per-character feature centers over ``eval_dataloader``.

    Runs the model on every batch, decodes the predictions, and averages
    the head features of correctly recognised characters via
    ``update_center``.

    Returns:
        dict mapping character index -> mean feature vector.
    """
    pbar = tqdm(total=len(eval_dataloader), desc='get center:')
    # NOTE(review): the last batch is skipped on Windows — presumably the
    # same dataloader workaround used by eval(); confirm it still applies.
    max_iter = len(eval_dataloader) - 1 if platform.system(
    ) == "Windows" else len(eval_dataloader)
    char_center = dict()
    # BUGFIX: total_time was never initialised, so the `+=` below raised
    # NameError on the first batch.
    total_time = 0.0
    for idx, batch in enumerate(eval_dataloader):
        if idx >= max_iter:
            break
        images = batch[0]
        start = time.time()
        preds = model(images)

        batch = [item.numpy() for item in batch]
        # Obtain usable results from post-processing methods
        total_time += time.time() - start
        # Evaluate the results of the current batch
        post_result = post_process_class(preds, batch[1])

        # Fold this batch into the running character centers.
        char_center = update_center(char_center, post_result, preds)
        pbar.update(1)

    pbar.close()
    # Drop the sample counts; keep only the mean feature per character.
    for key in char_center:
        char_center[key] = char_center[key][0]
    return char_center
def
preprocess
(
is_train
=
False
):
def
preprocess
(
is_train
=
False
):
FLAGS
=
ArgsParser
().
parse_args
()
FLAGS
=
ArgsParser
().
parse_args
()
profiler_options
=
FLAGS
.
profiler_options
profiler_options
=
FLAGS
.
profiler_options
...
...
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment