wangsen / paddle_dbnet

Unverified commit be0da20e, authored Nov 10, 2021 by d2623587501, committed by GitHub on Nov 10, 2021.

Merge branch 'PaddlePaddle:dygraph' into dygraph

Parents: 376319d7, a8960021
Changes: 25
Showing 5 changed files with 24 additions and 14 deletions (+24, -14):
test_tipc/prepare.sh                       +2   -1
test_tipc/test_paddle2onnx.sh              +1   -1
test_tipc/test_serving.sh                  +1   -1
test_tipc/test_train_inference_python.sh   +15  -6
tools/infer/utility.py                     +5   -5
test_tipc/prepare.sh

@@ -87,7 +87,8 @@ elif [ ${MODE} = "whole_infer" ];then
     rm -rf ./train_data/icdar2015
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar --no-check-certificate
-    cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
+    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate
+    cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && cd ../
 elif [ ${model_name} = "ocr_server_det" ]; then
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
...
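For readers unfamiliar with the TIPC data-preparation scripts: the lines above follow a download-if-missing then extract-in-place pattern. A minimal standalone sketch of that idiom is shown below; the fetch_and_extract helper name is made up for illustration and is not part of the repository, while the URL and the ./inference directory come from the diff above.

    # Hypothetical helper illustrating the wget/tar idiom used in prepare.sh.
    fetch_and_extract() {
        local url=$1 dir=$2
        wget -nc -P "${dir}" "${url}" --no-check-certificate   # -nc: do not re-download existing files
        (cd "${dir}" && tar xf "$(basename "${url}")")          # unpack next to the downloaded archive
    }

    fetch_and_extract https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar ./inference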
test_tipc/test_paddle2onnx.sh

@@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
 # parser params
-dataline=$(awk 'NR==111, NR==123{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==12{print}' $FILENAME)
 IFS=$'\n'
 lines=(${dataline})
...
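This change, like the one in test_tipc/test_serving.sh below, points the awk range at the start of the params file instead of a stale offset. A minimal sketch of the parsing idiom, using a hypothetical config file name in place of $FILENAME:

    FILENAME=config.txt                                  # hypothetical stand-in for the TIPC params file
    dataline=$(awk 'NR==1, NR==12{print}' "$FILENAME")   # print lines 1 through 12, inclusive
    IFS=$'\n'                                            # split the captured text on newlines only
    lines=(${dataline})                                  # one array element per parameter line
    echo "parsed ${#lines[@]} parameter lines"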
test_tipc/test_serving.sh

@@ -2,7 +2,7 @@
 source test_tipc/common_func.sh
 FILENAME=$1
-dataline=$(awk 'NR==67, NR==84{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
 # parser params
 IFS=$'\n'
...
test_tipc/test_train_inference_python.sh

@@ -244,7 +244,7 @@ else
 export Count=0
 USE_GPU_KEY=(${train_use_gpu_value})
 for gpu in ${gpu_list[*]}; do
-    use_gpu=${USE_GPU_KEY[Count]}
+    train_use_gpu=${USE_GPU_KEY[Count]}
     Count=$(($Count + 1))
     ips=""
     if [ ${gpu} = "-1" ];then
...
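The loop above walks gpu_list and picks the matching use_gpu flag by position; the rename to train_use_gpu keeps the variable consistent with the func_set_params call changed in the next hunk. A standalone sketch of the counter-indexed lookup, with made-up placeholder values:

    gpu_list=("-1" "0" "0,1")          # made-up examples: CPU, single GPU, two GPUs
    USE_GPU_KEY=(False True True)      # made-up flags; the real script parses these from the config
    Count=0
    for gpu in ${gpu_list[*]}; do
        train_use_gpu=${USE_GPU_KEY[Count]}   # flag at the same position as the current gpu entry
        Count=$(($Count + 1))
        echo "gpu=${gpu} use_gpu=${train_use_gpu}"
    done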
@@ -302,11 +302,20 @@ else
     set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
     set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
     set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
-    set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
+    set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
+    if [ ${#ips} -le 26 ];then
         save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+        nodes=1
+    else
+        IFS=","
+        ips_array=(${ips})
+        IFS="|"
+        nodes=${#ips_array[@]}
+        save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
+    fi
     # load pretrain from norm training if current trainer is pact or fpgm trainer
-    if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then
+    if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then
         set_pretrain="${load_norm_train_model}"
     fi
...
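The new branch decides between single-node and multi-node runs by the length of the ips string and, in the multi-node case, counts machines by splitting on commas before restoring the pipe separator used elsewhere in the TIPC parsers. A standalone sketch with a made-up ips value:

    ips="192.168.1.10,192.168.1.11,192.168.1.12"   # made-up multi-machine IP list
    if [ ${#ips} -le 26 ]; then                    # short string: assume a single node
        nodes=1
    else
        IFS=","
        ips_array=(${ips})                         # split on commas to count the machines
        IFS="|"                                    # restore the separator used by the TIPC parsers
        nodes=${#ips_array[@]}
    fi
    echo "nodes=${nodes}"                          # prints nodes=3 for the value above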
@@ -325,7 +334,7 @@ else
     set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
     # save norm trained models to set pretrain for pact training and fpgm training
-    if [ ${trainer} = ${trainer_norm} ]; then
+    if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then
         load_norm_train_model=${set_eval_pretrain}
     fi
     # run eval
...
tools/infer/utility.py

@@ -205,7 +205,7 @@ def create_predictor(args, mode, logger):
                 "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
             }
             max_input_shape = {
-                "x": [1, 3, 2000, 2000],
+                "x": [1, 3, 1280, 1280],
                 "conv2d_92.tmp_0": [1, 120, 400, 400],
                 "conv2d_91.tmp_0": [1, 24, 200, 200],
                 "conv2d_59.tmp_0": [1, 96, 400, 400],
...
@@ -255,16 +255,16 @@ def create_predictor(args, mode, logger):
                 opt_input_shape.update(opt_pact_shape)
             elif mode == "rec":
                 min_input_shape = {"x": [1, 3, 32, 10]}
-                max_input_shape = {"x": [args.rec_batch_num, 3, 32, 2000]}
+                max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]}
                 opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]}
             elif mode == "cls":
                 min_input_shape = {"x": [1, 3, 48, 10]}
-                max_input_shape = {"x": [args.rec_batch_num, 3, 48, 2000]}
+                max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]}
                 opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]}
             else:
                 min_input_shape = {"x": [1, 3, 10, 10]}
-                max_input_shape = {"x": [1, 3, 1000, 1000]}
-                opt_input_shape = {"x": [1, 3, 500, 500]}
+                max_input_shape = {"x": [1, 3, 512, 512]}
+                opt_input_shape = {"x": [1, 3, 256, 256]}
             config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape, opt_input_shape)
...