wangsen / paddle_dbnet

Commit 616ad6a1, authored Jun 09, 2021 by LDOUBLEV
Parent: 796898e0

    fix tps and fix trt

Showing 2 changed files with 9 additions and 36 deletions (+9 -36):
  ppocr/modeling/transforms/tps.py   +4  -9
  tools/infer/utility.py             +5  -27
ppocr/modeling/transforms/tps.py

@@ -230,15 +230,10 @@ class GridGenerator(nn.Layer):
     def build_inv_delta_C_paddle(self, C):
         """ Return inv_delta_C which is needed to calculate T """
         F = self.F
-        hat_C = paddle.zeros((F, F), dtype='float64')  # F x F
-        for i in range(0, F):
-            for j in range(i, F):
-                if i == j:
-                    hat_C[i, j] = 1
-                else:
-                    r = paddle.norm(C[i] - C[j])
-                    hat_C[i, j] = r
-                    hat_C[j, i] = r
+        hat_eye = paddle.eye(F, dtype='float64')  # F x F
+        tmp1 = C.reshape([1, F, 2])
+        tmp2 = C.reshape([F, 1, 2])
+        hat_C = paddle.norm(tmp1 - tmp2, axis=2) + hat_eye
         hat_C = (hat_C**2) * paddle.log(hat_C)
         delta_C = paddle.concat(  # F+3 x F+3
             [
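The hunk above replaces the element-by-element construction of the F x F thin-plate-spline kernel matrix with one broadcasted norm plus an identity matrix for the diagonal. Below is a minimal sketch of that equivalence, not taken from the repository: the helper names are mine, and NumPy stands in for the paddle.reshape / paddle.norm calls so the check runs without a Paddle install.

import numpy as np

def hat_c_loop(C):
    """Old code path: fill the pairwise-distance matrix entry by entry."""
    F = C.shape[0]
    hat_C = np.zeros((F, F), dtype='float64')
    for i in range(F):
        for j in range(i, F):
            if i == j:
                hat_C[i, j] = 1              # ones on the diagonal keep log() finite
            else:
                r = np.linalg.norm(C[i] - C[j])
                hat_C[i, j] = r
                hat_C[j, i] = r
    return (hat_C ** 2) * np.log(hat_C)      # TPS radial basis U(r) = r^2 * log(r)

def hat_c_vectorized(C):
    """New code path: one broadcasted norm, with an identity filling the diagonal."""
    F = C.shape[0]
    hat_eye = np.eye(F, dtype='float64')
    diff = C.reshape(1, F, 2) - C.reshape(F, 1, 2)   # F x F x 2 pairwise differences
    hat_C = np.linalg.norm(diff, axis=2) + hat_eye   # distances; 1.0 on the diagonal
    return (hat_C ** 2) * np.log(hat_C)

C = np.random.rand(20, 2)                            # F = 20 fiducial points, a common TPS setting
assert np.allclose(hat_c_loop(C), hat_c_vectorized(C))

Beyond being faster, the vectorized form avoids in-place item assignment into a paddle tensor, which is presumably the part of "fix tps" that matters when the transform is exported or run for inference.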
tools/infer/utility.py

@@ -235,12 +235,13 @@ def create_predictor(args, mode, logger):
         config.enable_tensorrt_engine(
             precision_mode=inference.PrecisionType.Float32,
             max_batch_size=args.max_batch_size,
-            min_subgraph_size=10)  # skip the minmum trt subgraph
-        if mode == "det" and "mobile" in model_file_path:
+            min_subgraph_size=3)  # skip the minmum trt subgraph
+        if mode == "det":
             min_input_shape = {
                 "x": [1, 3, 50, 50],
                 "conv2d_92.tmp_0": [1, 96, 20, 20],
                 "conv2d_91.tmp_0": [1, 96, 10, 10],
+                "conv2d_59.tmp_0": [1, 96, 20, 20],
                 "nearest_interp_v2_1.tmp_0": [1, 96, 10, 10],
                 "nearest_interp_v2_2.tmp_0": [1, 96, 20, 20],
                 "nearest_interp_v2_3.tmp_0": [1, 24, 20, 20],
@@ -253,6 +254,7 @@ def create_predictor(args, mode, logger):
                 "x": [1, 3, 2000, 2000],
                 "conv2d_92.tmp_0": [1, 96, 400, 400],
                 "conv2d_91.tmp_0": [1, 96, 200, 200],
+                "conv2d_59.tmp_0": [1, 96, 400, 400],
                 "nearest_interp_v2_1.tmp_0": [1, 96, 200, 200],
                 "nearest_interp_v2_2.tmp_0": [1, 96, 400, 400],
                 "nearest_interp_v2_3.tmp_0": [1, 24, 400, 400],
@@ -265,6 +267,7 @@ def create_predictor(args, mode, logger):
                 "x": [1, 3, 640, 640],
                 "conv2d_92.tmp_0": [1, 96, 160, 160],
                 "conv2d_91.tmp_0": [1, 96, 80, 80],
+                "conv2d_59.tmp_0": [1, 96, 160, 160],
                 "nearest_interp_v2_1.tmp_0": [1, 96, 80, 80],
                 "nearest_interp_v2_2.tmp_0": [1, 96, 160, 160],
                 "nearest_interp_v2_3.tmp_0": [1, 24, 160, 160],
@@ -273,31 +276,6 @@ def create_predictor(args, mode, logger):
                 "elementwise_add_7": [1, 56, 40, 40],
                 "nearest_interp_v2_0.tmp_0": [1, 96, 40, 40]
             }
-        if mode == "det" and "server" in model_file_path:
-            min_input_shape = {
-                "x": [1, 3, 50, 50],
-                "conv2d_59.tmp_0": [1, 96, 20, 20],
-                "nearest_interp_v2_2.tmp_0": [1, 96, 20, 20],
-                "nearest_interp_v2_3.tmp_0": [1, 24, 20, 20],
-                "nearest_interp_v2_4.tmp_0": [1, 24, 20, 20],
-                "nearest_interp_v2_5.tmp_0": [1, 24, 20, 20]
-            }
-            max_input_shape = {
-                "x": [1, 3, 2000, 2000],
-                "conv2d_59.tmp_0": [1, 96, 400, 400],
-                "nearest_interp_v2_2.tmp_0": [1, 96, 400, 400],
-                "nearest_interp_v2_3.tmp_0": [1, 24, 400, 400],
-                "nearest_interp_v2_4.tmp_0": [1, 24, 400, 400],
-                "nearest_interp_v2_5.tmp_0": [1, 24, 400, 400]
-            }
-            opt_input_shape = {
-                "x": [1, 3, 640, 640],
-                "conv2d_59.tmp_0": [1, 96, 160, 160],
-                "nearest_interp_v2_2.tmp_0": [1, 96, 160, 160],
-                "nearest_interp_v2_3.tmp_0": [1, 24, 160, 160],
-                "nearest_interp_v2_4.tmp_0": [1, 24, 160, 160],
-                "nearest_interp_v2_5.tmp_0": [1, 24, 160, 160]
-            }
         elif mode == "rec":
             min_input_shape = {"x": [args.rec_batch_num, 3, 32, 10]}
             max_input_shape = {"x": [args.rec_batch_num, 3, 32, 2000]}
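Taken together, the tools/infer/utility.py hunks lower min_subgraph_size from 10 to 3 so that smaller TensorRT subgraphs are still offloaded, apply the detection shape hints to every "det" model instead of only "mobile" ones, add bounds for the conv2d_59.tmp_0 tensor, and drop the now-redundant "server" branch. As a hedged sketch of where dictionaries like these end up, the snippet below wires min/max/opt shapes into a paddle.inference.Config; the model paths and batch size are placeholders, and the calls are the Paddle 2.x inference Python API rather than lines from this diff.

from paddle import inference

# Placeholder paths for an exported detection model.
config = inference.Config("det/inference.pdmodel", "det/inference.pdiparams")
config.enable_use_gpu(500, 0)                 # 500 MB initial GPU memory on device 0

config.enable_tensorrt_engine(
    precision_mode=inference.PrecisionType.Float32,
    max_batch_size=1,
    min_subgraph_size=3)                      # offload even small subgraphs, as in the diff

# Dynamic-shape bounds: TensorRT accepts any input between min and max and
# tunes its engine for the opt shape. The diff registers intermediate tensors
# (conv2d_*.tmp_0, nearest_interp_v2_*.tmp_0, elementwise_add_7) the same way.
min_input_shape = {"x": [1, 3, 50, 50]}
max_input_shape = {"x": [1, 3, 2000, 2000]}
opt_input_shape = {"x": [1, 3, 640, 640]}
config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,
                                  opt_input_shape)

predictor = inference.create_predictor(config)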