wangsen / paddle_dbnet · Commits

Commit b626aa3e authored Aug 28, 2020 by tink2123

    formate code

parent 91f6f243
Showing 2 changed files with 10 additions and 23 deletions:

    ppocr/data/rec/dataset_traversal.py           +1  -0
    ppocr/modeling/heads/self_attention/model.py  +9  -23
ppocr/data/rec/dataset_traversal.py
...
@@ -257,6 +257,7 @@ class SimpleReader(object):
                     norm_img = process_image_srn(
                         img=img,
                         image_shape=self.image_shape,
+                        char_ops=self.char_ops,
                         num_heads=self.num_heads,
                         max_text_length=self.max_text_length)
                 else:
...
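The one-line change above threads the reader's character mapper (self.char_ops) into the SRN preprocessing call, alongside the existing shape and head-count arguments. As a rough illustration of what such a char_ops helper does (this class is a hypothetical stand-in, not PaddleOCR's actual CharacterOps):

    # Hypothetical sketch of a char_ops-style helper: maps label text to
    # character indices so the SRN head can build its target tensors.
    class SimpleCharOps(object):
        def __init__(self, charset):
            self.dict = {c: i for i, c in enumerate(charset)}

        def encode(self, text):
            # Unknown characters are skipped rather than raising.
            return [self.dict[c] for c in text if c in self.dict]

    char_ops = SimpleCharOps("0123456789abcdefghijklmnopqrstuvwxyz")
    print(char_ops.encode("srn"))  # [28, 27, 23]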
ppocr/modeling/heads/self_attention/model.py
...
@@ -4,9 +4,6 @@ import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 
-# Set seed for CE
-dropout_seed = None
-
 encoder_data_input_fields = (
     "src_word",
     "src_pos",
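Deleting the module-level dropout_seed (kept so Paddle's CE, i.e. continuous-evaluation, runs could make dropout deterministic) is the thread running through the rest of this diff: every dropout call below switches from seed=dropout_seed to a literal seed=None. A minimal NumPy sketch of why a fixed seed matters for reproducibility (names illustrative, not the framework's implementation):

    import numpy as np

    dropout_seed = None  # set to an int to make every dropout mask reproducible

    def dropout(x, dropout_prob, seed=None):
        # A fixed seed yields the same mask on every run; None leaves it random.
        rng = np.random.RandomState(seed)
        mask = rng.uniform(size=x.shape) >= dropout_prob
        return x * mask / (1.0 - dropout_prob)

    x = np.ones((2, 3))
    print(dropout(x, 0.5, seed=dropout_seed))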
...
@@ -186,10 +183,7 @@ def multi_head_attention(queries,
         weights = layers.softmax(product)
         if dropout_rate:
             weights = layers.dropout(
-                weights,
-                dropout_prob=dropout_rate,
-                seed=dropout_seed,
-                is_test=False)
+                weights, dropout_prob=dropout_rate, seed=None, is_test=False)
 
         out = layers.matmul(weights, v)
         return out
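Only the dropout call changes here; the surrounding lines are the core of scaled dot-product attention, which this NumPy sketch reproduces (shapes illustrative, dropout omitted):

    import numpy as np

    def scaled_dot_product_attention(q, k, v):
        d_key = q.shape[-1]
        # Similarity of every query against every key, scaled by d_key**-0.5.
        product = np.matmul(q, k.transpose(0, 2, 1)) / np.sqrt(d_key)
        # Softmax over the key axis, as in layers.softmax(product) above.
        weights = np.exp(product - product.max(axis=-1, keepdims=True))
        weights /= weights.sum(axis=-1, keepdims=True)
        # Weighted sum of values, as in layers.matmul(weights, v).
        return np.matmul(weights, v)

    q = k = v = np.random.rand(1, 4, 8)  # (batch, seq_len, d_key)
    print(scaled_dot_product_attention(q, k, v).shape)  # (1, 4, 8)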
...
@@ -221,7 +215,7 @@ def positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):
                        act="relu")
     if dropout_rate:
         hidden = layers.dropout(
-            hidden, dropout_prob=dropout_rate, seed=dropout_seed, is_test=False)
+            hidden, dropout_prob=dropout_rate, seed=None, is_test=False)
     out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
     return out
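The same one-token seed change lands in the position-wise feed-forward block, which expands each position to d_inner_hid with ReLU and projects back to d_hid. In NumPy terms (random weights for illustration):

    import numpy as np

    def positionwise_feed_forward(x, w1, w2):
        # Inner projection with ReLU, as in layers.fc(..., act="relu").
        hidden = np.maximum(np.matmul(x, w1), 0.0)
        # Project back to the model width; the dropout in between is omitted.
        return np.matmul(hidden, w2)

    x = np.random.rand(2, 5, 16)   # (batch, seq_len, d_hid)
    w1 = np.random.rand(16, 64)    # d_hid -> d_inner_hid
    w2 = np.random.rand(64, 16)    # d_inner_hid -> d_hid
    print(positionwise_feed_forward(x, w1, w2).shape)  # (2, 5, 16)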
...
@@ -245,10 +239,7 @@ def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
         elif cmd == "d":  # add dropout
             if dropout_rate:
                 out = layers.dropout(
-                    out,
-                    dropout_prob=dropout_rate,
-                    seed=dropout_seed,
-                    is_test=False)
+                    out, dropout_prob=dropout_rate, seed=None, is_test=False)
 
     return out
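The elif cmd == "d" branch shown here is one arm of a small dispatcher: process_cmd is a string of one-letter commands applied in order. Only the "d" (dropout) arm appears in this hunk; the residual-add and layer-norm arms in the sketch below are assumptions based on the usual transformer pre/post-process pattern:

    import numpy as np

    def pre_post_process(prev_out, out, process_cmd, dropout_prob=0.0):
        # e.g. process_cmd = "dan" applies dropout, then add, then norm.
        for cmd in process_cmd:
            if cmd == "a" and prev_out is not None:   # residual add (assumed)
                out = out + prev_out
            elif cmd == "n":                          # layer norm (assumed)
                mean = out.mean(axis=-1, keepdims=True)
                std = out.std(axis=-1, keepdims=True)
                out = (out - mean) / (std + 1e-6)
            elif cmd == "d" and dropout_prob:         # dropout, as in the hunk
                mask = np.random.rand(*out.shape) >= dropout_prob
                out = out * mask / (1.0 - dropout_prob)
        return out

    x = np.ones((2, 4))
    print(pre_post_process(x, 2.0 * x, "dan", dropout_prob=0.1).shape)  # (2, 4)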
...
@@ -272,9 +263,8 @@ def prepare_encoder(
     This module is used at the bottom of the encoder stacks.
     """
-    src_word_emb = src_word  # layers.concat(res,axis=1)
+    src_word_emb = src_word
     src_word_emb = layers.cast(src_word_emb, 'float32')
-    # print("src_word_emb",src_word_emb)
     src_word_emb = layers.scale(x=src_word_emb, scale=src_emb_dim**0.5)
     src_pos_enc = layers.embedding(
...
@@ -285,7 +275,7 @@ def prepare_encoder(
     src_pos_enc.stop_gradient = True
     enc_input = src_word_emb + src_pos_enc
     return layers.dropout(
-        enc_input, dropout_prob=dropout_rate, seed=dropout_seed,
+        enc_input, dropout_prob=dropout_rate, seed=None,
         is_test=False) if dropout_rate else enc_input
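These two prepare_encoder hunks (and the matching prepare_decoder hunks below) all touch the same input-preparation pattern: scale the word embedding by sqrt(emb_dim), add a positional encoding that is never trained (stop_gradient = True), then optionally apply dropout. A NumPy sketch of the arithmetic (sizes illustrative):

    import numpy as np

    def prepare_input(word_emb, pos_enc, emb_dim):
        # Scale embeddings, as in layers.scale(..., scale=src_emb_dim**0.5).
        scaled = word_emb * emb_dim ** 0.5
        # Positional encodings are added but frozen (stop_gradient = True).
        return scaled + pos_enc

    word_emb = np.random.rand(1, 10, 64)  # (batch, seq_len, emb_dim)
    pos_enc = np.random.rand(1, 10, 64)
    print(prepare_input(word_emb, pos_enc, 64).shape)  # (1, 10, 64)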
...
@@ -310,7 +300,7 @@ def prepare_decoder(src_word,
         param_attr=fluid.ParamAttr(
             name=word_emb_param_name,
             initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5)))
-    # print("target_word_emb",src_word_emb)
     src_word_emb = layers.scale(x=src_word_emb, scale=src_emb_dim**0.5)
     src_pos_enc = layers.embedding(
         src_pos,
...
@@ -320,7 +310,7 @@ def prepare_decoder(src_word,
     src_pos_enc.stop_gradient = True
     enc_input = src_word_emb + src_pos_enc
     return layers.dropout(
-        enc_input, dropout_prob=dropout_rate, seed=dropout_seed,
+        enc_input, dropout_prob=dropout_rate, seed=None,
         is_test=False) if dropout_rate else enc_input
...
@@ -465,12 +455,8 @@ def wrap_encoder(src_vocab_size,
     img, src_pos, src_slf_attn_bias = enc_inputs
     img
     """
-    if enc_inputs is None:
-        # This is used to implement independent encoder program in inference.
-        src_word, src_pos, src_slf_attn_bias = make_all_inputs(
-            encoder_data_input_fields)
-    else:
-        src_word, src_pos, src_slf_attn_bias = enc_inputs  #
+    src_word, src_pos, src_slf_attn_bias = enc_inputs  #
     enc_input = prepare_decoder(
         src_word,
...
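The last hunk drops the make_all_inputs fallback (which built feed variables from encoder_data_input_fields when enc_inputs was None) and always unpacks the tuple the caller supplies. A sketch of the calling convention this now assumes; the shapes are illustrative, not PaddleOCR's actual pipeline values:

    import numpy as np

    # The three inputs named by encoder_data_input_fields: source features,
    # their positions, and the self-attention bias/mask.
    src_word = np.random.rand(1, 25, 1).astype("float32")
    src_pos = np.arange(25, dtype="int64").reshape(1, 25, 1)
    src_slf_attn_bias = np.zeros((1, 8, 25, 25), dtype="float32")

    enc_inputs = (src_word, src_pos, src_slf_attn_bias)
    # wrap_encoder now requires this tuple; enc_inputs=None would raise.
    src_word, src_pos, src_slf_attn_bias = enc_inputs
    print(src_word.shape, src_pos.shape, src_slf_attn_bias.shape)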