ModelZoo / PaddleOCR_paddle_onnxruntime · Commits

Commit f1506916 — "first commit"
Authored May 18, 2023 by sugon_cxj
Parent: 55c28ed5
Pipeline #266 canceled with stages
Showing 20 changed files with 2598 additions and 0 deletions (+2598, -0):

    ppocr/modeling/heads/rec_att_head.py                        +202  -0
    ppocr/modeling/heads/rec_ctc_head.py                         +87  -0
    ppocr/modeling/heads/rec_multi_head.py                       +73  -0
    ppocr/modeling/heads/rec_nrtr_head.py                       +826  -0
    ppocr/modeling/heads/rec_pren_head.py                        +34  -0
    ppocr/modeling/heads/rec_sar_head.py                        +410  -0
    ppocr/modeling/heads/rec_srn_head.py                        +278  -0
    ppocr/modeling/heads/self_attention.py                      +405  -0
    ppocr/modeling/heads/table_att_head.py                      +246  -0
    ppocr/modeling/necks/__init__.py                             +37  -0
    ppocr/modeling/necks/__pycache__/__init__.cpython-37.pyc      +0  -0
    ppocr/modeling/necks/__pycache__/db_fpn.cpython-37.pyc        +0  -0
    ppocr/modeling/necks/__pycache__/east_fpn.cpython-37.pyc      +0  -0
    ppocr/modeling/necks/__pycache__/fce_fpn.cpython-37.pyc       +0  -0
    ppocr/modeling/necks/__pycache__/fpn.cpython-37.pyc           +0  -0
    ppocr/modeling/necks/__pycache__/pg_fpn.cpython-37.pyc        +0  -0
    ppocr/modeling/necks/__pycache__/pren_fpn.cpython-37.pyc      +0  -0
    ppocr/modeling/necks/__pycache__/rnn.cpython-37.pyc           +0  -0
    ppocr/modeling/necks/__pycache__/sast_fpn.cpython-37.pyc      +0  -0
    ppocr/modeling/necks/__pycache__/table_fpn.cpython-37.pyc     +0  -0
ppocr/modeling/heads/rec_att_head.py (new file, mode 100755)
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np


class AttentionHead(nn.Layer):
    def __init__(self, in_channels, out_channels, hidden_size, **kwargs):
        super(AttentionHead, self).__init__()
        self.input_size = in_channels
        self.hidden_size = hidden_size
        self.num_classes = out_channels

        self.attention_cell = AttentionGRUCell(
            in_channels, hidden_size, out_channels, use_gru=False)
        self.generator = nn.Linear(hidden_size, out_channels)

    def _char_to_onehot(self, input_char, onehot_dim):
        input_one_hot = F.one_hot(input_char, onehot_dim)
        return input_one_hot

    def forward(self, inputs, targets=None, batch_max_length=25):
        batch_size = paddle.shape(inputs)[0]
        num_steps = batch_max_length

        hidden = paddle.zeros((batch_size, self.hidden_size))
        output_hiddens = []

        if targets is not None:
            for i in range(num_steps):
                char_onehots = self._char_to_onehot(
                    targets[:, i], onehot_dim=self.num_classes)
                (outputs, hidden), alpha = self.attention_cell(hidden, inputs,
                                                               char_onehots)
                output_hiddens.append(paddle.unsqueeze(outputs, axis=1))
            output = paddle.concat(output_hiddens, axis=1)
            probs = self.generator(output)
        else:
            targets = paddle.zeros(shape=[batch_size], dtype="int32")
            probs = None
            char_onehots = None
            outputs = None
            alpha = None

            for i in range(num_steps):
                char_onehots = self._char_to_onehot(
                    targets, onehot_dim=self.num_classes)
                (outputs, hidden), alpha = self.attention_cell(hidden, inputs,
                                                               char_onehots)
                probs_step = self.generator(outputs)
                if probs is None:
                    probs = paddle.unsqueeze(probs_step, axis=1)
                else:
                    probs = paddle.concat(
                        [probs, paddle.unsqueeze(probs_step, axis=1)], axis=1)
                next_input = probs_step.argmax(axis=1)
                targets = next_input
        if not self.training:
            probs = paddle.nn.functional.softmax(probs, axis=2)
        return probs


class AttentionGRUCell(nn.Layer):
    def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False):
        super(AttentionGRUCell, self).__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias_attr=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.score = nn.Linear(hidden_size, 1, bias_attr=False)
        self.rnn = nn.GRUCell(
            input_size=input_size + num_embeddings, hidden_size=hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_onehots):
        batch_H_proj = self.i2h(batch_H)
        prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden), axis=1)

        res = paddle.add(batch_H_proj, prev_hidden_proj)
        res = paddle.tanh(res)
        e = self.score(res)

        alpha = F.softmax(e, axis=1)
        alpha = paddle.transpose(alpha, [0, 2, 1])
        context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1)
        concat_context = paddle.concat([context, char_onehots], 1)

        cur_hidden = self.rnn(concat_context, prev_hidden)

        return cur_hidden, alpha


class AttentionLSTM(nn.Layer):
    def __init__(self, in_channels, out_channels, hidden_size, **kwargs):
        super(AttentionLSTM, self).__init__()
        self.input_size = in_channels
        self.hidden_size = hidden_size
        self.num_classes = out_channels

        self.attention_cell = AttentionLSTMCell(
            in_channels, hidden_size, out_channels, use_gru=False)
        self.generator = nn.Linear(hidden_size, out_channels)

    def _char_to_onehot(self, input_char, onehot_dim):
        input_one_hot = F.one_hot(input_char, onehot_dim)
        return input_one_hot

    def forward(self, inputs, targets=None, batch_max_length=25):
        batch_size = inputs.shape[0]
        num_steps = batch_max_length

        hidden = (paddle.zeros((batch_size, self.hidden_size)),
                  paddle.zeros((batch_size, self.hidden_size)))
        output_hiddens = []

        if targets is not None:
            for i in range(num_steps):
                # one-hot vectors for the i-th char
                char_onehots = self._char_to_onehot(
                    targets[:, i], onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, inputs,
                                                    char_onehots)

                hidden = (hidden[1][0], hidden[1][1])
                output_hiddens.append(paddle.unsqueeze(hidden[0], axis=1))
            output = paddle.concat(output_hiddens, axis=1)
            probs = self.generator(output)
        else:
            targets = paddle.zeros(shape=[batch_size], dtype="int32")
            probs = None

            for i in range(num_steps):
                char_onehots = self._char_to_onehot(
                    targets, onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, inputs,
                                                    char_onehots)
                probs_step = self.generator(hidden[0])
                hidden = (hidden[1][0], hidden[1][1])
                if probs is None:
                    probs = paddle.unsqueeze(probs_step, axis=1)
                else:
                    probs = paddle.concat(
                        [probs, paddle.unsqueeze(probs_step, axis=1)], axis=1)

                next_input = probs_step.argmax(axis=1)
                targets = next_input

        return probs


class AttentionLSTMCell(nn.Layer):
    def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False):
        super(AttentionLSTMCell, self).__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias_attr=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.score = nn.Linear(hidden_size, 1, bias_attr=False)
        if not use_gru:
            self.rnn = nn.LSTMCell(
                input_size=input_size + num_embeddings,
                hidden_size=hidden_size)
        else:
            self.rnn = nn.GRUCell(
                input_size=input_size + num_embeddings,
                hidden_size=hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_onehots):
        batch_H_proj = self.i2h(batch_H)
        prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden[0]), axis=1)
        res = paddle.add(batch_H_proj, prev_hidden_proj)
        res = paddle.tanh(res)
        e = self.score(res)

        alpha = F.softmax(e, axis=1)
        alpha = paddle.transpose(alpha, [0, 2, 1])
        context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1)
        concat_context = paddle.concat([context, char_onehots], 1)
        cur_hidden = self.rnn(concat_context, prev_hidden)

        return cur_hidden, alpha
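A quick way to sanity-check the head above — a minimal usage sketch, not part of the commit; the shapes and class count are illustrative assumptions:

    import paddle

    # AttentionHead decodes step by step over a (batch, seq_len, in_channels)
    # feature sequence; in eval mode with targets=None it feeds back its own
    # argmax predictions and returns softmax probabilities per step.
    head = AttentionHead(in_channels=96, out_channels=38, hidden_size=96)
    head.eval()
    feats = paddle.randn([4, 25, 96])
    probs = head(feats, targets=None, batch_max_length=25)
    print(probs.shape)  # [4, 25, 38]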
ppocr/modeling/heads/rec_ctc_head.py (new file, mode 100755)
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
from paddle import ParamAttr, nn
from paddle.nn import functional as F


def get_para_bias_attr(l2_decay, k):
    regularizer = paddle.regularizer.L2Decay(l2_decay)
    stdv = 1.0 / math.sqrt(k * 1.0)
    initializer = nn.initializer.Uniform(-stdv, stdv)
    weight_attr = ParamAttr(regularizer=regularizer, initializer=initializer)
    bias_attr = ParamAttr(regularizer=regularizer, initializer=initializer)
    return [weight_attr, bias_attr]


class CTCHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 fc_decay=0.0004,
                 mid_channels=None,
                 return_feats=False,
                 **kwargs):
        super(CTCHead, self).__init__()
        if mid_channels is None:
            weight_attr, bias_attr = get_para_bias_attr(
                l2_decay=fc_decay, k=in_channels)
            self.fc = nn.Linear(
                in_channels,
                out_channels,
                weight_attr=weight_attr,
                bias_attr=bias_attr)
        else:
            weight_attr1, bias_attr1 = get_para_bias_attr(
                l2_decay=fc_decay, k=in_channels)
            self.fc1 = nn.Linear(
                in_channels,
                mid_channels,
                weight_attr=weight_attr1,
                bias_attr=bias_attr1)

            weight_attr2, bias_attr2 = get_para_bias_attr(
                l2_decay=fc_decay, k=mid_channels)
            self.fc2 = nn.Linear(
                mid_channels,
                out_channels,
                weight_attr=weight_attr2,
                bias_attr=bias_attr2)
        self.out_channels = out_channels
        self.mid_channels = mid_channels
        self.return_feats = return_feats

    def forward(self, x, targets=None):
        if self.mid_channels is None:
            predicts = self.fc(x)
        else:
            x = self.fc1(x)
            predicts = self.fc2(x)

        if self.return_feats:
            result = (x, predicts)
        else:
            result = predicts
        if not self.training:
            predicts = F.softmax(predicts, axis=2)
            result = predicts

        return result
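For orientation, a minimal CTCHead sketch (shapes and the class count are assumptions, not from this commit); in eval mode the head returns per-timestep class distributions for a downstream CTC decoder to collapse into a label sequence:

    import paddle

    head = CTCHead(in_channels=64, out_channels=6625)  # e.g. charset + CTC blank
    head.eval()
    seq_feats = paddle.randn([4, 80, 64])  # (batch, time_steps, in_channels)
    probs = head(seq_feats)                # [4, 80, 6625], softmax over axis=2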
ppocr/modeling/heads/rec_multi_head.py (new file, mode 100755)
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F

from ppocr.modeling.necks.rnn import Im2Seq, EncoderWithRNN, EncoderWithFC, SequenceEncoder, EncoderWithSVTR
from .rec_ctc_head import CTCHead
from .rec_sar_head import SARHead


class MultiHead(nn.Layer):
    def __init__(self, in_channels, out_channels_list, **kwargs):
        super().__init__()
        self.head_list = kwargs.pop('head_list')
        self.gtc_head = 'sar'
        assert len(self.head_list) >= 2
        for idx, head_name in enumerate(self.head_list):
            name = list(head_name)[0]
            if name == 'SARHead':
                # sar head
                sar_args = self.head_list[idx][name]
                self.sar_head = eval(name)(
                    in_channels=in_channels,
                    out_channels=out_channels_list['SARLabelDecode'],
                    **sar_args)
            elif name == 'CTCHead':
                # ctc neck
                self.encoder_reshape = Im2Seq(in_channels)
                neck_args = self.head_list[idx][name]['Neck']
                encoder_type = neck_args.pop('name')
                self.encoder = encoder_type
                self.ctc_encoder = SequenceEncoder(
                    in_channels=in_channels,
                    encoder_type=encoder_type,
                    **neck_args)
                # ctc head
                head_args = self.head_list[idx][name]['Head']
                self.ctc_head = eval(name)(
                    in_channels=self.ctc_encoder.out_channels,
                    out_channels=out_channels_list['CTCLabelDecode'],
                    **head_args)
            else:
                raise NotImplementedError(
                    '{} is not supported in MultiHead yet'.format(name))

    def forward(self, x, targets=None):
        ctc_encoder = self.ctc_encoder(x)
        ctc_out = self.ctc_head(ctc_encoder, targets)
        head_out = dict()
        head_out['ctc'] = ctc_out
        head_out['ctc_neck'] = ctc_encoder
        # eval mode
        if not self.training:
            return ctc_out
        if self.gtc_head == 'sar':
            sar_out = self.sar_head(x, targets[1:])
            head_out['sar'] = sar_out
            return head_out
        else:
            return head_out
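MultiHead is configured entirely through the head_list it pops from kwargs. A sketch of the expected structure, modeled on PP-OCRv3-style configs — the nested keys and values here are assumptions, not taken from this commit:

    # Each entry maps a head class name to its arguments; CTCHead additionally
    # carries a 'Neck' block that configures the SequenceEncoder in front of it.
    head_list = [
        {'CTCHead': {
            'Neck': {'name': 'svtr', 'dims': 64, 'depth': 2, 'hidden_dims': 120},
            'Head': {'fc_decay': 0.00001},
        }},
        {'SARHead': {'enc_dim': 512, 'max_text_length': 25}},
    ]
    out_channels_list = {'CTCLabelDecode': 6625, 'SARLabelDecode': 6627}
    head = MultiHead(64, out_channels_list, head_list=head_list)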
ppocr/modeling/heads/rec_nrtr_head.py (new file, mode 100755)
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import copy
from paddle import nn
import paddle.nn.functional as F
from paddle.nn import LayerList
from paddle.nn.initializer import XavierNormal as xavier_uniform_
from paddle.nn import Dropout, Linear, LayerNorm, Conv2D
import numpy as np
from ppocr.modeling.heads.multiheadAttention import MultiheadAttention
from paddle.nn.initializer import Constant as constant_
from paddle.nn.initializer import XavierNormal as xavier_normal_

zeros_ = constant_(value=0.)
ones_ = constant_(value=1.)


class Transformer(nn.Layer):
    """A transformer model. The user is able to modify the attributes as
    needed. The architecture is based on the paper "Attention Is All You
    Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion
    Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention
    is all you need. In Advances in Neural Information Processing Systems,
    pages 6000-6010.

    Args:
        d_model: the number of expected features in the encoder/decoder inputs (default=512).
        nhead: the number of heads in the multiheadattention models (default=8).
        num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
        num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        custom_encoder: custom encoder (default=None).
        custom_decoder: custom decoder (default=None).
    """

    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 beam_size=0,
                 num_decoder_layers=6,
                 dim_feedforward=1024,
                 attention_dropout_rate=0.0,
                 residual_dropout_rate=0.1,
                 custom_encoder=None,
                 custom_decoder=None,
                 in_channels=0,
                 out_channels=0,
                 scale_embedding=True):
        super(Transformer, self).__init__()
        self.out_channels = out_channels + 1
        self.embedding = Embeddings(
            d_model=d_model,
            vocab=self.out_channels,
            padding_idx=0,
            scale_embedding=scale_embedding)
        self.positional_encoding = PositionalEncoding(
            dropout=residual_dropout_rate, dim=d_model)
        if custom_encoder is not None:
            self.encoder = custom_encoder
        else:
            if num_encoder_layers > 0:
                encoder_layer = TransformerEncoderLayer(
                    d_model, nhead, dim_feedforward, attention_dropout_rate,
                    residual_dropout_rate)
                self.encoder = TransformerEncoder(encoder_layer,
                                                  num_encoder_layers)
            else:
                self.encoder = None

        if custom_decoder is not None:
            self.decoder = custom_decoder
        else:
            decoder_layer = TransformerDecoderLayer(
                d_model, nhead, dim_feedforward, attention_dropout_rate,
                residual_dropout_rate)
            self.decoder = TransformerDecoder(decoder_layer,
                                              num_decoder_layers)

        self._reset_parameters()
        self.beam_size = beam_size
        self.d_model = d_model
        self.nhead = nhead
        self.tgt_word_prj = nn.Linear(
            d_model, self.out_channels, bias_attr=False)
        w0 = np.random.normal(0.0, d_model**-0.5,
                              (d_model, self.out_channels)).astype(np.float32)
        self.tgt_word_prj.weight.set_value(w0)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Conv2D):
            xavier_normal_(m.weight)
            if m.bias is not None:
                zeros_(m.bias)

    def forward_train(self, src, tgt):
        tgt = tgt[:, :-1]

        tgt_key_padding_mask = self.generate_padding_mask(tgt)
        tgt = self.embedding(tgt).transpose([1, 0, 2])
        tgt = self.positional_encoding(tgt)
        tgt_mask = self.generate_square_subsequent_mask(tgt.shape[0])

        if self.encoder is not None:
            src = self.positional_encoding(src.transpose([1, 0, 2]))
            memory = self.encoder(src)
        else:
            memory = src.squeeze(2).transpose([2, 0, 1])
        output = self.decoder(
            tgt,
            memory,
            tgt_mask=tgt_mask,
            memory_mask=None,
            tgt_key_padding_mask=tgt_key_padding_mask,
            memory_key_padding_mask=None)
        output = output.transpose([1, 0, 2])
        logit = self.tgt_word_prj(output)
        return logit

    def forward(self, src, targets=None):
        """Take in and process masked source/target sequences.

        Args:
            src: the sequence to the encoder (required).
            tgt: the sequence to the decoder (required).
        Shape:
            - src: :math:`(S, N, E)`.
            - tgt: :math:`(T, N, E)`.
        Examples:
            >>> output = transformer_model(src, tgt)
        """
        if self.training:
            max_len = targets[1].max()
            tgt = targets[0][:, :2 + max_len]
            return self.forward_train(src, tgt)
        else:
            if self.beam_size > 0:
                return self.forward_beam(src)
            else:
                return self.forward_test(src)

    def forward_test(self, src):
        bs = paddle.shape(src)[0]
        if self.encoder is not None:
            src = self.positional_encoding(paddle.transpose(src, [1, 0, 2]))
            memory = self.encoder(src)
        else:
            memory = paddle.transpose(paddle.squeeze(src, 2), [2, 0, 1])
        dec_seq = paddle.full((bs, 1), 2, dtype=paddle.int64)
        dec_prob = paddle.full((bs, 1), 1., dtype=paddle.float32)
        for len_dec_seq in range(1, 25):
            dec_seq_embed = paddle.transpose(
                self.embedding(dec_seq), [1, 0, 2])
            dec_seq_embed = self.positional_encoding(dec_seq_embed)
            tgt_mask = self.generate_square_subsequent_mask(
                paddle.shape(dec_seq_embed)[0])
            output = self.decoder(
                dec_seq_embed,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=None,
                tgt_key_padding_mask=None,
                memory_key_padding_mask=None)
            dec_output = paddle.transpose(output, [1, 0, 2])
            dec_output = dec_output[:, -1, :]
            word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=1)
            preds_idx = paddle.argmax(word_prob, axis=1)
            if paddle.equal_all(
                    preds_idx,
                    paddle.full(
                        paddle.shape(preds_idx), 3, dtype='int64')):
                break
            preds_prob = paddle.max(word_prob, axis=1)
            dec_seq = paddle.concat(
                [dec_seq, paddle.reshape(preds_idx, [-1, 1])], axis=1)
            dec_prob = paddle.concat(
                [dec_prob, paddle.reshape(preds_prob, [-1, 1])], axis=1)
        return [dec_seq, dec_prob]

    def forward_beam(self, images):
        ''' Translation work in one batch '''

        def get_inst_idx_to_tensor_position_map(inst_idx_list):
            ''' Indicate the position of an instance in a tensor. '''
            return {
                inst_idx: tensor_position
                for tensor_position, inst_idx in enumerate(inst_idx_list)
            }

        def collect_active_part(beamed_tensor, curr_active_inst_idx,
                                n_prev_active_inst, n_bm):
            ''' Collect tensor parts associated to active instances. '''
            beamed_tensor_shape = paddle.shape(beamed_tensor)
            n_curr_active_inst = len(curr_active_inst_idx)
            new_shape = (n_curr_active_inst * n_bm, beamed_tensor_shape[1],
                         beamed_tensor_shape[2])

            beamed_tensor = beamed_tensor.reshape([n_prev_active_inst, -1])
            beamed_tensor = beamed_tensor.index_select(
                curr_active_inst_idx, axis=0)
            beamed_tensor = beamed_tensor.reshape(new_shape)

            return beamed_tensor

        def collate_active_info(src_enc, inst_idx_to_position_map,
                                active_inst_idx_list):
            # Sentences which are still active are collected,
            # so the decoder will not run on completed sentences.
            n_prev_active_inst = len(inst_idx_to_position_map)
            active_inst_idx = [
                inst_idx_to_position_map[k] for k in active_inst_idx_list
            ]
            active_inst_idx = paddle.to_tensor(active_inst_idx, dtype='int64')
            active_src_enc = collect_active_part(
                src_enc.transpose([1, 0, 2]), active_inst_idx,
                n_prev_active_inst, n_bm).transpose([1, 0, 2])
            active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(
                active_inst_idx_list)
            return active_src_enc, active_inst_idx_to_position_map

        def beam_decode_step(inst_dec_beams, len_dec_seq, enc_output,
                             inst_idx_to_position_map, n_bm,
                             memory_key_padding_mask):
            ''' Decode and update beam status, and then return active beam idx '''

            def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
                dec_partial_seq = [
                    b.get_current_state() for b in inst_dec_beams if not b.done
                ]
                dec_partial_seq = paddle.stack(dec_partial_seq)
                dec_partial_seq = dec_partial_seq.reshape([-1, len_dec_seq])
                return dec_partial_seq

            def predict_word(dec_seq, enc_output, n_active_inst, n_bm,
                             memory_key_padding_mask):
                dec_seq = paddle.transpose(self.embedding(dec_seq), [1, 0, 2])
                dec_seq = self.positional_encoding(dec_seq)
                tgt_mask = self.generate_square_subsequent_mask(
                    paddle.shape(dec_seq)[0])
                dec_output = self.decoder(
                    dec_seq,
                    enc_output,
                    tgt_mask=tgt_mask,
                    tgt_key_padding_mask=None,
                    memory_key_padding_mask=memory_key_padding_mask)
                dec_output = paddle.transpose(dec_output, [1, 0, 2])
                # Pick the last step: (bh * bm) * d_h
                dec_output = dec_output[:, -1, :]
                word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=1)
                word_prob = paddle.reshape(word_prob,
                                           [n_active_inst, n_bm, -1])
                return word_prob

            def collect_active_inst_idx_list(inst_beams, word_prob,
                                             inst_idx_to_position_map):
                active_inst_idx_list = []
                for inst_idx, inst_position in inst_idx_to_position_map.items():
                    is_inst_complete = inst_beams[inst_idx].advance(
                        word_prob[inst_position])
                    if not is_inst_complete:
                        active_inst_idx_list += [inst_idx]

                return active_inst_idx_list

            n_active_inst = len(inst_idx_to_position_map)
            dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
            word_prob = predict_word(dec_seq, enc_output, n_active_inst, n_bm,
                                     None)
            # Update the beam with predicted word prob information and collect
            # incomplete instances
            active_inst_idx_list = collect_active_inst_idx_list(
                inst_dec_beams, word_prob, inst_idx_to_position_map)
            return active_inst_idx_list

        def collect_hypothesis_and_scores(inst_dec_beams, n_best):
            all_hyp, all_scores = [], []
            for inst_idx in range(len(inst_dec_beams)):
                scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
                all_scores += [scores[:n_best]]
                hyps = [
                    inst_dec_beams[inst_idx].get_hypothesis(i)
                    for i in tail_idxs[:n_best]
                ]
                all_hyp += [hyps]
            return all_hyp, all_scores

        with paddle.no_grad():
            # -- Encode
            if self.encoder is not None:
                src = self.positional_encoding(images.transpose([1, 0, 2]))
                src_enc = self.encoder(src)
            else:
                src_enc = images.squeeze(2).transpose([0, 2, 1])

            n_bm = self.beam_size
            src_shape = paddle.shape(src_enc)
            inst_dec_beams = [Beam(n_bm) for _ in range(1)]
            active_inst_idx_list = list(range(1))
            # Repeat data for beam search
            src_enc = paddle.tile(src_enc, [1, n_bm, 1])
            inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(
                active_inst_idx_list)
            # Decode
            for len_dec_seq in range(1, 25):
                src_enc_copy = src_enc.clone()
                active_inst_idx_list = beam_decode_step(
                    inst_dec_beams, len_dec_seq, src_enc_copy,
                    inst_idx_to_position_map, n_bm, None)
                if not active_inst_idx_list:
                    break  # all instances have finished their path to <EOS>
                src_enc, inst_idx_to_position_map = collate_active_info(
                    src_enc_copy, inst_idx_to_position_map,
                    active_inst_idx_list)
            batch_hyp, batch_scores = collect_hypothesis_and_scores(
                inst_dec_beams, 1)
            result_hyp = []
            hyp_scores = []
            for bs_hyp, score in zip(batch_hyp, batch_scores):
                l = len(bs_hyp[0])
                bs_hyp_pad = bs_hyp[0] + [3] * (25 - l)
                result_hyp.append(bs_hyp_pad)
                score = float(score) / l
                hyp_score = [score for _ in range(25)]
                hyp_scores.append(hyp_score)
            return [
                paddle.to_tensor(
                    np.array(result_hyp), dtype=paddle.int64),
                paddle.to_tensor(hyp_scores)
            ]

    def generate_square_subsequent_mask(self, sz):
        """Generate a square mask for the sequence. The masked positions are
        filled with float('-inf'). Unmasked positions are filled with
        float(0.0).
        """
        mask = paddle.zeros([sz, sz], dtype='float32')
        mask_inf = paddle.triu(
            paddle.full(
                shape=[sz, sz], dtype='float32', fill_value='-inf'),
            diagonal=1)
        mask = mask + mask_inf
        return mask

    def generate_padding_mask(self, x):
        padding_mask = paddle.equal(x, paddle.to_tensor(0, dtype=x.dtype))
        return padding_mask

    def _reset_parameters(self):
        """Initiate parameters in the transformer model."""
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)


class TransformerEncoder(nn.Layer):
    """TransformerEncoder is a stack of N encoder layers.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).
    """

    def __init__(self, encoder_layer, num_layers):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers

    def forward(self, src):
        """Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
        """
        output = src

        for i in range(self.num_layers):
            output = self.layers[i](output,
                                    src_mask=None,
                                    src_key_padding_mask=None)

        return output


class TransformerDecoder(nn.Layer):
    """TransformerDecoder is a stack of N decoder layers.

    Args:
        decoder_layer: an instance of the TransformerDecoderLayer() class (required).
        num_layers: the number of sub-decoder-layers in the decoder (required).
        norm: the layer normalization component (optional).
    """

    def __init__(self, decoder_layer, num_layers):
        super(TransformerDecoder, self).__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers

    def forward(self,
                tgt,
                memory,
                tgt_mask=None,
                memory_mask=None,
                tgt_key_padding_mask=None,
                memory_key_padding_mask=None):
        """Pass the inputs (and mask) through the decoder layer in turn.

        Args:
            tgt: the sequence to the decoder (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).
        """
        output = tgt
        for i in range(self.num_layers):
            output = self.layers[i](
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask)

        return output


class TransformerEncoderLayer(nn.Layer):
    """TransformerEncoderLayer is made up of self-attn and a feedforward
    network.

    This standard encoder layer is based on the paper "Attention Is All You
    Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion
    Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention
    is all you need. In Advances in Neural Information Processing Systems,
    pages 6000-6010. Users may modify or implement it in a different way
    during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
    """

    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward=2048,
                 attention_dropout_rate=0.0,
                 residual_dropout_rate=0.1):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(
            d_model, nhead, dropout=attention_dropout_rate)

        self.conv1 = Conv2D(
            in_channels=d_model,
            out_channels=dim_feedforward,
            kernel_size=(1, 1))
        self.conv2 = Conv2D(
            in_channels=dim_feedforward,
            out_channels=d_model,
            kernel_size=(1, 1))

        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(residual_dropout_rate)
        self.dropout2 = Dropout(residual_dropout_rate)

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        """Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
        """
        src2 = self.self_attn(
            src,
            src,
            src,
            attn_mask=src_mask,
            key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)

        src = paddle.transpose(src, [1, 2, 0])
        src = paddle.unsqueeze(src, 2)
        src2 = self.conv2(F.relu(self.conv1(src)))
        src2 = paddle.squeeze(src2, 2)
        src2 = paddle.transpose(src2, [2, 0, 1])
        src = paddle.squeeze(src, 2)
        src = paddle.transpose(src, [2, 0, 1])

        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src


class TransformerDecoderLayer(nn.Layer):
    """TransformerDecoderLayer is made up of self-attn, multi-head-attn and a
    feedforward network.

    This standard decoder layer is based on the paper "Attention Is All You
    Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion
    Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention
    is all you need. In Advances in Neural Information Processing Systems,
    pages 6000-6010. Users may modify or implement it in a different way
    during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
    """

    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward=2048,
                 attention_dropout_rate=0.0,
                 residual_dropout_rate=0.1):
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(
            d_model, nhead, dropout=attention_dropout_rate)
        self.multihead_attn = MultiheadAttention(
            d_model, nhead, dropout=attention_dropout_rate)

        self.conv1 = Conv2D(
            in_channels=d_model,
            out_channels=dim_feedforward,
            kernel_size=(1, 1))
        self.conv2 = Conv2D(
            in_channels=dim_feedforward,
            out_channels=d_model,
            kernel_size=(1, 1))

        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)
        self.dropout1 = Dropout(residual_dropout_rate)
        self.dropout2 = Dropout(residual_dropout_rate)
        self.dropout3 = Dropout(residual_dropout_rate)

    def forward(self,
                tgt,
                memory,
                tgt_mask=None,
                memory_mask=None,
                tgt_key_padding_mask=None,
                memory_key_padding_mask=None):
        """Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).
        """
        tgt2 = self.self_attn(
            tgt,
            tgt,
            tgt,
            attn_mask=tgt_mask,
            key_padding_mask=tgt_key_padding_mask)
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(
            tgt,
            memory,
            memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)

        # default
        tgt = paddle.transpose(tgt, [1, 2, 0])
        tgt = paddle.unsqueeze(tgt, 2)
        tgt2 = self.conv2(F.relu(self.conv1(tgt)))
        tgt2 = paddle.squeeze(tgt2, 2)
        tgt2 = paddle.transpose(tgt2, [2, 0, 1])
        tgt = paddle.squeeze(tgt, 2)
        tgt = paddle.transpose(tgt, [2, 0, 1])

        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt


def _get_clones(module, N):
    return LayerList([copy.deepcopy(module) for i in range(N)])


class PositionalEncoding(nn.Layer):
    """Inject some information about the relative or absolute position of the
    tokens in the sequence. The positional encodings have the same dimension
    as the embeddings, so that the two can be summed. Here, we use sine and
    cosine functions of different frequencies.

    .. math::
        \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
        \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))

    where pos is the word position and i is the embed idx.

    Args:
        d_model: the embed dim (required).
        dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=5000).
    Examples:
        >>> pos_encoder = PositionalEncoding(d_model)
    """

    def __init__(self, dropout, dim, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = paddle.zeros([max_len, dim])
        position = paddle.arange(
            0, max_len, dtype=paddle.float32).unsqueeze(1)
        div_term = paddle.exp(
            paddle.arange(0, dim, 2).astype('float32') *
            (-math.log(10000.0) / dim))
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        pe = paddle.unsqueeze(pe, 0)
        pe = paddle.transpose(pe, [1, 0, 2])
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Inputs of forward function.

        Args:
            x: the sequence fed to the positional encoder model (required).
        Shape:
            x: [sequence length, batch size, embed dim]
            output: [sequence length, batch size, embed dim]
        Examples:
            >>> output = pos_encoder(x)
        """
        x = x + self.pe[:paddle.shape(x)[0], :]
        return self.dropout(x)
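# Sanity-check sketch for PositionalEncoding (editorial annotation, not part
# of the original file): `pe` holds sin on even channels and cos on odd
# channels, and forward() broadcasts pe[:seq_len] over the batch axis, e.g.
#
#   enc = PositionalEncoding(dropout=0.0, dim=512)
#   x = paddle.zeros([30, 4, 512])   # (seq_len, batch, embed dim)
#   y = enc(x)                       # y[t, b] equals pe[t, 0] for every b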
class PositionalEncoding_2d(nn.Layer):
    """Inject some information about the relative or absolute position of the
    tokens in the sequence. The positional encodings have the same dimension
    as the embeddings, so that the two can be summed. Here, we use sine and
    cosine functions of different frequencies.

    .. math::
        \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
        \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))

    where pos is the word position and i is the embed idx.

    Args:
        d_model: the embed dim (required).
        dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=5000).
    Examples:
        >>> pos_encoder = PositionalEncoding(d_model)
    """

    def __init__(self, dropout, dim, max_len=5000):
        super(PositionalEncoding_2d, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = paddle.zeros([max_len, dim])
        position = paddle.arange(
            0, max_len, dtype=paddle.float32).unsqueeze(1)
        div_term = paddle.exp(
            paddle.arange(0, dim, 2).astype('float32') *
            (-math.log(10000.0) / dim))
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        pe = paddle.transpose(paddle.unsqueeze(pe, 0), [1, 0, 2])
        self.register_buffer('pe', pe)

        self.avg_pool_1 = nn.AdaptiveAvgPool2D((1, 1))
        self.linear1 = nn.Linear(dim, dim)
        self.linear1.weight.set_value(
            paddle.full_like(self.linear1.weight, 1.))
        self.avg_pool_2 = nn.AdaptiveAvgPool2D((1, 1))
        self.linear2 = nn.Linear(dim, dim)
        self.linear2.weight.set_value(
            paddle.full_like(self.linear2.weight, 1.))

    def forward(self, x):
        """Inputs of forward function.

        Args:
            x: the sequence fed to the positional encoder model (required).
        Shape:
            x: [sequence length, batch size, embed dim]
            output: [sequence length, batch size, embed dim]
        Examples:
            >>> output = pos_encoder(x)
        """
        w_pe = self.pe[:paddle.shape(x)[-1], :]
        w1 = self.linear1(self.avg_pool_1(x).squeeze()).unsqueeze(0)
        w_pe = w_pe * w1
        w_pe = paddle.transpose(w_pe, [1, 2, 0])
        w_pe = paddle.unsqueeze(w_pe, 2)

        h_pe = self.pe[:paddle.shape(x)[-2], :]
        w2 = self.linear2(self.avg_pool_2(x).squeeze()).unsqueeze(0)
        h_pe = h_pe * w2
        h_pe = paddle.transpose(h_pe, [1, 2, 0])
        h_pe = paddle.unsqueeze(h_pe, 3)

        x = x + w_pe + h_pe
        x = paddle.transpose(
            paddle.reshape(x,
                           [x.shape[0], x.shape[1], x.shape[2] * x.shape[3]]),
            [2, 0, 1])

        return self.dropout(x)


class Embeddings(nn.Layer):
    def __init__(self, d_model, vocab, padding_idx, scale_embedding):
        super(Embeddings, self).__init__()
        self.embedding = nn.Embedding(vocab, d_model, padding_idx=padding_idx)
        w0 = np.random.normal(0.0, d_model**-0.5,
                              (vocab, d_model)).astype(np.float32)
        self.embedding.weight.set_value(w0)
        self.d_model = d_model
        self.scale_embedding = scale_embedding

    def forward(self, x):
        if self.scale_embedding:
            x = self.embedding(x)
            return x * math.sqrt(self.d_model)
        return self.embedding(x)


class Beam():
    ''' Beam search '''

    def __init__(self, size, device=False):
        self.size = size
        self._done = False
        # The score for each translation on the beam.
        self.scores = paddle.zeros((size, ), dtype=paddle.float32)
        self.all_scores = []
        # The backpointers at each time-step.
        self.prev_ks = []
        # The outputs at each time-step.
        self.next_ys = [paddle.full((size, ), 0, dtype=paddle.int64)]
        self.next_ys[0][0] = 2

    def get_current_state(self):
        "Get the outputs for the current timestep."
        return self.get_tentative_hypothesis()

    def get_current_origin(self):
        "Get the backpointers for the current timestep."
        return self.prev_ks[-1]

    @property
    def done(self):
        return self._done

    def advance(self, word_prob):
        "Update beam status and check if finished or not."
        num_words = word_prob.shape[1]

        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(
                word_prob)
        else:
            beam_lk = word_prob[0]
        flat_beam_lk = beam_lk.reshape([-1])
        best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True,
                                                        True)  # 1st sort
        self.all_scores.append(self.scores)
        self.scores = best_scores
        # bestScoresId is flattened as a (beam x word) array,
        # so we need to calculate which word and beam each score came from
        prev_k = best_scores_id // num_words
        self.prev_ks.append(prev_k)
        self.next_ys.append(best_scores_id - prev_k * num_words)
        # End condition is when top-of-beam is EOS.
        if self.next_ys[-1][0] == 3:
            self._done = True
            self.all_scores.append(self.scores)

        return self._done

    def sort_scores(self):
        "Sort the scores."
        return self.scores, paddle.to_tensor(
            [i for i in range(int(self.scores.shape[0]))], dtype='int32')

    def get_the_best_score_and_idx(self):
        "Get the score of the best in the beam."
        scores, ids = self.sort_scores()
        return scores[1], ids[1]

    def get_tentative_hypothesis(self):
        "Get the decoded sequence for the current timestep."
        if len(self.next_ys) == 1:
            dec_seq = self.next_ys[0].unsqueeze(1)
        else:
            _, keys = self.sort_scores()
            hyps = [self.get_hypothesis(k) for k in keys]
            hyps = [[2] + h for h in hyps]
            dec_seq = paddle.to_tensor(hyps, dtype='int64')
        return dec_seq

    def get_hypothesis(self, k):
        """ Walk back to construct the full hypothesis. """
        hyp = []
        for j in range(len(self.prev_ks) - 1, -1, -1):
            hyp.append(self.next_ys[j + 1][k])
            k = self.prev_ks[j][k]
        return list(map(lambda x: x.item(), hyp[::-1]))
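A rough inference sketch for this head (the shapes and vocabulary size are assumptions, and the module needs ppocr's MultiheadAttention importable): with beam_size=0 the eval path runs greedy decoding via forward_test, while beam_size > 0 selects the Beam search path:

    import paddle

    model = Transformer(d_model=512, nhead=8, num_encoder_layers=6,
                        num_decoder_layers=6, out_channels=92, beam_size=0)
    model.eval()
    src = paddle.randn([4, 80, 512])  # (batch, seq_len, d_model) visual features
    dec_seq, dec_prob = model(src)    # greedy token ids and their probabilities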
ppocr/modeling/heads/rec_pren_head.py (new file, mode 100755)
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from paddle import nn
from paddle.nn import functional as F


class PRENHead(nn.Layer):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(PRENHead, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels)

    def forward(self, x, targets=None):
        predicts = self.linear(x)

        if not self.training:
            predicts = F.softmax(predicts, axis=2)

        return predicts
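PRENHead is just a linear projection with inference-time softmax; a minimal sketch with assumed shapes:

    import paddle

    head = PRENHead(in_channels=384, out_channels=38)
    head.eval()
    feats = paddle.randn([4, 25, 384])  # (batch, seq_len, in_channels)
    probs = head(feats)                 # [4, 25, 38], softmax over axis=2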
ppocr/modeling/heads/rec_sar_head.py (new file, mode 100755)
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/encoders/sar_encoder.py
https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/decoders/sar_decoder.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F


class SAREncoder(nn.Layer):
    """
    Args:
        enc_bi_rnn (bool): If True, use bidirectional RNN in encoder.
        enc_drop_rnn (float): Dropout probability of RNN layer in encoder.
        enc_gru (bool): If True, use GRU, else LSTM in encoder.
        d_model (int): Dim of channels from backbone.
        d_enc (int): Dim of encoder RNN layer.
        mask (bool): If True, mask padding in RNN sequence.
    """

    def __init__(self,
                 enc_bi_rnn=False,
                 enc_drop_rnn=0.1,
                 enc_gru=False,
                 d_model=512,
                 d_enc=512,
                 mask=True,
                 **kwargs):
        super().__init__()
        assert isinstance(enc_bi_rnn, bool)
        assert isinstance(enc_drop_rnn, (int, float))
        assert 0 <= enc_drop_rnn < 1.0
        assert isinstance(enc_gru, bool)
        assert isinstance(d_model, int)
        assert isinstance(d_enc, int)
        assert isinstance(mask, bool)

        self.enc_bi_rnn = enc_bi_rnn
        self.enc_drop_rnn = enc_drop_rnn
        self.mask = mask

        # LSTM Encoder
        if enc_bi_rnn:
            direction = 'bidirectional'
        else:
            direction = 'forward'
        kwargs = dict(
            input_size=d_model,
            hidden_size=d_enc,
            num_layers=2,
            time_major=False,
            dropout=enc_drop_rnn,
            direction=direction)
        if enc_gru:
            self.rnn_encoder = nn.GRU(**kwargs)
        else:
            self.rnn_encoder = nn.LSTM(**kwargs)

        # global feature transformation
        encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1)
        self.linear = nn.Linear(encoder_rnn_out_size, encoder_rnn_out_size)

    def forward(self, feat, img_metas=None):
        if img_metas is not None:
            assert len(img_metas[0]) == paddle.shape(feat)[0]

        valid_ratios = None
        if img_metas is not None and self.mask:
            valid_ratios = img_metas[-1]

        h_feat = feat.shape[2]  # bsz c h w
        feat_v = F.max_pool2d(
            feat, kernel_size=(h_feat, 1), stride=1, padding=0)
        feat_v = feat_v.squeeze(2)  # bsz * C * W
        feat_v = paddle.transpose(feat_v, perm=[0, 2, 1])  # bsz * W * C
        holistic_feat = self.rnn_encoder(feat_v)[0]  # bsz * T * C

        if valid_ratios is not None:
            valid_hf = []
            T = paddle.shape(holistic_feat)[1]
            for i in range(paddle.shape(valid_ratios)[0]):
                valid_step = paddle.minimum(
                    T, paddle.ceil(valid_ratios[i] * T).astype('int32')) - 1
                valid_hf.append(holistic_feat[i, valid_step, :])
            valid_hf = paddle.stack(valid_hf, axis=0)
        else:
            valid_hf = holistic_feat[:, -1, :]  # bsz * C
        holistic_feat = self.linear(valid_hf)  # bsz * C

        return holistic_feat


class BaseDecoder(nn.Layer):
    def __init__(self, **kwargs):
        super().__init__()

    def forward_train(self, feat, out_enc, targets, img_metas):
        raise NotImplementedError

    def forward_test(self, feat, out_enc, img_metas):
        raise NotImplementedError

    def forward(self,
                feat,
                out_enc,
                label=None,
                img_metas=None,
                train_mode=True):
        self.train_mode = train_mode

        if train_mode:
            return self.forward_train(feat, out_enc, label, img_metas)
        return self.forward_test(feat, out_enc, img_metas)


class ParallelSARDecoder(BaseDecoder):
    """
    Args:
        out_channels (int): Output class number.
        enc_bi_rnn (bool): If True, use bidirectional RNN in encoder.
        dec_bi_rnn (bool): If True, use bidirectional RNN in decoder.
        dec_drop_rnn (float): Dropout of RNN layer in decoder.
        dec_gru (bool): If True, use GRU, else LSTM in decoder.
        d_model (int): Dim of channels from backbone.
        d_enc (int): Dim of encoder RNN layer.
        d_k (int): Dim of channels of attention module.
        pred_dropout (float): Dropout probability of prediction layer.
        max_seq_len (int): Maximum sequence length for decoding.
        mask (bool): If True, mask padding in feature map.
        start_idx (int): Index of start token.
        padding_idx (int): Index of padding token.
        pred_concat (bool): If True, concat glimpse feature from
            attention with holistic feature and hidden state.
    """

    def __init__(self,
                 out_channels,  # 90 + unknown + start + padding
                 enc_bi_rnn=False,
                 dec_bi_rnn=False,
                 dec_drop_rnn=0.0,
                 dec_gru=False,
                 d_model=512,
                 d_enc=512,
                 d_k=64,
                 pred_dropout=0.1,
                 max_text_length=30,
                 mask=True,
                 pred_concat=True,
                 **kwargs):
        super().__init__()

        self.num_classes = out_channels
        self.enc_bi_rnn = enc_bi_rnn
        self.d_k = d_k
        self.start_idx = out_channels - 2
        self.padding_idx = out_channels - 1
        self.max_seq_len = max_text_length
        self.mask = mask
        self.pred_concat = pred_concat

        encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1)
        decoder_rnn_out_size = encoder_rnn_out_size * (int(dec_bi_rnn) + 1)

        # 2D attention layer
        self.conv1x1_1 = nn.Linear(decoder_rnn_out_size, d_k)
        self.conv3x3_1 = nn.Conv2D(
            d_model, d_k, kernel_size=3, stride=1, padding=1)
        self.conv1x1_2 = nn.Linear(d_k, 1)

        # Decoder RNN layer
        if dec_bi_rnn:
            direction = 'bidirectional'
        else:
            direction = 'forward'

        kwargs = dict(
            input_size=encoder_rnn_out_size,
            hidden_size=encoder_rnn_out_size,
            num_layers=2,
            time_major=False,
            dropout=dec_drop_rnn,
            direction=direction)
        if dec_gru:
            self.rnn_decoder = nn.GRU(**kwargs)
        else:
            self.rnn_decoder = nn.LSTM(**kwargs)

        # Decoder input embedding
        self.embedding = nn.Embedding(
            self.num_classes,
            encoder_rnn_out_size,
            padding_idx=self.padding_idx)

        # Prediction layer
        self.pred_dropout = nn.Dropout(pred_dropout)
        pred_num_classes = self.num_classes - 1
        if pred_concat:
            fc_in_channel = decoder_rnn_out_size + d_model + encoder_rnn_out_size
        else:
            fc_in_channel = d_model
        self.prediction = nn.Linear(fc_in_channel, pred_num_classes)

    def _2d_attention(self,
                      decoder_input,
                      feat,
                      holistic_feat,
                      valid_ratios=None):
        y = self.rnn_decoder(decoder_input)[0]
        # y: bsz * (seq_len + 1) * hidden_size

        attn_query = self.conv1x1_1(y)  # bsz * (seq_len + 1) * attn_size
        bsz, seq_len, attn_size = attn_query.shape
        attn_query = paddle.unsqueeze(attn_query, axis=[3, 4])
        # (bsz, seq_len + 1, attn_size, 1, 1)

        attn_key = self.conv3x3_1(feat)
        # bsz * attn_size * h * w
        attn_key = attn_key.unsqueeze(1)
        # bsz * 1 * attn_size * h * w

        attn_weight = paddle.tanh(paddle.add(attn_key, attn_query))
        # bsz * (seq_len + 1) * attn_size * h * w
        attn_weight = paddle.transpose(attn_weight, perm=[0, 1, 3, 4, 2])
        # bsz * (seq_len + 1) * h * w * attn_size
        attn_weight = self.conv1x1_2(attn_weight)
        # bsz * (seq_len + 1) * h * w * 1
        bsz, T, h, w, c = paddle.shape(attn_weight)
        assert c == 1

        if valid_ratios is not None:
            # cal mask of attention weight
            for i in range(paddle.shape(valid_ratios)[0]):
                valid_width = paddle.minimum(
                    w, paddle.ceil(valid_ratios[i] * w).astype("int32"))
                if valid_width < w:
                    attn_weight[i, :, :, valid_width:, :] = float('-inf')

        attn_weight = paddle.reshape(attn_weight, [bsz, T, -1])
        attn_weight = F.softmax(attn_weight, axis=-1)

        attn_weight = paddle.reshape(attn_weight, [bsz, T, h, w, c])
        attn_weight = paddle.transpose(attn_weight, perm=[0, 1, 4, 2, 3])
        # attn_weight: bsz * T * c * h * w
        # feat: bsz * c * h * w
        attn_feat = paddle.sum(
            paddle.multiply(feat.unsqueeze(1), attn_weight), (3, 4),
            keepdim=False)
        # bsz * (seq_len + 1) * C

        # Linear transformation
        if self.pred_concat:
            hf_c = holistic_feat.shape[-1]
            holistic_feat = paddle.expand(
                holistic_feat, shape=[bsz, seq_len, hf_c])
            y = self.prediction(
                paddle.concat((y, attn_feat, holistic_feat), 2))
        else:
            y = self.prediction(attn_feat)
        # bsz * (seq_len + 1) * num_classes
        if self.train_mode:
            y = self.pred_dropout(y)

        return y

    def forward_train(self, feat, out_enc, label, img_metas):
        '''
        img_metas: [label, valid_ratio]
        '''
        if img_metas is not None:
            assert paddle.shape(img_metas[0])[0] == paddle.shape(feat)[0]

        valid_ratios = None
        if img_metas is not None and self.mask:
            valid_ratios = img_metas[-1]

        lab_embedding = self.embedding(label)
        # bsz * seq_len * emb_dim
        out_enc = out_enc.unsqueeze(1)
        # bsz * 1 * emb_dim
        in_dec = paddle.concat((out_enc, lab_embedding), axis=1)
        # bsz * (seq_len + 1) * C
        out_dec = self._2d_attention(
            in_dec, feat, out_enc, valid_ratios=valid_ratios)

        return out_dec[:, 1:, :]  # bsz * seq_len * num_classes

    def forward_test(self, feat, out_enc, img_metas):
        if img_metas is not None:
            assert len(img_metas[0]) == feat.shape[0]

        valid_ratios = None
        if img_metas is not None and self.mask:
            valid_ratios = img_metas[-1]

        seq_len = self.max_seq_len
        bsz = feat.shape[0]

        start_token = paddle.full(
            (bsz, ), fill_value=self.start_idx, dtype='int64')
        # bsz
        start_token = self.embedding(start_token)
        # bsz * emb_dim
        emb_dim = start_token.shape[1]
        start_token = start_token.unsqueeze(1)
        start_token = paddle.expand(start_token, shape=[bsz, seq_len, emb_dim])
        # bsz * seq_len * emb_dim
        out_enc = out_enc.unsqueeze(1)
        # bsz * 1 * emb_dim
        decoder_input = paddle.concat((out_enc, start_token), axis=1)
        # bsz * (seq_len + 1) * emb_dim

        outputs = []
        for i in range(1, seq_len + 1):
            decoder_output = self._2d_attention(
                decoder_input, feat, out_enc, valid_ratios=valid_ratios)
            char_output = decoder_output[:, i, :]  # bsz * num_classes
            char_output = F.softmax(char_output, -1)
            outputs.append(char_output)
            max_idx = paddle.argmax(char_output, axis=1, keepdim=False)
            char_embedding = self.embedding(max_idx)  # bsz * emb_dim
            if i < seq_len:
                decoder_input[:, i + 1, :] = char_embedding

        outputs = paddle.stack(outputs, 1)  # bsz * seq_len * num_classes

        return outputs


class SARHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 enc_dim=512,
                 max_text_length=30,
                 enc_bi_rnn=False,
                 enc_drop_rnn=0.1,
                 enc_gru=False,
                 dec_bi_rnn=False,
                 dec_drop_rnn=0.0,
                 dec_gru=False,
                 d_k=512,
                 pred_dropout=0.1,
                 pred_concat=True,
                 **kwargs):
        super(SARHead, self).__init__()

        # encoder module
        self.encoder = SAREncoder(
            enc_bi_rnn=enc_bi_rnn,
            enc_drop_rnn=enc_drop_rnn,
            enc_gru=enc_gru,
            d_model=in_channels,
            d_enc=enc_dim)

        # decoder module
        self.decoder = ParallelSARDecoder(
            out_channels=out_channels,
            enc_bi_rnn=enc_bi_rnn,
            dec_bi_rnn=dec_bi_rnn,
            dec_drop_rnn=dec_drop_rnn,
            dec_gru=dec_gru,
            d_model=in_channels,
            d_enc=enc_dim,
            d_k=d_k,
            pred_dropout=pred_dropout,
            max_text_length=max_text_length,
            pred_concat=pred_concat)

    def forward(self, feat, targets=None):
        '''
        img_metas: [label, valid_ratio]
        '''
        holistic_feat = self.encoder(feat, targets)  # bsz c

        if self.training:
            label = targets[0]  # label
            final_out = self.decoder(
                feat, holistic_feat, label, img_metas=targets)
        else:
            final_out = self.decoder(
                feat,
                holistic_feat,
                label=None,
                img_metas=targets,
                train_mode=False)
            # (bsz, seq_len, num_classes)

        return final_out
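A minimal SARHead inference sketch (shapes and class count are assumptions; targets carries the valid_ratio list that forward() documents as img_metas):

    import paddle

    head = SARHead(in_channels=512, out_channels=93, max_text_length=30)
    head.eval()
    feat = paddle.randn([2, 512, 8, 32])      # (bsz, C, H, W) backbone feature map
    valid_ratios = paddle.ones([2])           # valid fraction of each image width
    out = head(feat, targets=[valid_ratios])  # (bsz, seq_len, num_classes)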
ppocr/modeling/heads/rec_srn_head.py (new file, mode 100755)
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
from paddle import nn, ParamAttr
from paddle.nn import functional as F
import numpy as np
from .self_attention import WrapEncoderForFeature
from .self_attention import WrapEncoder
from paddle.static import Program
from ppocr.modeling.backbones.rec_resnet_fpn import ResNetFPN
from collections import OrderedDict

gradient_clip = 10
class PVAM(nn.Layer):
    def __init__(self, in_channels, char_num, max_text_length, num_heads,
                 num_encoder_tus, hidden_dims):
        super(PVAM, self).__init__()
        self.char_num = char_num
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_tus
        self.hidden_dims = hidden_dims
        # Transformer encoder
        t = 256
        c = 512
        self.wrap_encoder_for_feature = WrapEncoderForFeature(
            src_vocab_size=1,
            max_length=t,
            n_layer=self.num_encoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True)

        # PVAM
        self.flatten0 = paddle.nn.Flatten(start_axis=0, stop_axis=1)
        self.fc0 = paddle.nn.Linear(
            in_features=in_channels, out_features=in_channels)
        self.emb = paddle.nn.Embedding(
            num_embeddings=self.max_length, embedding_dim=in_channels)
        self.flatten1 = paddle.nn.Flatten(start_axis=0, stop_axis=2)
        self.fc1 = paddle.nn.Linear(
            in_features=in_channels, out_features=1, bias_attr=False)

    def forward(self, inputs, encoder_word_pos, gsrm_word_pos):
        b, c, h, w = inputs.shape
        conv_features = paddle.reshape(inputs, shape=[-1, c, h * w])
        conv_features = paddle.transpose(conv_features, perm=[0, 2, 1])
        # transformer encoder
        b, t, c = conv_features.shape

        enc_inputs = [conv_features, encoder_word_pos, None]
        word_features = self.wrap_encoder_for_feature(enc_inputs)

        # pvam
        b, t, c = word_features.shape
        word_features = self.fc0(word_features)
        word_features_ = paddle.reshape(word_features, [-1, 1, t, c])
        word_features_ = paddle.tile(word_features_,
                                     [1, self.max_length, 1, 1])
        word_pos_feature = self.emb(gsrm_word_pos)
        word_pos_feature_ = paddle.reshape(word_pos_feature,
                                           [-1, self.max_length, 1, c])
        word_pos_feature_ = paddle.tile(word_pos_feature_, [1, 1, t, 1])
        y = word_pos_feature_ + word_features_
        y = F.tanh(y)
        attention_weight = self.fc1(y)
        attention_weight = paddle.reshape(
            attention_weight, shape=[-1, self.max_length, t])
        attention_weight = F.softmax(attention_weight, axis=-1)
        pvam_features = paddle.matmul(attention_weight,
                                      word_features)  # [b, max_length, c]
        return pvam_features
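Read together, the tile and reshape calls implement a position-wise additive attention: every reading-order slot attends over all t visual tokens and pools them into one vector per slot. A standalone shape sketch of the same computation (sizes are illustrative assumptions, not taken from the commit):

# Standalone walkthrough of the PVAM additive attention (assumed sizes).
import paddle
import paddle.nn.functional as F

B, T, C, L = 2, 256, 512, 25             # batch, visual tokens, channels, max_length
word_features = paddle.rand([B, T, C])   # encoder output after fc0
pos_feature = paddle.rand([B, L, C])     # reading-order position embeddings
score = paddle.rand([C, 1])              # stands in for the fc1 projection

wf = paddle.tile(paddle.reshape(word_features, [B, 1, T, C]), [1, L, 1, 1])
pf = paddle.tile(paddle.reshape(pos_feature, [B, L, 1, C]), [1, 1, T, 1])
att = paddle.matmul(F.tanh(wf + pf), score)          # B x L x T x 1
att = F.softmax(paddle.reshape(att, [B, L, T]), axis=-1)
pvam = paddle.matmul(att, word_features)             # B x L x C, one vector per slot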
class GSRM(nn.Layer):
    def __init__(self, in_channels, char_num, max_text_length, num_heads,
                 num_encoder_tus, num_decoder_tus, hidden_dims):
        super(GSRM, self).__init__()
        self.char_num = char_num
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_tus
        self.num_decoder_TUs = num_decoder_tus
        self.hidden_dims = hidden_dims

        self.fc0 = paddle.nn.Linear(
            in_features=in_channels, out_features=self.char_num)
        self.wrap_encoder0 = WrapEncoder(
            src_vocab_size=self.char_num + 1,
            max_length=self.max_length,
            n_layer=self.num_decoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True)
        self.wrap_encoder1 = WrapEncoder(
            src_vocab_size=self.char_num + 1,
            max_length=self.max_length,
            n_layer=self.num_decoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True)

        self.mul = lambda x: paddle.matmul(
            x=x,
            y=self.wrap_encoder0.prepare_decoder.emb0.weight,
            transpose_y=True)

    def forward(self, inputs, gsrm_word_pos, gsrm_slf_attn_bias1,
                gsrm_slf_attn_bias2):
        # ===== GSRM Visual-to-semantic embedding block =====
        b, t, c = inputs.shape
        pvam_features = paddle.reshape(inputs, [-1, c])
        word_out = self.fc0(pvam_features)
        word_ids = paddle.argmax(F.softmax(word_out), axis=1)
        word_ids = paddle.reshape(x=word_ids, shape=[-1, t, 1])
        # ===== GSRM Semantic reasoning block =====
        """
        This module is built from two transformers running in opposite
        directions: ngram_feature1 is the forward branch and
        ngram_feature2 is the backward branch.
        """
        pad_idx = self.char_num

        word1 = paddle.cast(word_ids, "float32")
        word1 = F.pad(word1, [1, 0], value=1.0 * pad_idx, data_format="NLC")
        word1 = paddle.cast(word1, "int64")
        word1 = word1[:, :-1, :]
        word2 = word_ids

        enc_inputs_1 = [word1, gsrm_word_pos, gsrm_slf_attn_bias1]
        enc_inputs_2 = [word2, gsrm_word_pos, gsrm_slf_attn_bias2]

        gsrm_feature1 = self.wrap_encoder0(enc_inputs_1)
        gsrm_feature2 = self.wrap_encoder1(enc_inputs_2)

        gsrm_feature2 = F.pad(gsrm_feature2, [0, 1],
                              value=0.,
                              data_format="NLC")
        gsrm_feature2 = gsrm_feature2[:, 1:, ]

        gsrm_features = gsrm_feature1 + gsrm_feature2

        gsrm_out = self.mul(gsrm_features)

        b, t, c = gsrm_out.shape
        gsrm_out = paddle.reshape(gsrm_out, [-1, c])

        return gsrm_features, word_out, gsrm_out
class VSFD(nn.Layer):
    def __init__(self, in_channels=512, pvam_ch=512, char_num=38):
        super(VSFD, self).__init__()
        self.char_num = char_num
        self.fc0 = paddle.nn.Linear(
            in_features=in_channels * 2, out_features=pvam_ch)
        self.fc1 = paddle.nn.Linear(
            in_features=pvam_ch, out_features=self.char_num)

    def forward(self, pvam_feature, gsrm_feature):
        b, t, c1 = pvam_feature.shape
        b, t, c2 = gsrm_feature.shape
        combine_feature_ = paddle.concat([pvam_feature, gsrm_feature], axis=2)
        img_comb_feature_ = paddle.reshape(
            combine_feature_, shape=[-1, c1 + c2])
        img_comb_feature_map = self.fc0(img_comb_feature_)
        img_comb_feature_map = F.sigmoid(img_comb_feature_map)
        img_comb_feature_map = paddle.reshape(
            img_comb_feature_map, shape=[-1, t, c1])
        combine_feature = img_comb_feature_map * pvam_feature + (
            1.0 - img_comb_feature_map) * gsrm_feature
        img_comb_feature = paddle.reshape(combine_feature, shape=[-1, c1])

        out = self.fc1(img_comb_feature)
        return out
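VSFD is a learned gate: fc0 plus a sigmoid produce a gate g in (0, 1), and the fused feature is g * pvam + (1 - g) * gsrm, channel by channel. A self-contained sketch with random tensors (sizes are assumptions; paddle's Linear applies to the last axis, so the intermediate 2-D reshape from the class is omitted here):

# Gated fusion sketch (assumed sizes, not the class itself).
import paddle
import paddle.nn.functional as F

B, T, C = 2, 25, 512
pvam = paddle.rand([B, T, C])
gsrm = paddle.rand([B, T, C])
fc0 = paddle.nn.Linear(2 * C, C)                           # same role as VSFD.fc0

g = F.sigmoid(fc0(paddle.concat([pvam, gsrm], axis=2)))    # gate in (0, 1)
fused = g * pvam + (1.0 - g) * gsrm                        # per-channel convex blend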
class SRNHead(nn.Layer):
    def __init__(self, in_channels, out_channels, max_text_length, num_heads,
                 num_encoder_TUs, num_decoder_TUs, hidden_dims, **kwargs):
        super(SRNHead, self).__init__()
        self.char_num = out_channels
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_TUs
        self.num_decoder_TUs = num_decoder_TUs
        self.hidden_dims = hidden_dims

        self.pvam = PVAM(
            in_channels=in_channels,
            char_num=self.char_num,
            max_text_length=self.max_length,
            num_heads=self.num_heads,
            num_encoder_tus=self.num_encoder_TUs,
            hidden_dims=self.hidden_dims)

        self.gsrm = GSRM(
            in_channels=in_channels,
            char_num=self.char_num,
            max_text_length=self.max_length,
            num_heads=self.num_heads,
            num_encoder_tus=self.num_encoder_TUs,
            num_decoder_tus=self.num_decoder_TUs,
            hidden_dims=self.hidden_dims)
        self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num)

        self.gsrm.wrap_encoder1.prepare_decoder.emb0 = \
            self.gsrm.wrap_encoder0.prepare_decoder.emb0

    def forward(self, inputs, targets=None):
        others = targets[-4:]
        encoder_word_pos = others[0]
        gsrm_word_pos = others[1]
        gsrm_slf_attn_bias1 = others[2]
        gsrm_slf_attn_bias2 = others[3]

        pvam_feature = self.pvam(inputs, encoder_word_pos, gsrm_word_pos)

        gsrm_feature, word_out, gsrm_out = self.gsrm(
            pvam_feature, gsrm_word_pos, gsrm_slf_attn_bias1,
            gsrm_slf_attn_bias2)

        final_out = self.vsfd(pvam_feature, gsrm_feature)
        if not self.training:
            final_out = F.softmax(final_out, axis=1)

        _, decoded_out = paddle.topk(final_out, k=1)

        predicts = OrderedDict([
            ('predict', final_out),
            ('pvam_feature', pvam_feature),
            ('decoded_out', decoded_out),
            ('word_out', word_out),
            ('gsrm_out', gsrm_out),
        ])

        return predicts
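The four tensors sliced from the tail of targets are produced by the data pipeline, not by the head. A hedged sketch of one plausible construction, mirroring how SRN-style position ids and triangular self-attention masks are usually built (names and sizes here are assumptions, not the pipeline's actual code):

# Assumed construction of the auxiliary SRN inputs.
import numpy as np

num_heads, L, feat_len = 8, 25, 256          # assumed head count, max length, tokens
encoder_word_pos = np.arange(feat_len, dtype='int64').reshape((1, feat_len, 1))
gsrm_word_pos = np.arange(L, dtype='int64').reshape((1, L, 1))

ones = np.ones((1, L, L), dtype='float32')
bias1 = np.triu(ones, k=1) * -1e9            # hide future positions (forward branch)
bias2 = np.tril(ones, k=-1) * -1e9           # hide past positions (backward branch)
gsrm_slf_attn_bias1 = np.tile(bias1[:, None, :, :], [1, num_heads, 1, 1])
gsrm_slf_attn_bias2 = np.tile(bias2[:, None, :, :], [1, num_heads, 1, 1])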
ppocr/modeling/heads/self_attention.py (new file, mode 100755)
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
from paddle import nn, ParamAttr
from paddle.nn import functional as F
import numpy as np

gradient_clip = 10
class WrapEncoderForFeature(nn.Layer):
    def __init__(self,
                 src_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 bos_idx=0):
        super(WrapEncoderForFeature, self).__init__()

        self.prepare_encoder = PrepareEncoder(
            src_vocab_size,
            d_model,
            max_length,
            prepostprocess_dropout,
            bos_idx=bos_idx,
            word_emb_param_name="src_word_emb_table")
        self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model,
                               d_inner_hid, prepostprocess_dropout,
                               attention_dropout, relu_dropout,
                               preprocess_cmd, postprocess_cmd)

    def forward(self, enc_inputs):
        conv_features, src_pos, src_slf_attn_bias = enc_inputs
        enc_input = self.prepare_encoder(conv_features, src_pos)
        enc_output = self.encoder(enc_input, src_slf_attn_bias)
        return enc_output
class WrapEncoder(nn.Layer):
    """
    embedder + encoder
    """

    def __init__(self,
                 src_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 bos_idx=0):
        super(WrapEncoder, self).__init__()

        self.prepare_decoder = PrepareDecoder(
            src_vocab_size,
            d_model,
            max_length,
            prepostprocess_dropout,
            bos_idx=bos_idx)
        self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model,
                               d_inner_hid, prepostprocess_dropout,
                               attention_dropout, relu_dropout,
                               preprocess_cmd, postprocess_cmd)

    def forward(self, enc_inputs):
        src_word, src_pos, src_slf_attn_bias = enc_inputs
        enc_input = self.prepare_decoder(src_word, src_pos)
        enc_output = self.encoder(enc_input, src_slf_attn_bias)
        return enc_output
class Encoder(nn.Layer):
    """
    encoder
    """

    def __init__(self,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd="n",
                 postprocess_cmd="da"):
        super(Encoder, self).__init__()

        self.encoder_layers = list()
        for i in range(n_layer):
            self.encoder_layers.append(
                self.add_sublayer(
                    "layer_%d" % i,
                    EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid,
                                 prepostprocess_dropout, attention_dropout,
                                 relu_dropout, preprocess_cmd,
                                 postprocess_cmd)))
        self.processer = PrePostProcessLayer(preprocess_cmd, d_model,
                                             prepostprocess_dropout)

    def forward(self, enc_input, attn_bias):
        for encoder_layer in self.encoder_layers:
            enc_output = encoder_layer(enc_input, attn_bias)
            enc_input = enc_output
        enc_output = self.processer(enc_output)
        return enc_output
class EncoderLayer(nn.Layer):
    """
    EncoderLayer
    """

    def __init__(self,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd="n",
                 postprocess_cmd="da"):
        super(EncoderLayer, self).__init__()

        self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
                                                 prepostprocess_dropout)
        self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
                                            attention_dropout)
        self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
                                                  prepostprocess_dropout)

        self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
                                                 prepostprocess_dropout)
        self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
        self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
                                                  prepostprocess_dropout)

    def forward(self, enc_input, attn_bias):
        attn_output = self.self_attn(
            self.preprocesser1(enc_input), None, None, attn_bias)
        attn_output = self.postprocesser1(attn_output, enc_input)
        ffn_output = self.ffn(self.preprocesser2(attn_output))
        ffn_output = self.postprocesser2(ffn_output, attn_output)
        return ffn_output
class MultiHeadAttention(nn.Layer):
    """
    Multi-Head Attention
    """

    def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.):
        super(MultiHeadAttention, self).__init__()
        self.n_head = n_head
        self.d_key = d_key
        self.d_value = d_value
        self.d_model = d_model
        self.dropout_rate = dropout_rate
        self.q_fc = paddle.nn.Linear(
            in_features=d_model, out_features=d_key * n_head, bias_attr=False)
        self.k_fc = paddle.nn.Linear(
            in_features=d_model, out_features=d_key * n_head, bias_attr=False)
        self.v_fc = paddle.nn.Linear(
            in_features=d_model,
            out_features=d_value * n_head,
            bias_attr=False)
        self.proj_fc = paddle.nn.Linear(
            in_features=d_value * n_head,
            out_features=d_model,
            bias_attr=False)

    def _prepare_qkv(self, queries, keys, values, cache=None):
        if keys is None:  # self-attention
            keys, values = queries, queries
            static_kv = False
        else:  # cross-attention
            static_kv = True

        q = self.q_fc(queries)
        q = paddle.reshape(x=q, shape=[0, 0, self.n_head, self.d_key])
        q = paddle.transpose(x=q, perm=[0, 2, 1, 3])

        if cache is not None and static_kv and "static_k" in cache:
            # for encoder-decoder attention in inference and has cached
            k = cache["static_k"]
            v = cache["static_v"]
        else:
            k = self.k_fc(keys)
            v = self.v_fc(values)
            k = paddle.reshape(x=k, shape=[0, 0, self.n_head, self.d_key])
            k = paddle.transpose(x=k, perm=[0, 2, 1, 3])
            v = paddle.reshape(x=v, shape=[0, 0, self.n_head, self.d_value])
            v = paddle.transpose(x=v, perm=[0, 2, 1, 3])

        if cache is not None:
            if static_kv and not "static_k" in cache:
                # for encoder-decoder attention in inference and has not cached
                cache["static_k"], cache["static_v"] = k, v
            elif not static_kv:
                # for decoder self-attention in inference
                cache_k, cache_v = cache["k"], cache["v"]
                k = paddle.concat([cache_k, k], axis=2)
                v = paddle.concat([cache_v, v], axis=2)
                cache["k"], cache["v"] = k, v

        return q, k, v

    def forward(self, queries, keys, values, attn_bias, cache=None):
        # compute q, k, v
        keys = queries if keys is None else keys
        values = keys if values is None else values
        q, k, v = self._prepare_qkv(queries, keys, values, cache)

        # scaled dot-product attention
        product = paddle.matmul(x=q, y=k, transpose_y=True)
        product = product * self.d_model**-0.5
        if attn_bias is not None:
            product += attn_bias
        weights = F.softmax(product)
        if self.dropout_rate:
            weights = F.dropout(
                weights, p=self.dropout_rate, mode="downscale_in_infer")
        out = paddle.matmul(weights, v)

        # combine heads
        out = paddle.transpose(out, perm=[0, 2, 1, 3])
        out = paddle.reshape(
            x=out, shape=[0, 0, out.shape[2] * out.shape[3]])

        # project to output
        out = self.proj_fc(out)
        return out
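A quick smoke test of the layer as reconstructed above; note that this implementation scales the attention scores by d_model ** -0.5 rather than the more common d_key ** -0.5. The sizes below are arbitrary:

# Self-attention smoke test (arbitrary sizes, assumed usage).
import paddle

attn = MultiHeadAttention(d_key=64, d_value=64, d_model=512, n_head=8)
x = paddle.rand([2, 10, 512])              # bsz, seq_len, d_model
y = attn(x, None, None, attn_bias=None)    # keys/values default to queries
print(y.shape)                             # [2, 10, 512]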
class PrePostProcessLayer(nn.Layer):
    """
    PrePostProcessLayer
    """

    def __init__(self, process_cmd, d_model, dropout_rate):
        super(PrePostProcessLayer, self).__init__()
        self.process_cmd = process_cmd
        self.functors = []
        for cmd in self.process_cmd:
            if cmd == "a":  # add residual connection
                self.functors.append(
                    lambda x, y: x + y if y is not None else x)
            elif cmd == "n":  # add layer normalization
                self.functors.append(
                    self.add_sublayer(
                        "layer_norm_%d" % len(self.sublayers()),
                        paddle.nn.LayerNorm(
                            normalized_shape=d_model,
                            weight_attr=paddle.ParamAttr(
                                initializer=paddle.nn.initializer.Constant(
                                    1.)),
                            bias_attr=paddle.ParamAttr(
                                initializer=paddle.nn.initializer.Constant(
                                    0.)))))
            elif cmd == "d":  # add dropout
                self.functors.append(
                    lambda x: F.dropout(
                        x, p=dropout_rate, mode="downscale_in_infer")
                    if dropout_rate else x)

    def forward(self, x, residual=None):
        for i, cmd in enumerate(self.process_cmd):
            if cmd == "a":
                x = self.functors[i](x, residual)
            else:
                x = self.functors[i](x)
        return x
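The command strings used throughout this file ("n" for preprocess, "da" for postprocess) therefore give pre-norm residual blocks: normalize before the sublayer, then dropout followed by a residual add after it. A small sketch of that composition (sizes arbitrary):

# Composition sketch for the "n" / "da" command strings (assumed sizes).
import paddle

pre = PrePostProcessLayer("n", d_model=512, dropout_rate=0.1)
post = PrePostProcessLayer("da", d_model=512, dropout_rate=0.1)
x = paddle.rand([2, 10, 512])
sublayer_out = paddle.rand([2, 10, 512])   # stand-in for attention/FFN output
y = post(sublayer_out, residual=x)         # dropout(sublayer_out) + x
z = pre(y)                                 # LayerNorm(y), fed to the next sublayer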
class PrepareEncoder(nn.Layer):
    def __init__(self,
                 src_vocab_size,
                 src_emb_dim,
                 src_max_len,
                 dropout_rate=0,
                 bos_idx=0,
                 word_emb_param_name=None,
                 pos_enc_param_name=None):
        super(PrepareEncoder, self).__init__()
        self.src_emb_dim = src_emb_dim
        self.src_max_len = src_max_len
        self.emb = paddle.nn.Embedding(
            num_embeddings=self.src_max_len, embedding_dim=self.src_emb_dim)
        self.dropout_rate = dropout_rate

    def forward(self, src_word, src_pos):
        src_word_emb = src_word
        src_word_emb = paddle.cast(src_word_emb, 'float32')
        src_word_emb = paddle.scale(
            x=src_word_emb, scale=self.src_emb_dim**0.5)
        src_pos = paddle.squeeze(src_pos, axis=-1)
        src_pos_enc = self.emb(src_pos)
        src_pos_enc.stop_gradient = True
        enc_input = src_word_emb + src_pos_enc
        if self.dropout_rate:
            out = F.dropout(
                x=enc_input, p=self.dropout_rate, mode="downscale_in_infer")
        else:
            out = enc_input
        return out
class PrepareDecoder(nn.Layer):
    def __init__(self,
                 src_vocab_size,
                 src_emb_dim,
                 src_max_len,
                 dropout_rate=0,
                 bos_idx=0,
                 word_emb_param_name=None,
                 pos_enc_param_name=None):
        super(PrepareDecoder, self).__init__()
        self.src_emb_dim = src_emb_dim
        """
        self.emb0 = Embedding(num_embeddings=src_vocab_size,
                              embedding_dim=src_emb_dim)
        """
        self.emb0 = paddle.nn.Embedding(
            num_embeddings=src_vocab_size,
            embedding_dim=self.src_emb_dim,
            padding_idx=bos_idx,
            weight_attr=paddle.ParamAttr(
                name=word_emb_param_name,
                initializer=nn.initializer.Normal(0., src_emb_dim**-0.5)))
        self.emb1 = paddle.nn.Embedding(
            num_embeddings=src_max_len,
            embedding_dim=self.src_emb_dim,
            weight_attr=paddle.ParamAttr(name=pos_enc_param_name))
        self.dropout_rate = dropout_rate

    def forward(self, src_word, src_pos):
        src_word = paddle.cast(src_word, 'int64')
        src_word = paddle.squeeze(src_word, axis=-1)
        src_word_emb = self.emb0(src_word)
        src_word_emb = paddle.scale(
            x=src_word_emb, scale=self.src_emb_dim**0.5)
        src_pos = paddle.squeeze(src_pos, axis=-1)
        src_pos_enc = self.emb1(src_pos)
        src_pos_enc.stop_gradient = True
        enc_input = src_word_emb + src_pos_enc
        if self.dropout_rate:
            out = F.dropout(
                x=enc_input, p=self.dropout_rate, mode="downscale_in_infer")
        else:
            out = enc_input
        return out
class FFN(nn.Layer):
    """
    Feed-Forward Network
    """

    def __init__(self, d_inner_hid, d_model, dropout_rate):
        super(FFN, self).__init__()
        self.dropout_rate = dropout_rate
        self.fc1 = paddle.nn.Linear(
            in_features=d_model, out_features=d_inner_hid)
        self.fc2 = paddle.nn.Linear(
            in_features=d_inner_hid, out_features=d_model)

    def forward(self, x):
        hidden = self.fc1(x)
        hidden = F.relu(hidden)
        if self.dropout_rate:
            hidden = F.dropout(
                hidden, p=self.dropout_rate, mode="downscale_in_infer")
        out = self.fc2(hidden)
        return out
ppocr/modeling/heads/table_att_head.py (new file, mode 100755)
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
class TableAttentionHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 hidden_size,
                 loc_type,
                 in_max_len=488,
                 max_text_length=100,
                 max_elem_length=800,
                 max_cell_num=500,
                 **kwargs):
        super(TableAttentionHead, self).__init__()
        self.input_size = in_channels[-1]
        self.hidden_size = hidden_size
        self.elem_num = 30
        self.max_text_length = max_text_length
        self.max_elem_length = max_elem_length
        self.max_cell_num = max_cell_num

        self.structure_attention_cell = AttentionGRUCell(
            self.input_size, hidden_size, self.elem_num, use_gru=False)
        self.structure_generator = nn.Linear(hidden_size, self.elem_num)
        self.loc_type = loc_type
        self.in_max_len = in_max_len

        if self.loc_type == 1:
            self.loc_generator = nn.Linear(hidden_size, 4)
        else:
            if self.in_max_len == 640:
                self.loc_fea_trans = nn.Linear(400, self.max_elem_length + 1)
            elif self.in_max_len == 800:
                self.loc_fea_trans = nn.Linear(625, self.max_elem_length + 1)
            else:
                self.loc_fea_trans = nn.Linear(256, self.max_elem_length + 1)
            self.loc_generator = nn.Linear(self.input_size + hidden_size, 4)

    def _char_to_onehot(self, input_char, onehot_dim):
        input_ont_hot = F.one_hot(input_char, onehot_dim)
        return input_ont_hot

    def forward(self, inputs, targets=None):
        # Both the if and else branches are needed when assigning to a
        # variable: if it is modified in only one branch, the modification
        # will not take effect.
        fea = inputs[-1]
        if len(fea.shape) == 3:
            pass
        else:
            last_shape = int(np.prod(fea.shape[2:]))  # gry added
            fea = paddle.reshape(
                fea, [fea.shape[0], fea.shape[1], last_shape])
            fea = fea.transpose([0, 2, 1])  # (NTC)(batch, width, channels)
        batch_size = fea.shape[0]

        hidden = paddle.zeros((batch_size, self.hidden_size))
        output_hiddens = []
        if self.training and targets is not None:
            structure = targets[0]
            for i in range(self.max_elem_length + 1):
                elem_onehots = self._char_to_onehot(
                    structure[:, i], onehot_dim=self.elem_num)
                (outputs, hidden), alpha = self.structure_attention_cell(
                    hidden, fea, elem_onehots)
                output_hiddens.append(paddle.unsqueeze(outputs, axis=1))
            output = paddle.concat(output_hiddens, axis=1)
            structure_probs = self.structure_generator(output)
            if self.loc_type == 1:
                loc_preds = self.loc_generator(output)
                loc_preds = F.sigmoid(loc_preds)
            else:
                loc_fea = fea.transpose([0, 2, 1])
                loc_fea = self.loc_fea_trans(loc_fea)
                loc_fea = loc_fea.transpose([0, 2, 1])
                loc_concat = paddle.concat([output, loc_fea], axis=2)
                loc_preds = self.loc_generator(loc_concat)
                loc_preds = F.sigmoid(loc_preds)
        else:
            temp_elem = paddle.zeros(shape=[batch_size], dtype="int32")
            structure_probs = None
            loc_preds = None
            elem_onehots = None
            outputs = None
            alpha = None
            max_elem_length = paddle.to_tensor(self.max_elem_length)
            i = 0
            while i < max_elem_length + 1:
                elem_onehots = self._char_to_onehot(
                    temp_elem, onehot_dim=self.elem_num)
                (outputs, hidden), alpha = self.structure_attention_cell(
                    hidden, fea, elem_onehots)
                output_hiddens.append(paddle.unsqueeze(outputs, axis=1))
                structure_probs_step = self.structure_generator(outputs)
                temp_elem = structure_probs_step.argmax(axis=1, dtype="int32")
                i += 1

            output = paddle.concat(output_hiddens, axis=1)
            structure_probs = self.structure_generator(output)
            structure_probs = F.softmax(structure_probs)
            if self.loc_type == 1:
                loc_preds = self.loc_generator(output)
                loc_preds = F.sigmoid(loc_preds)
            else:
                loc_fea = fea.transpose([0, 2, 1])
                loc_fea = self.loc_fea_trans(loc_fea)
                loc_fea = loc_fea.transpose([0, 2, 1])
                loc_concat = paddle.concat([output, loc_fea], axis=2)
                loc_preds = self.loc_generator(loc_concat)
                loc_preds = F.sigmoid(loc_preds)
        return {'structure_probs': structure_probs, 'loc_preds': loc_preds}
class AttentionGRUCell(nn.Layer):
    def __init__(self, input_size, hidden_size, num_embeddings,
                 use_gru=False):
        super(AttentionGRUCell, self).__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias_attr=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.score = nn.Linear(hidden_size, 1, bias_attr=False)
        self.rnn = nn.GRUCell(
            input_size=input_size + num_embeddings, hidden_size=hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_onehots):
        batch_H_proj = self.i2h(batch_H)
        prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden), axis=1)
        res = paddle.add(batch_H_proj, prev_hidden_proj)
        res = paddle.tanh(res)
        e = self.score(res)
        alpha = F.softmax(e, axis=1)
        alpha = paddle.transpose(alpha, [0, 2, 1])
        context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1)
        concat_context = paddle.concat([context, char_onehots], 1)
        cur_hidden = self.rnn(concat_context, prev_hidden)
        return cur_hidden, alpha
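A shape check for the cell (sizes below are assumptions). Note that paddle's nn.GRUCell returns an (output, new_state) pair, which is why TableAttentionHead unpacks the cell's result as (outputs, hidden), alpha:

# Attention-GRU cell shape check (assumed sizes).
import paddle

cell = AttentionGRUCell(input_size=96, hidden_size=256, num_embeddings=30)
hidden = paddle.zeros([2, 256])                     # bsz, hidden_size
fea = paddle.rand([2, 100, 96])                     # bsz, T, input_size
onehots = paddle.rand([2, 30])                      # bsz, num_embeddings
(out, hidden), alpha = cell(hidden, fea, onehots)   # out: [2, 256]; alpha: [2, 1, 100]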
class AttentionLSTM(nn.Layer):
    def __init__(self, in_channels, out_channels, hidden_size, **kwargs):
        super(AttentionLSTM, self).__init__()
        self.input_size = in_channels
        self.hidden_size = hidden_size
        self.num_classes = out_channels

        self.attention_cell = AttentionLSTMCell(
            in_channels, hidden_size, out_channels, use_gru=False)
        self.generator = nn.Linear(hidden_size, out_channels)

    def _char_to_onehot(self, input_char, onehot_dim):
        input_ont_hot = F.one_hot(input_char, onehot_dim)
        return input_ont_hot

    def forward(self, inputs, targets=None, batch_max_length=25):
        batch_size = inputs.shape[0]
        num_steps = batch_max_length

        hidden = (paddle.zeros((batch_size, self.hidden_size)),
                  paddle.zeros((batch_size, self.hidden_size)))
        output_hiddens = []

        if targets is not None:
            for i in range(num_steps):
                # one-hot vector for the i-th char
                char_onehots = self._char_to_onehot(
                    targets[:, i], onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, inputs,
                                                    char_onehots)
                hidden = (hidden[1][0], hidden[1][1])
                output_hiddens.append(paddle.unsqueeze(hidden[0], axis=1))
            output = paddle.concat(output_hiddens, axis=1)
            probs = self.generator(output)
        else:
            targets = paddle.zeros(shape=[batch_size], dtype="int32")
            probs = None

            for i in range(num_steps):
                char_onehots = self._char_to_onehot(
                    targets, onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, inputs,
                                                    char_onehots)
                probs_step = self.generator(hidden[0])
                hidden = (hidden[1][0], hidden[1][1])
                if probs is None:
                    probs = paddle.unsqueeze(probs_step, axis=1)
                else:
                    probs = paddle.concat(
                        [probs, paddle.unsqueeze(probs_step, axis=1)], axis=1)
                next_input = probs_step.argmax(axis=1)
                targets = next_input

        return probs
class AttentionLSTMCell(nn.Layer):
    def __init__(self, input_size, hidden_size, num_embeddings,
                 use_gru=False):
        super(AttentionLSTMCell, self).__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias_attr=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.score = nn.Linear(hidden_size, 1, bias_attr=False)
        if not use_gru:
            self.rnn = nn.LSTMCell(
                input_size=input_size + num_embeddings,
                hidden_size=hidden_size)
        else:
            self.rnn = nn.GRUCell(
                input_size=input_size + num_embeddings,
                hidden_size=hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_onehots):
        batch_H_proj = self.i2h(batch_H)
        prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden[0]), axis=1)
        res = paddle.add(batch_H_proj, prev_hidden_proj)
        res = paddle.tanh(res)
        e = self.score(res)
        alpha = F.softmax(e, axis=1)
        alpha = paddle.transpose(alpha, [0, 2, 1])
        context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1)
        concat_context = paddle.concat([context, char_onehots], 1)
        cur_hidden = self.rnn(concat_context, prev_hidden)
        return cur_hidden, alpha
ppocr/modeling/necks/__init__.py (new file, mode 100755)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['build_neck']


def build_neck(config):
    from .db_fpn import DBFPN, RSEFPN, LKPAN
    from .east_fpn import EASTFPN
    from .sast_fpn import SASTFPN
    from .rnn import SequenceEncoder
    from .pg_fpn import PGFPN
    from .table_fpn import TableFPN
    from .fpn import FPN
    from .fce_fpn import FCEFPN
    from .pren_fpn import PRENFPN
    support_dict = [
        'FPN', 'FCEFPN', 'LKPAN', 'DBFPN', 'RSEFPN', 'EASTFPN', 'SASTFPN',
        'SequenceEncoder', 'PGFPN', 'TableFPN', 'PRENFPN'
    ]

    module_name = config.pop('name')
    assert module_name in support_dict, Exception(
        'neck only support {}'.format(support_dict))
    module_class = eval(module_name)(**config)
    return module_class
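A usage sketch (the DBFPN argument names are assumptions taken from the corresponding neck class, whose constructor is not shown on this page); note that build_neck pops 'name' out of the config dict, mutating it:

# Hypothetical build_neck usage; run inside the ppocr package.
config = {'name': 'DBFPN', 'in_channels': [16, 24, 56, 480], 'out_channels': 96}
neck = build_neck(config)    # config no longer contains 'name' afterwards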
ppocr/modeling/necks/__pycache__/*.cpython-37.pyc (10 compiled binary files added: __init__, db_fpn, east_fpn, fce_fpn, fpn, pg_fpn, pren_fpn, rnn, sast_fpn, table_fpn)