ModelZoo / ResNet50_tensorflow · Commits

Commit 4c226604
Authored Sep 22, 2020 by Chen Chen
Committed by A. Unique TensorFlower on Sep 22, 2020

Internal Change

PiperOrigin-RevId: 333163906

Parent: 4c693d66

Showing 1 changed file with 3 additions and 7 deletions:
official/nlp/modeling/networks/mobile_bert_encoder.py (+3, -7)
official/nlp/modeling/networks/mobile_bert_encoder.py @ 4c226604
@@ -127,7 +127,7 @@ class MobileBertEmbedding(tf.keras.layers.Layer):
         self.dropout_rate,
         name='embedding_dropout')
 
-  def call(self, input_ids, token_type_ids=None, training=False):
+  def call(self, input_ids, token_type_ids=None):
     word_embedding_out = self.word_embedding(input_ids)
     word_embedding_out = tf.concat(
         [tf.pad(word_embedding_out[:, 1:], ((0, 0), (0, 1), (0, 0))),
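For orientation: the tf.concat / tf.pad context lines above implement MobileBERT's trigram input embedding, where each position's word embedding is concatenated with its left and right neighbors along the feature axis before projection. A toy sketch of the shift (values are made up, and the right-shift half is an assumption, since the remaining concat arguments are truncated in this hunk):

import tensorflow as tf

# Toy batch: 1 sequence, 3 tokens, embedding width 2 (values made up).
emb = tf.constant([[[1., 1.], [2., 2.], [3., 3.]]])

# Left shift, as in the hunk: drop token 0 and pad one zero row onto the
# end of the sequence axis, so position i now holds token i+1's embedding.
shifted_left = tf.pad(emb[:, 1:], ((0, 0), (0, 1), (0, 0)))
# -> [[[2. 2.], [3. 3.], [0. 0.]]]

# Assumed counterpart: shift right so position i holds token i-1's embedding.
shifted_right = tf.pad(emb[:, :-1], ((0, 0), (1, 0), (0, 0)))

# Concatenating along the feature axis gives each position its 3-token
# window and triples the embedding width: (1, 3, 2) -> (1, 3, 6).
trigram = tf.concat([shifted_left, emb, shifted_right], axis=2)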
@@ -142,7 +142,7 @@ class MobileBertEmbedding(tf.keras.layers.Layer):
       type_embedding_out = self.type_embedding(token_type_ids)
       embedding_out += type_embedding_out
     embedding_out = self.layer_norm(embedding_out)
-    embedding_out = self.dropout_layer(embedding_out, training=training)
+    embedding_out = self.dropout_layer(embedding_out)
     return embedding_out
@@ -300,7 +300,6 @@ class TransformerLayer(tf.keras.layers.Layer):
   def call(self,
            input_tensor,
            attention_mask=None,
-           training=False,
            return_attention_scores=False):
     """Implements the forward pass.
@@ -309,7 +308,6 @@ class TransformerLayer(tf.keras.layers.Layer):
       attention_mask: (optional) int32 tensor of shape [batch_size, seq_length,
         seq_length], with 1 for positions that can be attended to and 0 in
         positions that should not be.
-      training: If the model is in training mode.
       return_attention_scores: Whether to return attention scores.
 
     Returns:
@@ -326,7 +324,6 @@ class TransformerLayer(tf.keras.layers.Layer):
           f'hidden size {self.hidden_size}'))
 
     prev_output = input_tensor
     # input bottleneck
     dense_layer = self.block_layers['bottleneck_input'][0]
     layer_norm = self.block_layers['bottleneck_input'][1]
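The repeated self.block_layers[...] lookups in this hunk suggest the layer keeps its sub-layers in a dict keyed by block name, each value a list of layers indexed in application order. A hypothetical sketch of that layout (layer types, widths, and rates are assumptions, not read from this file):

import tensorflow as tf

# Hypothetical layout consistent with the indices used in the diff:
#   block_layers['bottleneck_input'][0] -> dense, [1] -> layer norm
#   block_layers['bottleneck_output'][1] -> dropout, [2] -> layer norm
block_layers = {
    'bottleneck_input': [
        tf.keras.layers.Dense(128),            # [0] project into the bottleneck
        tf.keras.layers.LayerNormalization(),  # [1]
    ],
    'bottleneck_output': [
        tf.keras.layers.Dense(512),            # [0] project back out
        tf.keras.layers.Dropout(0.1),          # [1]
        tf.keras.layers.LayerNormalization(),  # [2]
    ],
}

# Lookups then read exactly like the context lines in the hunk:
dense_layer = block_layers['bottleneck_input'][0]
layer_norm = block_layers['bottleneck_input'][1]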
@@ -355,7 +352,6 @@ class TransformerLayer(tf.keras.layers.Layer):
         key_tensor,
         attention_mask,
-        return_attention_scores=True,
-        training=training)
+        return_attention_scores=True)
     attention_output = layer_norm(attention_output + layer_input)
@@ -375,7 +371,7 @@ class TransformerLayer(tf.keras.layers.Layer):
     dropout_layer = self.block_layers['bottleneck_output'][1]
     layer_norm = self.block_layers['bottleneck_output'][2]
     layer_output = bottleneck(layer_output)
-    layer_output = dropout_layer(layer_output, training=training)
+    layer_output = dropout_layer(layer_output)
     layer_output = layer_norm(layer_output + prev_output)
     if return_attention_scores:
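Taken together, the hunks remove every explicit training= argument from these call() methods. This relies on a TF2 Keras behavior: Layer.__call__ accepts a training argument even when call() does not declare one, and propagates it through the call context to nested layers such as Dropout. A minimal sketch of that behavior (TinyBlock is an illustrative stand-in, not a class from this file):

import tensorflow as tf


class TinyBlock(tf.keras.layers.Layer):
  """A wrapper whose call() never mentions the training flag."""

  def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.dropout = tf.keras.layers.Dropout(0.5)

  def call(self, inputs):
    # No explicit training= here: Keras forwards the outer __call__'s
    # training flag to nested layers through the call context.
    return self.dropout(inputs)


block = TinyBlock()
x = tf.ones((1, 8))
print(block(x, training=False).numpy())  # dropout inactive: all ones
print(block(x, training=True).numpy())   # some entries zeroed, rest scaled by 2

Because Dropout resolves training from the surrounding call when it is not passed explicitly, the simplified lines such as embedding_out = self.dropout_layer(embedding_out) keep the same train/eval behavior as before.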