ModelZoo / ResNet50_tensorflow · Commits

Commit a26e4649, authored Feb 24, 2022 by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 430772605
parent 4585ff77
Showing 1 changed file with 21 additions and 8 deletions:

official/projects/vit/modeling/nn_blocks.py  (+21, -8)
@@ -22,10 +22,15 @@ from official.vision.beta.modeling.layers.nn_layers import StochasticDepth
 class TransformerEncoderBlock(modeling.layers.TransformerEncoderBlock):
   """TransformerEncoderBlock layer with stochastic depth."""

-  def __init__(self, *args, stochastic_depth_drop_rate=0.0, **kwargs):
+  def __init__(self,
+               *args,
+               stochastic_depth_drop_rate=0.0,
+               return_attention=False,
+               **kwargs):
     """Initializes TransformerEncoderBlock."""
     super().__init__(*args, **kwargs)
     self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
+    self._return_attention = return_attention

   def build(self, input_shape):
     if self._stochastic_depth_drop_rate:

@@ -73,8 +78,9 @@ class TransformerEncoderBlock(modeling.layers.TransformerEncoderBlock):
     if key_value is None:
       key_value = input_tensor
-    attention_output = self._attention_layer(
-        query=target_tensor, value=key_value, attention_mask=attention_mask)
+    attention_output, attention_scores = self._attention_layer(
+        query=target_tensor, value=key_value, attention_mask=attention_mask,
+        return_attention_scores=True)
     attention_output = self._attention_dropout(attention_output)
     if self._norm_first:

@@ -95,12 +101,19 @@ class TransformerEncoderBlock(modeling.layers.TransformerEncoderBlock):
     layer_output = self._output_dropout(layer_output)
     if self._norm_first:
-      return source_attention_output + self._stochastic_depth(
-          layer_output, training=training)
+      if self._return_attention:
+        return source_attention_output + self._stochastic_depth(
+            layer_output, training=training), attention_scores
+      else:
+        return source_attention_output + self._stochastic_depth(
+            layer_output, training=training)
     # During mixed precision training, layer norm output is always fp32 for now.
     # Casts fp32 for the subsequent add.
     layer_output = tf.cast(layer_output, tf.float32)
-    return self._output_layer_norm(
-        layer_output +
-        self._stochastic_depth(attention_output, training=training))
+    if self._return_attention:
+      return self._output_layer_norm(
+          layer_output +
+          self._stochastic_depth(
+              attention_output, training=training)), attention_scores
+    else:
+      return self._output_layer_norm(
+          layer_output +
+          self._stochastic_depth(attention_output, training=training))
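The net effect: when the block is constructed with return_attention=True, it forwards the attention weights produced by the underlying attention layer's return_attention_scores=True path, and call() returns a (layer_output, attention_scores) tuple instead of a single tensor. Below is a minimal usage sketch, not part of the commit; the positional arguments (num_attention_heads, inner_dim, inner_activation) are assumed to come from the base modeling.layers.TransformerEncoderBlock, and the shapes are illustrative ViT-Base values.

import tensorflow as tf

from official.projects.vit.modeling import nn_blocks

# Illustrative ViT-Base shapes: batch of 2, 197 tokens
# (a 14x14 patch grid plus the class token), hidden size 768.
inputs = tf.random.normal([2, 197, 768])

block = nn_blocks.TransformerEncoderBlock(
    num_attention_heads=12,
    inner_dim=3072,
    inner_activation='gelu',
    stochastic_depth_drop_rate=0.1,
    return_attention=True)  # flag added by this commit

# With return_attention=True, call() returns a two-element tuple.
outputs, attention_scores = block(inputs, training=False)
print(outputs.shape)           # (2, 197, 768)
print(attention_scores.shape)  # (2, 12, 197, 197): per-head attention weights

With return_attention left at its default of False, the change is backward compatible and the block still returns a single tensor.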