ModelZoo / ResNet50_tensorflow · Commits

Commit 9a3e9518
Authored May 27, 2022 by Scott Zhu
Committed by A. Unique TensorFlower on May 27, 2022

Prepare for upcoming keras initializer change.

PiperOrigin-RevId: 451474703

Parent: a945ae9d
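Every hunk in this commit applies the same pattern: an initializer instance that could be handed to more than one layer is wrapped in tf_utils.clone_initializer. The Keras change being prepared for appears to be initializers becoming stateless, so that one instance called twice with the same shape returns the same values, and layers sharing an instance would start with identical weights; that reading is inferred from the diff, not stated on this page. A minimal sketch of the failure mode and the fix (the Dense layers are illustrative only):

import tensorflow as tf

from official.modeling import tf_utils

# One unseeded initializer instance shared by two layers. With stateless
# initializers, repeated calls with the same shape repeat the same values,
# so both kernels would be initialized identically.
shared = tf.keras.initializers.GlorotUniform()
dense_a = tf.keras.layers.Dense(4, kernel_initializer=shared)
dense_b = tf.keras.layers.Dense(4, kernel_initializer=shared)

# The pattern adopted in this commit: rebuild a fresh copy per layer from
# the original initializer's config, keeping the random draws independent.
dense_c = tf.keras.layers.Dense(
    4, kernel_initializer=tf_utils.clone_initializer(shared))
dense_d = tf.keras.layers.Dense(
    4, kernel_initializer=tf_utils.clone_initializer(shared))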
Changes: 1 changed file with 16 additions and 11 deletions

official/projects/detr/modeling/transformer.py  +16 -11
@@ -20,6 +20,7 @@ cross-attention layer.
 
 import tensorflow as tf
 
+from official.modeling import tf_utils
 from official.nlp.modeling import layers
 from official.nlp.modeling import models
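The newly imported helper is the only functional addition. A sketch of what official.modeling.tf_utils.clone_initializer does, reconstructed from its call sites below; treat the exact body as an assumption:

import tensorflow as tf

def clone_initializer(initializer):
  # Rebuild Initializer instances from their own config so every caller
  # gets an independent copy with identical hyperparameters.
  if isinstance(initializer, tf.keras.initializers.Initializer):
    return initializer.__class__.from_config(initializer.get_config())
  # Strings and dict configs pass through unchanged: Keras already builds
  # a fresh initializer instance from those at layer-construction time.
  return initializer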
@@ -91,8 +92,9 @@ class TransformerEncoder(tf.keras.layers.Layer):
               norm_first=self._norm_first,
               norm_epsilon=self._norm_epsilon,
               inner_dropout=self._intermediate_dropout,
-              attention_initializer=models.seq2seq_transformer
-              .attention_initializer(input_shape[2]),
+              attention_initializer=tf_utils.clone_initializer(
+                  models.seq2seq_transformer.attention_initializer(
+                      input_shape[2])),
               name=("layer_%d" % i)))
     self.output_normalization = tf.keras.layers.LayerNormalization(
         epsilon=self._norm_epsilon, dtype="float32")
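The value being cloned above comes from models.seq2seq_transformer.attention_initializer, which builds a uniform initializer scaled to the attention hidden size (input_shape[2]). A sketch of that helper, assuming the version shipped in official/nlp/modeling/models/seq2seq_transformer.py at the time:

import math

import tensorflow as tf

def attention_initializer(hidden_size):
  # Glorot-style uniform limit for a square hidden-to-hidden projection.
  hidden_size = int(hidden_size)
  limit = math.sqrt(6.0 / (hidden_size + hidden_size))
  return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit)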
@@ -234,7 +236,8 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
       self._attention_initializer = tf.keras.initializers.get(
           attention_initializer)
     else:
-      self._attention_initializer = self._kernel_initializer
+      self._attention_initializer = tf_utils.clone_initializer(
+          self._kernel_initializer)
     self._attention_axes = attention_axes
 
   def build(self, input_shape):
@@ -284,7 +287,7 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
         einsum_equation,
         output_shape=(None, self._inner_dim),
         bias_axes="d",
-        kernel_initializer=self._kernel_initializer,
+        kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
         name="intermediate",
         **common_kwargs)
     policy = tf.keras.mixed_precision.global_policy()
@@ -302,7 +305,7 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
         output_shape=(None, hidden_size),
         bias_axes="d",
         name="output",
-        kernel_initializer=self._kernel_initializer,
+        kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
         **common_kwargs)
     self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
     # Use float32 in layernorm for numeric stability.
@@ -490,8 +493,9 @@ class TransformerDecoder(tf.keras.layers.Layer):
               norm_first=self._norm_first,
               norm_epsilon=self._norm_epsilon,
               intermediate_dropout=self._intermediate_dropout,
-              attention_initializer=models.seq2seq_transformer
-              .attention_initializer(input_shape[2]),
+              attention_initializer=tf_utils.clone_initializer(
+                  models.seq2seq_transformer.attention_initializer(
+                      input_shape[2])),
               name=("layer_%d" % i)))
     self.output_normalization = tf.keras.layers.LayerNormalization(
         epsilon=self._norm_epsilon, dtype="float32")
@@ -656,7 +660,8 @@ class TransformerDecoderBlock(tf.keras.layers.Layer):
       self._attention_initializer = tf.keras.initializers.get(
           attention_initializer)
     else:
-      self._attention_initializer = self._kernel_initializer
+      self._attention_initializer = tf_utils.clone_initializer(
+          self._kernel_initializer)
     self._cross_attention_cls = layers.attention.MultiHeadAttention
 
   def build(self, input_shape):
@@ -690,7 +695,7 @@ class TransformerDecoderBlock(tf.keras.layers.Layer):
         "abc,cd->abd",
         output_shape=(None, hidden_size),
         bias_axes="d",
-        kernel_initializer=self._kernel_initializer,
+        kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
         name="output",
         **common_kwargs)
     self.self_attention_dropout = tf.keras.layers.Dropout(
@@ -726,7 +731,7 @@ class TransformerDecoderBlock(tf.keras.layers.Layer):
         "abc,cd->abd",
         output_shape=(None, self.intermediate_size),
         bias_axes="d",
-        kernel_initializer=self._kernel_initializer,
+        kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
         name="intermediate",
         **common_kwargs)
     self.intermediate_activation_layer = tf.keras.layers.Activation(
@@ -737,7 +742,7 @@ class TransformerDecoderBlock(tf.keras.layers.Layer):
         "abc,cd->abd",
         output_shape=(None, hidden_size),
         bias_axes="d",
-        kernel_initializer=self._kernel_initializer,
+        kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
         name="output",
         **common_kwargs)
     self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
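A quick way to sanity-check the cloning behavior after this change, assuming unseeded clones draw independent random state; the shapes here are arbitrary:

import tensorflow as tf

from official.modeling import tf_utils

init = tf.keras.initializers.GlorotUniform()

# Two independent clones should produce different values for the same shape,
# whereas a single stateless instance called twice would repeat itself.
a = tf_utils.clone_initializer(init)(shape=(4, 4))
b = tf_utils.clone_initializer(init)(shape=(4, 4))
print(bool(tf.reduce_all(tf.equal(a, b))))  # Expected: False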