Unverified commit dfcc691c authored by Srihari Humbarwadi, committed by GitHub

Merge branch 'master' into panoptic-deeplab

parents 83b87f05 a9d9e633
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for Mixture of Experts (MoE) routing.
For MoE routing, we need to split a set of tokens into subsets of tokens.
Later on, different subsets of tokens can potentially be routed to different experts.
"""
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="Text")
class TokenImportanceWithMovingAvg(tf.keras.layers.Layer):
"""Routing based on per-token importance value."""
def __init__(self,
vocab_size,
init_importance,
moving_average_beta=0.995,
**kwargs):
self._vocab_size = vocab_size
self._init_importance = init_importance
self._moving_average_beta = moving_average_beta
super(TokenImportanceWithMovingAvg, self).__init__(**kwargs)
def build(self, input_shape):
self._importance_embedding = self.add_weight(
name="importance_embed",
shape=(self._vocab_size,),
initializer=tf.keras.initializers.Constant(self._init_importance),
trainable=False)
def get_config(self):
config = {
"vocab_size":
self._vocab_size,
"init_importance":
self._init_importance,
"moving_average_beta":
self._moving_average_beta,
}
base_config = super(TokenImportanceWithMovingAvg, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def update_token_importance(self, token_ids, importance):
token_ids = tf.reshape(token_ids, shape=[-1])
importance = tf.reshape(importance, shape=[-1])
beta = self._moving_average_beta
old_importance = tf.gather(self._importance_embedding, token_ids)
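# Exponential moving average: new importance = beta * old + (1 - beta) * observed.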
self._importance_embedding.assign(tf.tensor_scatter_nd_update(
self._importance_embedding,
tf.expand_dims(token_ids, axis=1),
old_importance * beta + tf.cast(importance * (1.0 - beta),
dtype=tf.float32)))
def call(self, inputs):
return tf.gather(self._importance_embedding, inputs)
@tf.keras.utils.register_keras_serializable(package="Text")
class SelectTopK(tf.keras.layers.Layer):
"""Select top-k + random-k tokens according to importance."""
def __init__(self,
top_k=None,
random_k=None,
**kwargs):
self._top_k = top_k
self._random_k = random_k
super(SelectTopK, self).__init__(**kwargs)
def get_config(self):
config = {
"top_k":
self._top_k,
"random_k":
self._random_k,
}
base_config = super(SelectTopK, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if self._random_k is None:
# Pure top-k, no randomness.
pos = tf.argsort(inputs, direction="DESCENDING")
selected = tf.slice(pos, [0, 0], [-1, self._top_k])
not_selected = tf.slice(pos, [0, self._top_k], [-1, -1])
elif self._top_k is None:
# Pure randomness, no top-k.
pos = tf.argsort(tf.random.uniform(shape=tf.shape(inputs)),
direction="DESCENDING")
selected = tf.slice(pos, [0, 0], [-1, self._random_k])
not_selected = tf.slice(pos, [0, self._random_k], [-1, -1])
else:
# Top-k plus randomness.
pos = tf.argsort(inputs, direction="DESCENDING")
selected_top_k = tf.slice(pos, [0, 0], [-1, self._top_k])
pos_left = tf.slice(pos, [0, self._top_k], [-1, -1])
# Randomly shuffle pos_left
sort_index = tf.argsort(
tf.random.uniform(shape=tf.shape(pos_left)),
direction="DESCENDING")
pos_left = tf.gather(pos_left, sort_index, batch_dims=1, axis=1)
selected_rand = tf.slice(pos_left, [0, 0], [-1, self._random_k])
not_selected = tf.slice(pos_left, [0, self._random_k], [-1, -1])
selected = tf.concat([selected_top_k, selected_rand], axis=1)
# Return the indices of selected and not-selected tokens.
return selected, not_selected
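To make the intended flow concrete, here is a minimal usage sketch combining the two layers above (the vocabulary size, token ids, and top-k/random-k values are illustrative assumptions, not taken from the library's tests):

```python
import tensorflow as tf

from official.nlp.modeling.layers import routing

# Toy setup: a vocabulary of 8 tokens, all starting with the same importance.
importance_embed = routing.TokenImportanceWithMovingAvg(
    vocab_size=8, init_importance=0.0, moving_average_beta=0.995)
selector = routing.SelectTopK(top_k=2, random_k=1)

token_ids = tf.constant([[3, 1, 5, 0, 2, 7]])  # [batch, seq_len]
scores = importance_embed(token_ids)           # per-token importance, [batch, seq_len]
selected, not_selected = selector(scores)      # positions within each sequence

# Tokens at the selected positions can be routed to a heavier expert; the rest
# take a cheaper path. The importance estimates are then refreshed with an
# exponential moving average of whatever per-token signal the model produces
# (all-ones here, purely for illustration).
important_ids = tf.gather(token_ids, selected, batch_dims=1)
importance_embed.update_token_importance(
    token_ids=important_ids,
    importance=tf.ones_like(important_ids, dtype=tf.float32))
```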
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for routing."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import routing
class TokenImportanceTest(tf.test.TestCase, parameterized.TestCase):
def test_token_importance(self):
token_importance_embed = routing.TokenImportanceWithMovingAvg(
vocab_size=4,
init_importance=10.0,
moving_average_beta=0.995)
importance = token_importance_embed(np.array([[0, 1], [2, 3]]))
self.assertAllClose(importance, np.array([[10.0, 10.0], [10.0, 10.0]]))
token_importance_embed.update_token_importance(
token_ids=np.array([[0, 1]]),
importance=np.array([[0.0, 0.0]]))
importance = token_importance_embed(np.array([[0, 1], [2, 3]]))
self.assertAllClose(importance, np.array([[9.95, 9.95], [10.0, 10.0]]))
class TopKSelectionTest(tf.test.TestCase, parameterized.TestCase):
def test_top_k_selection(self):
token_selection = routing.SelectTopK(top_k=2)
selected, _ = token_selection(np.array([[0, 1, 2, 3], [4, 3, 2, 1]]))
self.assertAllClose(selected, np.array([[3, 2], [0, 1]]))
def test_random_k_selection(self):
token_selection = routing.SelectTopK(random_k=2)
selected, _ = token_selection(np.array([[0, 1, 2, 3], [4, 3, 2, 1]]))
self.assertAllClose(selected.shape, (2, 2))
def test_top_k_random_k(self):
token_selection = routing.SelectTopK(top_k=1, random_k=1)
selected, _ = token_selection(np.array([[0, 1, 2, 3], [4, 3, 2, 1]]))
self.assertAllClose(selected.shape, (2, 2))
if __name__ == "__main__":
tf.test.main()
@@ -54,9 +54,31 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
use_query_residual=True,
key_dim=None,
value_dim=None,
output_last_dim=None,
diff_q_kv_att_layer_norm=False,
**kwargs):
"""Initializes `TransformerEncoderBlock`.
Note: If `output_last_dim` is used and `use_query_residual` is `True`, the
`output_last_dim`'s value must equal the first input's last dimension for
the query residual connection to work. This is because the residual
connection after the multi-head-attention requires their dimensions to
match. If `use_query_residual` is `False`, the `output_last_dim` dictates
the last dimension of the output of this module and the
multi-head-attention.
E.g. let's say input dims are `[batch_size, seq_dim, input_last_dim]`.
Scenario 1: If `output_last_dim` is not `None`, then the output dims of this
module would be `[batch_size, seq_dim, output_last_dim]`. Note `key_dim` is
overridden by `output_last_dim`.
Scenario 2: If `output_last_dim` is `None` and `key_dim` is not `None`, then
the output dims of this module would be `[batch_size, seq_dim, key_dim]`.
Scenario 3: If the `output_last_dim` and `key_dim` are both `None`, the
output dims would be `[batch_size, seq_dim, input_last_dim]`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
@@ -88,6 +110,18 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
use_query_residual: Toggle to execute residual connection after attention.
key_dim: `key_dim` for the `tf.keras.layers.MultiHeadAttention`. If
`None`, we use the first `input_shape`'s last dim.
value_dim: `value_dim` for the `tf.keras.layers.MultiHeadAttention`.
output_last_dim: Final dimension of the output of this module. This also
dictates the value for the final dimension of the
multi-head-attention. When it's `None`, we use, in order of decreasing
precedence, `key_dim` * `num_heads` or the first `input_shape`'s last
dim as the output's last dim.
diff_q_kv_att_layer_norm: If `True`, create a separate attention layer
norm layer for query and key-value if `norm_first` is `True`. Invalid
to set to `True` if `norm_first` is `False`.
**kwargs: keyword arguments.
"""
util.filter_kwargs(kwargs)
@@ -112,6 +146,11 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._use_query_residual = use_query_residual
self._key_dim = key_dim
self._value_dim = value_dim
self._output_last_dim = output_last_dim
self._diff_q_kv_att_layer_norm = diff_q_kv_att_layer_norm
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
@@ -119,6 +158,10 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
self._attention_initializer = self._kernel_initializer
self._attention_axes = attention_axes
if self._diff_q_kv_att_layer_norm and not self._norm_first:
raise ValueError("Setting `diff_q_and_kv_attention_layer_norm` to True"
"when `norm_first` is False is invalid.")
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
@@ -136,7 +179,13 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
if self._key_dim is None:
self._key_dim = int(hidden_size // self._num_heads)
if self._output_last_dim is None:
last_output_shape = hidden_size
else:
last_output_shape = self._output_last_dim
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
@@ -146,11 +195,13 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
bias_constraint=self._bias_constraint)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._key_dim,
value_dim=self._value_dim,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
output_shape=self._output_last_dim,
name="self_attention", name="self_attention",
**common_kwargs) **common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout) self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
...@@ -162,6 +213,15 @@ class TransformerEncoderBlock(tf.keras.layers.Layer): ...@@ -162,6 +213,15 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
axis=-1, axis=-1,
epsilon=self._norm_epsilon, epsilon=self._norm_epsilon,
dtype=tf.float32)) dtype=tf.float32))
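# By default the query and key-value paths share one attention layer norm;
# a separate key-value layer norm is built below only when
# `diff_q_kv_att_layer_norm` is True.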
self._attention_layer_norm_kv = self._attention_layer_norm
if self._diff_q_kv_att_layer_norm:
self._attention_layer_norm_kv = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm_kv",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
@@ -181,7 +241,7 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.experimental.EinsumDense(
einsum_equation,
output_shape=(None, last_output_shape),
bias_axes="d", bias_axes="d",
name="output", name="output",
kernel_initializer=self._kernel_initializer, kernel_initializer=self._kernel_initializer,
...@@ -235,6 +295,16 @@ class TransformerEncoderBlock(tf.keras.layers.Layer): ...@@ -235,6 +295,16 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
"attention_initializer": "attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer), tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes": self._attention_axes, "attention_axes": self._attention_axes,
"use_query_residual":
self._use_query_residual,
"key_dim":
self._key_dim,
"value_dim":
self._value_dim,
"output_last_dim":
self._output_last_dim,
"diff_q_kv_att_layer_norm":
self._diff_q_kv_att_layer_norm,
}
base_config = super(TransformerEncoderBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@@ -271,7 +341,7 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
@@ -280,7 +350,7 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor
if key_value is None:
@@ -288,11 +358,18 @@ class TransformerEncoderBlock(tf.keras.layers.Layer):
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause, because the `else` branch
# below only applies when `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + attention_output
else:
if self._use_query_residual:
attention_output = target_tensor + attention_output
attention_output = self._attention_layer_norm(attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
...
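The output-shape scenarios spelled out in the docstring above can be sanity-checked with a short sketch (the shapes and argument values are illustrative assumptions, and the import path assumes the layer is exposed via `official.nlp.modeling.layers`):

```python
import tensorflow as tf

from official.nlp.modeling import layers

q = tf.zeros([2, 4, 16])   # [batch_size, seq_dim, input_last_dim]
kv = tf.zeros([2, 8, 16])
mask = tf.zeros([2, 4, 8])

# Scenario 3: `output_last_dim` and `key_dim` are both None, so the output's
# last dimension falls back to the input's last dimension (16).
block = layers.TransformerEncoderBlock(
    num_attention_heads=2, inner_dim=32, inner_activation='relu')
assert block([q, kv, mask]).shape[-1] == 16

# Scenario 1: `output_last_dim` is set. `use_query_residual` must be False,
# since a 24-dimensional attention output can no longer be added to the
# 16-dimensional query input.
block = layers.TransformerEncoderBlock(
    num_attention_heads=2, inner_dim=32, inner_activation='relu',
    use_query_residual=False, output_last_dim=24)
assert block([q, kv, mask]).shape[-1] == 24
```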
@@ -252,6 +252,182 @@ class TransformerEncoderBlockLayerTest(keras_parameterized.TestCase):
self.assertEqual(output.shape, q_tensor.shape)
@keras_parameterized.run_all_keras_modes
class TransformerEncoderBlockLayerTestWithoutParams(
keras_parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockLayerTestWithoutParams, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_raises_invalid_arg_error_when_q_kv_dims_are_different(self):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=True)
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
with self.assertRaises(tf.errors.InvalidArgumentError):
test_layer(inputs)
@parameterized.named_parameters(
('output_range_not_none', 2),
('output_range_none', None))
def test_needs_diff_q_kv_att_layer_norm_to_be_true_for_diff_q_and_kv_dims(
self,
output_range):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
output_range=output_range,
norm_first=True)
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
with self.assertRaises(tf.errors.InvalidArgumentError):
test_layer(inputs)
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
diff_q_kv_att_layer_norm=True,
norm_first=True)
# Forward path.
test_layer(inputs)
@parameterized.named_parameters(
('norm_first_is_true', True),
('norm_first_is_false', False))
def test_use_query_residual_false_removes_add_op(self, norm_first):
graph_with_res = tf.Graph()
with graph_with_res.as_default():
layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=norm_first)
inputs = tf.keras.Input(shape=(None, None, 2))
outputs = layer(inputs)
tf.keras.Model(inputs=inputs, outputs=outputs)
graph_without_res = tf.Graph()
with graph_without_res.as_default():
layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=norm_first,
use_query_residual=False)
inputs = tf.keras.Input(shape=(None, None, 2))
outputs = layer(inputs)
tf.keras.Model(inputs=inputs, outputs=outputs)
graph_with_res_names = {x.name for x in graph_with_res.get_operations()}
graph_without_res_names = {
x.name for x in graph_without_res.get_operations()
}
self.assertIn('transformer_encoder_block/add',
list(graph_with_res_names - graph_without_res_names)[0])
self.assertEmpty(graph_without_res_names - graph_with_res_names)
@parameterized.named_parameters(
('key_dim_is_none', None, 128, 2, 128 // 2),
('key_dim_is_not_none', 30, 128, 2, 30))
def test_key_dim(
self,
key_dim,
q_tensor_last_dim,
some_num_attention_heads,
expected):
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
key_dim=key_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(
expected,
test_layer._attention_layer.get_config()['key_dim'])
@parameterized.named_parameters(
('output_last_dim_is_none_use_query_residual_false',
False,
None,
128,
128),
('output_last_dim_is_none_use_query_residual_true',
True,
None,
128,
128),
('output_last_dim_is_not_none', False, 30, 128, 30))
def test_output_last_dim(
self,
use_query_residual,
output_last_dim,
q_tensor_last_dim,
expected):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
# Must be false for multi-head output to be different from
# first input's last dim
use_query_residual=use_query_residual,
output_last_dim=output_last_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
output = test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(output.numpy().shape[-1], expected)
@parameterized.named_parameters(
('value_dim_is_none', None, 128, 2, 128 // 2),
('value_dim_is_not_none', 30, 128, 2, 30))
def test_value_dim(
self,
value_dim,
q_tensor_last_dim,
some_num_attention_heads,
expected):
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
value_dim=value_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(
expected,
test_layer._attention_layer.get_config()['value_dim'])
@keras_parameterized.run_all_keras_modes
class TransformerArgumentTest(keras_parameterized.TestCase):
@@ -277,6 +453,138 @@ class TransformerArgumentTest(keras_parameterized.TestCase):
output = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_norm_first_false_and_diff_q_kv_att_layer_norm_true_raises(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
with self.assertRaises(ValueError):
TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=False,
diff_q_kv_att_layer_norm=True)
def test_diff_q_kv_att_layer_norm_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=False)
self.assertIn('diff_q_kv_att_layer_norm', encoder.get_config())
self.assertFalse(encoder.get_config()['diff_q_kv_att_layer_norm'])
def test_diff_q_kv_att_layer_norm_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=True,
diff_q_kv_att_layer_norm=True)
self.assertIn('diff_q_kv_att_layer_norm', encoder.get_config())
self.assertTrue(encoder.get_config()['diff_q_kv_att_layer_norm'])
def test_use_query_residual_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('use_query_residual', encoder.get_config())
self.assertTrue(encoder.get_config()['use_query_residual'])
def test_use_query_residual_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
use_query_residual=False)
self.assertIn('use_query_residual', encoder.get_config())
self.assertFalse(encoder.get_config()['use_query_residual'])
def test_key_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('key_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['key_dim'])
def test_key_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
key_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
key_dim=key_dim)
self.assertIn('key_dim', encoder.get_config())
self.assertEqual(key_dim, encoder.get_config()['key_dim'])
def test_value_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('value_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['value_dim'])
def test_value_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
value_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
value_dim=value_dim)
self.assertIn('value_dim', encoder.get_config())
self.assertEqual(value_dim, encoder.get_config()['value_dim'])
def test_output_last_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('output_last_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['output_last_dim'])
def test_output_last_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
output_last_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
output_last_dim=output_last_dim)
self.assertIn('output_last_dim', encoder.get_config())
self.assertEqual(output_last_dim, encoder.get_config()['output_last_dim'])
def test_get_config(self):
num_attention_heads = 2
encoder_block = TransformerEncoderBlock(
@@ -290,7 +598,12 @@ class TransformerArgumentTest(keras_parameterized.TestCase):
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.),
use_query_residual=False,
key_dim=20,
value_dim=30,
output_last_dim=40,
diff_q_kv_att_layer_norm=True)
encoder_block_config = encoder_block.get_config()
new_encoder_block = TransformerEncoderBlock.from_config(
encoder_block_config)
...
@@ -199,6 +199,7 @@ class BertPretrainerV2(tf.keras.Model):
self._config = {
'encoder_network': encoder_network,
'mlm_initializer': mlm_initializer,
'mlm_activation': mlm_activation,
'classification_heads': classification_heads,
'name': name,
}
...
@@ -1234,7 +1234,7 @@ class Decoder(Module):
encoded: the encoder outputs.
decoder_mask: the decoder self-attention mask.
encoder_decoder_mask: the cross-attention mask.
decode: Whether to perform autoregressive decoding.
decode_position: integer, the position to decode.
cache: The cache dictionary of key, value tensors.
max_decode_len: An optional integer specifying the maximum decoding
...
@@ -43,6 +43,7 @@ ctpu up --name <tpu-name> --zone <zone> --tpu-size=v3-32 --tf-version nightly
This model requires TF version `>= 2.5`. Currently, that is only available via a
`nightly` build on Cloud.
### Install requirements
SSH into the TPU host with `gcloud compute ssh <tpu-name>` and execute the
@@ -60,7 +61,7 @@ The configurations can be found in the `configs/experiments` directory. You can
launch a training job by executing.
```shell
$ export CONFIG=./official/projects/deepmac_maskrcnn/configs/experiments/deep_mask_head_rcnn_voc_r50.yaml
$ export MODEL_DIR="gs://<path-for-checkpoints>"
$ export ANNOTAION_FILE="gs://<path-to-coco-annotation-json>"
$ export TRAIN_DATA="gs://<path-to-train-data>"
@@ -71,7 +72,7 @@ task.train_data.input_path=${TRAIN_DATA},\
task.annotation_file=${ANNOTAION_FILE},\
runtime.distribution_strategy=tpu"
$ python3 -m official.projects.deepmac_maskrcnn.train \
--logtostderr \
--mode=train_and_eval \
--experiment=deep_mask_head_rcnn_resnetfpn_coco \
...
@@ -15,4 +15,4 @@
"""Imports to configure Mask R-CNN with deep mask heads."""
# pylint: disable=unused-import
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn
@@ -16,7 +16,7 @@
import tensorflow as tf
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn
class DeepMaskHeadRcnnConfigTest(tf.test.TestCase):
...