# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the attention layer."""

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import attention


# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class MultiHeadAttentionTest(keras_parameterized.TestCase):

  @parameterized.named_parameters(
      ("key_value_same_proj", None, None, [40, 80]),
      ("key_value_different_proj", 32, 60, [40, 60]),
  )
  def test_non_masked_attention(self, value_size, output_shape, output_dims):
    """Test that the attention layer can be created without a mask tensor."""
    test_layer = attention.MultiHeadAttention(
        num_heads=12,
        key_size=64,
        value_size=value_size,
        output_shape=output_shape)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    value = tf.keras.Input(shape=(20, 80))
    output = test_layer(query=query, value=value)
    self.assertEqual(output.shape.as_list(), [None] + output_dims)

  def test_non_masked_self_attention(self):
    """Test with one input (self-attenntion) and no mask tensor."""
    test_layer = attention.MultiHeadAttention(num_heads=12, key_size=64)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])

  def test_attention_scores(self):
    """Test attention outputs with coefficients."""
    test_layer = attention.MultiHeadAttention(
        num_heads=12, key_size=64, return_attention_scores=True)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output, coef = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])
    self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])

  @parameterized.named_parameters(("with_bias", True), ("no_bias", False))
  def test_masked_attention(self, use_bias):
    """Test with a mask tensor."""
    test_layer = attention.MultiHeadAttention(
        num_heads=2, key_size=2, use_bias=use_bias)
    # Create a 3-dimensional input (the first dimension is implicit).
    batch_size = 3
    query = tf.keras.Input(shape=(4, 8))
    value = tf.keras.Input(shape=(2, 8))
    mask_tensor = tf.keras.Input(shape=(4, 2))
    output = test_layer(query=query, value=value, attention_mask=mask_tensor)

    # Create a model containing the test layer.
    model = tf.keras.Model([query, value, mask_tensor], output)

    # Generate data for the input (non-mask) tensors.
    from_data = 10 * np.random.random_sample((batch_size, 4, 8))
    to_data = 10 * np.random.random_sample((batch_size, 2, 8))

    # Invoke the data with a random set of mask data. This should mask at least
    # one element.
    mask_data = np.random.randint(2, size=(batch_size, 4, 2))
    masked_output_data = model.predict([from_data, to_data, mask_data])

    # Invoke the same data, but with a null mask (where no elements are masked).
    null_mask_data = np.ones((batch_size, 4, 2))
    unmasked_output_data = model.predict([from_data, to_data, null_mask_data])

    # Because one data is masked and one is not, the outputs should not be the
    # same.
    self.assertNotAllClose(masked_output_data, unmasked_output_data)

    # Tests the layer with three inputs: Q, K, V.
    key = tf.keras.Input(shape=(2, 8))
    output = test_layer(query, value=value, key=key, attention_mask=mask_tensor)
    model = tf.keras.Model([query, value, key, mask_tensor], output)

    masked_output_data = model.predict([from_data, to_data, to_data, mask_data])
    unmasked_output_data = model.predict(
        [from_data, to_data, to_data, null_mask_data])
    # Because one data is masked and one is not, the outputs should not be the
    # same.
    self.assertNotAllClose(masked_output_data, unmasked_output_data)

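    # Each internal dense projection owns a kernel variable, plus a bias
    # variable when use_bias=True, which is what the counts below check.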
    if use_bias:
      self.assertLen(test_layer._query_dense.trainable_variables, 2)
      self.assertLen(test_layer._output_dense.trainable_variables, 2)
    else:
      self.assertLen(test_layer._query_dense.trainable_variables, 1)
      self.assertLen(test_layer._output_dense.trainable_variables, 1)

  def test_initializer(self):
    """Test with a specified initializer."""
    test_layer = attention.MultiHeadAttention(
        num_heads=12,
        key_size=64,
        kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])

  @parameterized.named_parameters(
      ("4d_inputs_1freebatch_mask2", [3, 4], [3, 2], [4, 2],
       (2,)), ("4d_inputs_1freebatch_mask3", [3, 4], [3, 2], [3, 4, 2], (2,)),
      ("4d_inputs_1freebatch_mask4", [3, 4], [3, 2], [3, 2, 4, 2],
       (2,)), ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
      ("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)),
      ("5D_inputs_2D_attention_fullmask", [5, 3, 4], [5, 3, 2], [5, 3, 4, 3, 2],
       (2, 3)))
  def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
    """Test with a mask tensor."""
    test_layer = attention.MultiHeadAttention(
        num_heads=2, key_size=2, attention_axes=attention_axes)
    batch_size, hidden_size = 3, 8
    # Generate data for the input (non-mask) tensors.
    query_shape = [batch_size] + q_dims + [hidden_size]
    value_shape = [batch_size] + v_dims + [hidden_size]
    mask_shape = [batch_size] + mask_dims
    query = 10 * np.random.random_sample(query_shape)
    value = 10 * np.random.random_sample(value_shape)

    # Invoke the data with a random set of mask data. This should mask at least
    # one element.
    mask_data = np.random.randint(2, size=mask_shape).astype("bool")
    output = test_layer(query=query, value=value, attention_mask=mask_data)

    # Invoke the same data, but with a null mask (where no elements are masked).
    null_mask_data = np.ones(mask_shape)
    unmasked_output = test_layer(
        query=query, value=value, attention_mask=null_mask_data)
    # Because one data is masked and one is not, the outputs should not be the
    # same.
    self.assertNotAllClose(output, unmasked_output)


class SubclassAttention(attention.MultiHeadAttention):
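  """Trivial MultiHeadAttention subclass that bypasses attention.

  Overriding the internal `_build_attention`/`_compute_attention` hooks and
  returning the value tensor unchanged lets the test below check only that
  these hooks can be subclassed and that output shapes are preserved.
  """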

  def _build_attention(self, qkv_rank):
    pass

  def _compute_attention(self,
                         query_tensor,
                         key_tensor,
                         value_tensor,
                         attention_mask=None):
    return value_tensor, None


@keras_parameterized.run_all_keras_modes
class AttentionSubclassTest(keras_parameterized.TestCase):

  def test_initializer(self):
    """Test with a specified initializer."""
    test_layer = SubclassAttention(num_heads=12, key_size=64)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])


def _create_cache(batch_size, init_decode_length, num_heads, head_size):
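  """Builds a zero-initialized key/value cache for the decoding tests.

  Both entries have shape [batch_size, init_decode_length, num_heads,
  head_size]: the CPU/GPU test starts from an empty cache
  (init_decode_length=0), while the padded TPU decode test pre-allocates the
  full target sequence length.
  """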
  return {
      "key":
          tf.zeros([batch_size, init_decode_length, num_heads, head_size],
                   dtype=tf.float32),
      "value":
          tf.zeros([batch_size, init_decode_length, num_heads, head_size],
                   dtype=tf.float32)
  }


@keras_parameterized.run_all_keras_modes
class CachedAttentionTest(keras_parameterized.TestCase):

  def test_masked_attention(self):
    """Test with a mask tensor."""
    num_heads, head_size = 2, 2
    # Create a 3-dimensional input (the first dimension is implicit).
    from_seq_length = 4
    batch_size = 3
    # GPU/CPU case.
    init_decode_length = 0
    # Directly tests the keras layer.
    cache = _create_cache(batch_size, init_decode_length, num_heads, head_size)
    layer = attention.CachedAttention(num_heads=num_heads, key_size=head_size)

    # Generate data for the input (non-mask) tensors.
    from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32)
    # Invoke the data with a random set of mask data. This should mask at least
    # one element.
    mask_data = np.random.randint(
        2, size=(batch_size, from_seq_length, from_seq_length))
    masked_output_data, cache = layer(
        query=from_data, value=from_data, attention_mask=mask_data, cache=cache)
    self.assertEqual(masked_output_data.shape, (3, 4, 8))
    self.assertEqual(cache["value"].shape, (3, 4, 2, 2))

    # Tests inputs without cache.
    masked_output_data, cache = layer(
        query=from_data, value=from_data, attention_mask=mask_data)
    self.assertEqual(masked_output_data.shape, (3, 4, 8))
    self.assertIsNone(cache)

  def test_padded_decode(self):
    """Test with a mask tensor."""
    num_heads, head_size = 2, 2
    from_seq_length = 4
    # TPU decoding should pre-allocate the entire sequence.
    batch_size = 3
    init_decode_length = from_seq_length

    # Directly tests the keras layer.
    cache = _create_cache(batch_size, init_decode_length, num_heads, head_size)
    layer = attention.CachedAttention(num_heads=num_heads, key_size=head_size)

    # Generate data for the input (non-mask) tensors.
    from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32)
    decode_loop_step = 2
    mask_data = np.random.randint(
        2, size=(batch_size, from_seq_length, from_seq_length), dtype=np.int32)
    # Testing the invocation directly as Keras cannot consume inputs correctly.
    masked_output_data, cache = layer(
        query=from_data,
        value=from_data,
        attention_mask=mask_data,
        cache=cache,
        decode_loop_step=decode_loop_step)
    self.assertEqual(masked_output_data.shape, (3, 4, 8))
    self.assertEqual(cache["value"].shape, (3, 4, 2, 2))


if __name__ == "__main__":
  tf.test.main()