# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the attention layer."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import attention


# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class MultiHeadAttentionTest(keras_parameterized.TestCase):

  @parameterized.named_parameters(
      ("key_value_same_proj", None, None, [40, 80]),
      ("key_value_different_proj", 32, 60, [40, 60]),
  )
  def test_non_masked_attention(self, value_size, output_shape, output_dims):
    """Test that the attention layer can be created without a mask tensor."""
    test_layer = attention.MultiHeadAttention(
        num_heads=12,
        key_size=64,
        value_size=value_size,
        output_shape=output_shape)
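    # When value_size is None it defaults to key_size, and when output_shape
    # is None the layer projects back to the query's last dimension (80 here);
    # otherwise the last output dimension equals output_shape (60 in the
    # second parameterization).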
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    value = tf.keras.Input(shape=(20, 80))
    output = test_layer(query=query, value=value)
    self.assertEqual(output.shape.as_list(), [None] + output_dims)

  def test_non_masked_self_attention(self):
    """Test with one input (self-attenntion) and no mask tensor."""
    test_layer = attention.MultiHeadAttention(num_heads=12, key_size=64)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])

  def test_attention_scores(self):
    """Test attention outputs with coefficients."""
    test_layer = attention.MultiHeadAttention(
        num_heads=12, key_size=64, return_attention_scores=True)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output, coef = test_layer(query, query)
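    # With return_attention_scores=True the layer also returns the attention
    # probabilities, shaped [batch, num_heads, query_length, key_length].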
    self.assertEqual(output.shape.as_list(), [None, 40, 80])
    self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])

  @parameterized.named_parameters(("with_bias", True), ("no_bias", False))
  def test_masked_attention(self, use_bias):
    """Test with a mask tensor."""
    test_layer = attention.MultiHeadAttention(
        num_heads=2, key_size=2, use_bias=use_bias)
    # Create a 3-dimensional input (the first dimension is implicit).
    batch_size = 3
    query = tf.keras.Input(shape=(4, 8))
    value = tf.keras.Input(shape=(2, 8))
    mask_tensor = tf.keras.Input(shape=(4, 2))
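    # The mask's trailing dimensions are (query_length, value_length) = (4, 2);
    # positions with a 1 may be attended to, positions with a 0 are masked out.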
    output = test_layer(query=query, value=value, attention_mask=mask_tensor)

    # Create a model containing the test layer.
    model = tf.keras.Model([query, value, mask_tensor], output)

    # Generate data for the input (non-mask) tensors.
    from_data = 10 * np.random.random_sample((batch_size, 4, 8))
    to_data = 10 * np.random.random_sample((batch_size, 2, 8))

    # Invoke the model with a random set of mask data. This should mask at
    # least one element.
    mask_data = np.random.randint(2, size=(batch_size, 4, 2))
    masked_output_data = model.predict([from_data, to_data, mask_data])

    # Invoke the model with the same data, but with a null mask (no elements
    # are masked).
    null_mask_data = np.ones((batch_size, 4, 2))
    unmasked_output_data = model.predict([from_data, to_data, null_mask_data])

    # Because one output is masked and one is not, the outputs should not be
    # the same.
    self.assertNotAllClose(masked_output_data, unmasked_output_data)

    # Tests the layer with three inputs: Q, K, V.
    key = tf.keras.Input(shape=(2, 8))
    output = test_layer(query, value=value, key=key, attention_mask=mask_tensor)
    model = tf.keras.Model([query, value, key, mask_tensor], output)

    masked_output_data = model.predict([from_data, to_data, to_data, mask_data])
    unmasked_output_data = model.predict(
        [from_data, to_data, to_data, null_mask_data])
    # Because one output is masked and one is not, the outputs should not be
    # the same.
    self.assertNotAllClose(masked_output_data, unmasked_output_data)

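    # Each dense projection has a kernel and, when use_bias=True, a bias, so
    # the biased case exposes two trainable variables per projection.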
    if use_bias:
      self.assertLen(test_layer._query_dense.trainable_variables, 2)
      self.assertLen(test_layer._output_dense.trainable_variables, 2)
    else:
      self.assertLen(test_layer._query_dense.trainable_variables, 1)
      self.assertLen(test_layer._output_dense.trainable_variables, 1)

  def test_initializer(self):
    """Test with a specified initializer."""
    test_layer = attention.MultiHeadAttention(
        num_heads=12,
        key_size=64,
        kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])

  @parameterized.named_parameters(
      ("4d_inputs_one_free_batch", [3, 4], [3, 2], [4, 2], (2,)),
      ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
      ("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)))
  def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
    """Test with a mask tensor."""
    test_layer = attention.MultiHeadAttention(
        num_heads=2, key_size=2, attention_axes=attention_axes)
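    # attention_axes selects the axes over which attention is computed; query
    # axes that are not listed are treated as batch-like dimensions.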
    batch_size, hidden_size = 3, 8
    # Generate data for the input (non-mask) tensors.
    query_shape = [batch_size] + q_dims + [hidden_size]
    value_shape = [batch_size] + v_dims + [hidden_size]
    mask_shape = [batch_size] + mask_dims
    query = 10 * np.random.random_sample(query_shape)
    value = 10 * np.random.random_sample(value_shape)

    # Invoke the layer with a random set of mask data. This should mask at
    # least one element.
    mask_data = np.random.randint(2, size=mask_shape).astype("bool")
    output = test_layer(query=query, value=value, attention_mask=mask_data)

    # Invoke the layer with the same data, but with a null mask (no elements
    # are masked).
    null_mask_data = np.ones(mask_shape)
    unmasked_output = test_layer(
        query=query, value=value, attention_mask=null_mask_data)
    # Because one output is masked and one is not, the outputs should not be
    # the same.
    self.assertNotAllClose(output, unmasked_output)


class SubclassAttention(attention.MultiHeadAttention):
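  """Subclass that stubs out attention to exercise the override hooks."""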

  def _build_attention(self, qkv_rank):
    pass

  def _compute_attention(self,
                         query_tensor,
                         key_tensor,
                         value_tensor,
                         attention_mask=None):
    return value_tensor, None


@keras_parameterized.run_all_keras_modes
class AttentionSubclassTest(keras_parameterized.TestCase):

  def test_initializer(self):
    """Test with a specified initializer."""
    test_layer = SubclassAttention(
        num_heads=12,
        key_size=64)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query, query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])


def _create_cache(batch_size, init_decode_length, num_heads, head_size):
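  """Returns zero-initialized key/value caches for decoding.

  Each cache tensor has shape [batch_size, init_decode_length, num_heads,
  head_size].
  """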
  return {
      "key":
          tf.zeros([batch_size, init_decode_length, num_heads, head_size],
                   dtype=tf.float32),
      "value":
          tf.zeros([batch_size, init_decode_length, num_heads, head_size],
                   dtype=tf.float32)
  }


@keras_parameterized.run_all_keras_modes
class CachedAttentionTest(keras_parameterized.TestCase):

  def test_masked_attention(self):
    """Test with a mask tensor."""
    num_heads, head_size = 2, 2
    # Create a 3-dimensional input (the first dimension is implicit).
    from_seq_length = 4
    batch_size = 3
    # GPU/CPU case.
    init_decode_length = 0
    # Directly tests the keras layer.
    cache = _create_cache(batch_size, init_decode_length, num_heads, head_size)
    layer = attention.CachedAttention(num_heads=num_heads, key_size=head_size)

    # Generate data for the input (non-mask) tensors.
    from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32)
    # Invoke the layer with a random set of mask data. This should mask at
    # least one element.
    mask_data = np.random.randint(
        2, size=(batch_size, from_seq_length, from_seq_length))
    masked_output_data, cache = layer(
        query=from_data, value=from_data, attention_mask=mask_data, cache=cache)
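    # Starting from an empty cache (init_decode_length=0), the returned cache
    # now holds keys/values for the full 4-step sequence.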
    self.assertEqual(masked_output_data.shape, (3, 4, 8))
    self.assertEqual(cache["value"].shape, (3, 4, 2, 2))

    # Tests inputs without cache.
    masked_output_data, cache = layer(
        query=from_data, value=from_data, attention_mask=mask_data)
    self.assertEqual(masked_output_data.shape, (3, 4, 8))
    self.assertIsNone(cache)

  def test_padded_decode(self):
    """Test with a mask tensor."""
    num_heads, head_size = 2, 2
    from_seq_length = 4
    # TPU decoding should pre-allocate the entire sequence.
    batch_size = 3
    init_decode_length = from_seq_length

    # Directly tests the keras layer.
    cache = _create_cache(batch_size, init_decode_length, num_heads, head_size)
    layer = attention.CachedAttention(num_heads=num_heads, key_size=head_size)

    # Generate data for the input (non-mask) tensors.
    from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32)
    decode_loop_step = 2
    mask_data = np.random.randint(
        2, size=(batch_size, from_seq_length, from_seq_length), dtype=np.int32)
    # Test the invocation directly, since a Keras model cannot consume these
    # inputs correctly.
    masked_output_data, cache = layer(
        query=from_data,
        value=from_data,
        attention_mask=mask_data,
        cache=cache,
        decode_loop_step=decode_loop_step)
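    # decode_loop_step selects the position in the pre-allocated cache that
    # this step writes to; the cache shape stays fixed at the full sequence.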
    self.assertEqual(masked_output_data.shape, (3, 4, 8))
    self.assertEqual(cache["value"].shape, (3, 4, 2, 2))


if __name__ == "__main__":
  tf.test.main()