# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import math

import tensorflow as tf

EinsumDense = tf.keras.layers.experimental.EinsumDense
MultiHeadAttention = tf.keras.layers.MultiHeadAttention


@tf.keras.utils.register_keras_serializable(package="Text")
class CachedAttention(tf.keras.layers.MultiHeadAttention):
  """Attention layer with cache used for auto-agressive decoding.

  Arguments are the same as `tf.keras.layers.MultiHeadAttention` layer.
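
  The `cache` dict passed to `call` holds the accumulated "key" and "value"
  projections with shape `[batch_size, max_sequence_length, num_heads,
  head_size]`; `_update_cache` either scatters the current step into it
  (when `decode_loop_step` is set, the TPU path) or appends new entries
  along the sequence axis.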
  """

  def _update_cache(self, key, value, cache, decode_loop_step):
    """Updates cache states and gets full-length key/value tensors."""
    # Combines cached keys and values with new keys and values.
    if decode_loop_step is not None:
      # TPU special case.
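      # The step's key/value has sequence length 1. Multiplying it by a
      # one-hot mask over the cache's sequence axis and adding writes it into
      # position `decode_loop_step` of the pre-allocated cache without
      # re-allocating it; e.g. decode_loop_step=2 with a cache length of 4
      # gives the mask [0, 0, 1, 0], reshaped to [1, 4, 1, 1] so it
      # broadcasts over batch and heads.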
      key_seq_dim = cache["key"].shape.as_list()[1]
      indices = tf.reshape(
          tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),
          [1, key_seq_dim, 1, 1])
      key = cache["key"] + key * indices
      value_seq_dim = cache["value"].shape.as_list()[1]
      indices = tf.reshape(
          tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),
          [1, value_seq_dim, 1, 1])
      value = cache["value"] + value * indices
    else:
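      # Without a decode loop step (the CPU/GPU path), the cache simply
      # grows: append the new keys and values along the sequence axis.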
      key = tf.concat([tf.cast(cache["key"], key.dtype), key], axis=1)
      value = tf.concat([tf.cast(cache["value"], value.dtype), value], axis=1)

    # Update cache
    cache["key"] = key
    cache["value"] = value

    return key, value

  def call(self,
           query,
           value,
           key=None,
           attention_mask=None,
           cache=None,
           decode_loop_step=None,
           return_attention_scores=False):
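    """Computes attention, reusing and updating `cache` when provided.

    `query`, `value`, `key`, `attention_mask`, and `return_attention_scores`
    behave as in `tf.keras.layers.MultiHeadAttention`. `cache` is a dict of
    previously projected "key"/"value" tensors, and `decode_loop_step` is the
    current decoding position used for the in-place cache update on TPU.

    Returns `(attention_output, cache)`, or
    `(attention_output, attention_scores, cache)` if
    `return_attention_scores=True`.
    """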
    if not self._built_from_signature:
      self._build_from_signature(query=query, value=value, key=key)
    if key is None:
      key = value

    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`
    # `query` = [B, F, N, H]
    query = self._query_dense(query)

    # `key` = [B, T, N, H]
    key = self._key_dense(key)

    # `value` = [B, T, N, H]
    value = self._value_dense(value)

    if cache:
      key, value = self._update_cache(key, value, cache, decode_loop_step)

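    # Scale the query by 1/sqrt(key_dim), the standard scaled dot-product
    # attention normalization.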
    query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))

    # Take the dot product between "query" and "key" to get the raw
    # attention scores.
    attention_scores = tf.einsum(self._dot_product_equation, key, query)

    # Normalize the attention scores to probabilities.
    # `attention_scores` = [B, N, F, T]
    attention_scores = self._masked_softmax(attention_scores, attention_mask)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_scores = self._dropout_layer(attention_scores)
    # `context_layer` = [B, F, N, H]
    attention_output = tf.einsum(self._combine_equation, attention_scores,
                                 value)
    attention_output = self._output_dense(attention_output)
    if return_attention_scores:
      return attention_output, attention_scores, cache
    return attention_output, cache
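

if __name__ == "__main__":
  # Minimal smoke-test sketch of cached decoding. The concrete shapes below
  # are illustrative assumptions, not part of the layer's API: the cache is
  # pre-allocated to the maximum decode length, as the `decode_loop_step`
  # (TPU) path of `_update_cache` expects, with layout
  # [batch, max_decode_length, num_heads, head_size].
  batch_size, max_decode_length, num_heads, head_size = 2, 8, 2, 4
  layer = CachedAttention(num_heads=num_heads, key_dim=head_size)
  cache = {
      "key": tf.zeros([batch_size, max_decode_length, num_heads, head_size]),
      "value": tf.zeros([batch_size, max_decode_length, num_heads, head_size]),
  }
  # One decoding step: the new token has sequence length 1 and attends to the
  # whole (mostly empty) cache.
  step_input = tf.random.normal([batch_size, 1, num_heads * head_size])
  output, cache = layer(step_input, step_input, cache=cache,
                        decode_loop_step=0)
  print("attention output shape:", output.shape)  # (2, 1, 8)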