# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import math

import tensorflow as tf


EinsumDense = tf.keras.layers.experimental.EinsumDense
MultiHeadAttention = tf.keras.layers.MultiHeadAttention


@tf.keras.utils.register_keras_serializable(package="Text")
class CachedAttention(tf.keras.layers.MultiHeadAttention):
  """Attention layer with cache used for auto-agressive decoding.

  Arguments are the same as the `MultiHeadAttention` layer.
  """

  def _update_cache(self, key, value, cache, decode_loop_step):
    """Updates cache states and gets full-length key/value tensors."""
    # Combines cached keys and values with new keys and values.
    if decode_loop_step is not None:
      # TPU special case: the cache has a fixed maximum decode length, so the
      # new key/value is scattered into position `decode_loop_step` with a
      # one-hot mask instead of being concatenated along the sequence axis.
      key_seq_dim = cache["key"].shape.as_list()[1]
      indices = tf.reshape(
          tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),
          [1, key_seq_dim, 1, 1])
      key = cache["key"] + key * indices
      value_seq_dim = cache["value"].shape.as_list()[1]
      indices = tf.reshape(
          tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),
          [1, value_seq_dim, 1, 1])
      value = cache["value"] + value * indices
    else:
      key = tf.concat([tf.cast(cache["key"], key.dtype), key], axis=1)
      value = tf.concat([tf.cast(cache["value"], value.dtype), value], axis=1)

    # Update cache
    cache["key"] = key
    cache["value"] = value

    return key, value

  def call(self,
           query,
           value,
           key=None,
           attention_mask=None,
           cache=None,
           decode_loop_step=None,
           return_attention_scores=False):
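    """Forward pass with an optional key/value cache for decoding.

    Args:
      query: Query tensor of shape `[B, F, dim]`.
      value: Value tensor of shape `[B, T, dim]`.
      key: Optional key tensor; defaults to `value` if not given.
      attention_mask: Optional mask that prevents attention to certain
        positions.
      cache: Optional dict with cached `key` and `value` projections of shape
        `[B, cached_length, num_heads, key_dim]`; it is updated in place.
      decode_loop_step: Current step of a fixed-length decoding loop (used for
        the TPU cache update path).
      return_attention_scores: If True, the attention scores are also returned.

    Returns:
      `(attention_output, cache)`, or
      `(attention_output, attention_scores, cache)` if
      `return_attention_scores` is True.
    """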
    if not self._built_from_signature:
      self._build_from_signature(query=query, value=value, key=key)
    if key is None:
      key = value

    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`
    # `query` = [B, F, N, H]
    query = self._query_dense(query)

    # `key` = [B, T, N, H]
    key = self._key_dense(key)

    # `value` = [B, T, N, H]
    value = self._value_dense(value)

    if cache:
      key, value = self._update_cache(key, value, cache, decode_loop_step)

    # Scale the query by 1/sqrt(key_dim), as in scaled dot-product attention.
    query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))

    # Take the dot product between "query" and "key" to get the raw
    # attention scores.
    attention_scores = tf.einsum(self._dot_product_equation, key, query)

    # Normalize the attention scores to probabilities.
    # `attention_scores` = [B, N, F, T]
    attention_scores = self._masked_softmax(attention_scores, attention_mask)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_scores = self._dropout_layer(attention_scores)
    # `context_layer` = [B, F, N, H]
    attention_output = tf.einsum(self._combine_equation, attention_scores,
                                 value)
    attention_output = self._output_dense(attention_output)
    if return_attention_scores:
      return attention_output, attention_scores, cache
    return attention_output, cache
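

# A minimal usage sketch (illustrative only; not part of the original layer
# code). It assumes a preallocated cache of shape
# [batch, max_decode_len, num_heads, key_dim] and runs a single decoding step.
# In real decoding you would also pass an `attention_mask` that hides cache
# positions that have not been written yet.
if __name__ == "__main__":
  batch_size, max_decode_len, num_heads, key_dim = 2, 8, 4, 16
  layer = CachedAttention(num_heads=num_heads, key_dim=key_dim)
  # The cache stores the projected keys/values for every decode position.
  cache = {
      "key": tf.zeros([batch_size, max_decode_len, num_heads, key_dim]),
      "value": tf.zeros([batch_size, max_decode_len, num_heads, key_dim]),
  }
  # Decode one token (sequence length 1) at loop step 0; the layer scatters the
  # new key/value into the cache and attends over the full cached sequence.
  step_input = tf.random.uniform([batch_size, 1, num_heads * key_dim])
  output, cache = layer(
      step_input, step_input, cache=cache, decode_loop_step=0)
  print(output.shape)  # (batch_size, 1, num_heads * key_dim)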