# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import math
import string

import tensorflow as tf


EinsumDense = tf.keras.layers.experimental.EinsumDense
_CHR_IDX = string.ascii_lowercase


MultiHeadAttention = tf.keras.layers.MultiHeadAttention


@tf.keras.utils.register_keras_serializable(package="Text")
class CachedAttention(tf.keras.layers.MultiHeadAttention):
  """Attention layer with cache used for auto-agressive decoding.

  Arguments are the same as `MultiHeadAttention` layer.
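
  A minimal usage sketch for one decoding step (the shapes and the
  [batch, max_decode_length, num_heads, key_dim] cache layout below are
  read off `_update_cache` and are illustrative, not a public contract):

    layer = CachedAttention(num_heads=2, key_dim=64)
    cache = {
        "key": tf.zeros([2, 8, 2, 64]),   # [B, max_decode_length, N, H]
        "value": tf.zeros([2, 8, 2, 64]),
    }
    step_input = tf.ones([2, 1, 16])      # [B, 1, hidden] for this step
    output, cache = layer(query=step_input, value=step_input,
                          cache=cache, decode_loop_step=0)

  In real decoding an `attention_mask` would normally hide the cache slots
  that have not been written yet.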
  """

  def _update_cache(self, key, value, cache, decode_loop_step):
    """Updates cache states and gets full-length key/value tensors."""
    # Combines cached keys and values with new keys and values.
    if decode_loop_step is not None:
      # TPU special case.
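      # The cache is pre-allocated to the full decode length; write this
      # step's key/value into position `decode_loop_step` via a one-hot mask
      # rather than a concat, so tensor shapes stay static inside the
      # XLA-compiled TPU decoding loop.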
      key_seq_dim = cache["key"].shape.as_list()[1]
      indices = tf.reshape(
          tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),
          [1, key_seq_dim, 1, 1])
      key = cache["key"] + key * indices
      value_seq_dim = cache["value"].shape.as_list()[1]
      indices = tf.reshape(
          tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),
          [1, value_seq_dim, 1, 1])
      value = cache["value"] + value * indices
    else:
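      # Off-TPU the shapes may grow dynamically, so simply append the new
      # key/value to the cache along the sequence axis.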
      key = tf.concat([tf.cast(cache["key"], key.dtype), key], axis=1)
      value = tf.concat([tf.cast(cache["value"], value.dtype), value], axis=1)

    # Update cache
    cache["key"] = key
    cache["value"] = value

    return key, value

  def call(self,
           query,
           value,
           key=None,
           attention_mask=None,
           cache=None,
           decode_loop_step=None,
           return_attention_scores=False):
    if not self._built_from_signature:
      self._build_from_signature(query=query, value=value, key=key)
    if key is None:
      key = value

    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`
    # `query` = [B, F, N, H]
    query = self._query_dense(query)

    # `key` = [B, T, N, H]
    key = self._key_dense(key)

    # `value` = [B, T, N, H]
    value = self._value_dense(value)

    if cache:
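      # Merge this step's key/value with the cached ones so attention covers
      # all previously decoded positions as well as the current step.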
      key, value = self._update_cache(key, value, cache, decode_loop_step)

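    # Scale the query by 1/sqrt(key_dim), as in scaled dot-product attention.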
    query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))

    # Take the dot product between "query" and "key" to get the raw
    # attention scores.
    attention_scores = tf.einsum(self._dot_product_equation, key, query)

    # Normalize the attention scores to probabilities.
    # `attention_scores` = [B, N, F, T]
    attention_scores = self._masked_softmax(attention_scores, attention_mask)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_scores = self._dropout_layer(attention_scores)
    # `context_layer` = [B, F, N, H]
    attention_output = tf.einsum(self._combine_equation, attention_scores,
                                 value)
    attention_output = self._output_dense(attention_output)
    if return_attention_scores:
      return attention_output, attention_scores, cache
    return attention_output, cache