# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer block layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import gin
import tensorflow as tf

from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import multi_channel_attention
from official.nlp.modeling.layers.util import tf_function_if_eager


@tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(tf.keras.layers.Layer):
  """Transformer layer.

  This layer implements the Transformer from "Attention Is All You Need".
  (https://arxiv.org/abs/1706.03762).

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output
      dropout layers.
    attention_dropout_rate: Dropout probability within the attention layer.
    output_range: The sequence output range, [0, output_range), obtained by
      slicing the target sequence. `None` means the target sequence is not
      sliced.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use bias in the attention layer.
    norm_first: Whether to normalize the inputs to the attention and
      intermediate dense layers (pre-layer-norm) instead of the outputs
      (post-layer-norm).
    norm_epsilon: Epsilon value for the layer normalization layers.
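
  Example:

    A minimal usage sketch; the hyperparameter values and shapes below are
    illustrative, not defaults:

      encoder_block = Transformer(
          num_attention_heads=8,
          intermediate_size=2048,
          intermediate_activation="relu")
      inputs = tf.ones((2, 16, 512))  # [batch, sequence, width]
      mask = tf.ones((2, 16, 16))     # [batch, sequence, sequence]
      outputs = encoder_block([inputs, mask])  # shape: (2, 16, 512)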
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               output_range=None,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               **kwargs):
    super(Transformer, self).__init__(**kwargs)

    self._num_heads = num_attention_heads
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._attention_dropout_rate = attention_dropout_rate
    self._dropout_rate = dropout_rate
    self._output_range = output_range
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon

  def build(self, input_shape):
    input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
    input_tensor_shape = tf.TensorShape(input_tensor)
    if len(input_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    batch_size, sequence_length, hidden_size = input_tensor_shape

    if len(input_shape) == 2:
      mask_tensor_shape = tf.TensorShape(input_shape[1])
      expected_mask_tensor_shape = tf.TensorShape(
          [batch_size, sequence_length, sequence_length])
      if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
        raise ValueError("When passing a mask tensor to TransformerLayer, the "
                         "mask tensor must be of shape [batch, "
                         "sequence_length, sequence_length] (here %s). Got a "
                         "mask tensor of shape %s." %
                         (expected_mask_tensor_shape, mask_tensor_shape))
    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    self._attention_layer = attention.MultiHeadAttention(
        num_heads=self._num_heads,
        key_size=self._attention_head_size,
        dropout=self._attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    # pylint: disable=protected-access
    # Temporarily handling for checkpoint compatible changes.
    self._attention_layer._build_from_signature(
        query=input_tensor_shape, value=input_tensor_shape)
    self._attention_output_dense = self._attention_layer._output_dense
    # pylint: enable=protected-access
    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    # It is probably safe in mixed_float16, but we haven't validated this yet.
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon,
            dtype=tf.float32))
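    # EinsumDense "abc,cd->abd" is a dense projection over the last axis:
    # a=batch, b=sequence, c=input width, d=output units.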
    self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self._intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    policy = tf.keras.mixed_precision.experimental.global_policy()
    if policy.name == "mixed_bfloat16":
      # bfloat16 causes BERT with the LAMB optimizer to not converge
      # as well, so we use float32.
      # TODO(b/154538392): Investigate this.
      policy = tf.float32
    self._intermediate_activation_layer = tf.keras.layers.Activation(
        self._intermediate_activation, dtype=policy)
    self._output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon,
        dtype=tf.float32)

    super(Transformer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "output_range":
            self._output_range,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint)
    }
    base_config = super(Transformer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
      input_tensor, attention_mask = inputs
    else:
      input_tensor, attention_mask = (inputs, None)

    if self._output_range:
      target_tensor = input_tensor[:, 0:self._output_range, :]
      if attention_mask is not None:
        attention_mask = attention_mask[:, 0:self._output_range, :]
    else:
      if self._norm_first:
        source_tensor = input_tensor
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor

    attention_output = self._attention_layer(
        query=target_tensor, value=input_tensor, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
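    # With norm_first (pre-layer-norm), the residual is added back to the
    # un-normalized source tensor; otherwise (post-layer-norm) the residual
    # sum is passed through the layer norm.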
    if self._norm_first:
      attention_output = source_tensor + attention_output
    else:
      attention_output = self._attention_layer_norm(target_tensor +
                                                    attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self._output_layer_norm(attention_output)
    intermediate_output = self._intermediate_dense(attention_output)
    intermediate_output = self._intermediate_activation_layer(
        intermediate_output)
    layer_output = self._output_dense(intermediate_output)
    layer_output = self._output_dropout(layer_output)
    # During mixed precision training, attention_output is from layer norm and
    # is always fp32 for now. Cast layer_output to fp32 for the subsequent
    # add.
    layer_output = tf.cast(layer_output, tf.float32)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self._output_layer_norm(layer_output + attention_output)

    return layer_output


@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class CompiledTransformer(Transformer):
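  """Transformer layer whose `call` is XLA-compiled when run eagerly."""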

  @tf_function_if_eager(experimental_compile=True)
  def call(self, inputs):
    return super(CompiledTransformer, self).call(inputs)


@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerDecoderLayer(tf.keras.layers.Layer):
  """Single transformer layer for decoder.

  It has three sub-layers:
  (1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention.
  (3) a positionwise fully connected feed-forward network.

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output
      dropout layers.
    attention_dropout_rate: Dropout probability within the attention layer.
    multi_channel_cross_attention: Whether to use `MultiChannelAttention` for
      cross-attention between target sequences and source sequences.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use bias in the attention layers.
    norm_first: Whether to normalize the inputs to the attention and
      intermediate dense layers (pre-layer-norm) instead of the outputs
      (post-layer-norm).
    norm_epsilon: Epsilon value for the layer normalization layers.
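
  Example:

    A minimal usage sketch; the hyperparameter values and shapes below are
    illustrative, not defaults:

      decoder_block = TransformerDecoderLayer(
          num_attention_heads=8,
          intermediate_size=2048,
          intermediate_activation="relu")
      targets = tf.ones((2, 10, 512))    # [batch, target_length, width]
      memory = tf.ones((2, 16, 512))     # [batch, source_length, width]
      cross_mask = tf.ones((2, 10, 16))  # [batch, target_len, source_len]
      self_mask = tf.ones((2, 10, 10))   # [batch, target_len, target_len]
      outputs, _ = decoder_block([targets, memory, cross_mask, self_mask])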
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               multi_channel_cross_attention=False,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               **kwargs):
    super(TransformerDecoderLayer, self).__init__(**kwargs)
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.intermediate_activation = tf.keras.activations.get(
        intermediate_activation)
    self.dropout_rate = dropout_rate
    self.attention_dropout_rate = attention_dropout_rate
    self.multi_channel_cross_attention = multi_channel_cross_attention
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon
    if self.multi_channel_cross_attention:
      self._cross_attention_cls = multi_channel_attention.MultiChannelAttention
    else:
      self._cross_attention_cls = attention.MultiHeadAttention

  def build(self, input_shape):
    target_tensor_shape = tf.TensorShape(input_shape[0])
    if len(target_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    hidden_size = target_tensor_shape[2]
    if hidden_size % self.num_attention_heads != 0:
      raise ValueError(
          "The hidden size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self.num_attention_heads))
    self.attention_head_size = int(hidden_size / self.num_attention_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    # Self attention.
    self.self_attention = attention.CachedAttention(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    self.self_attention_output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.self_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.self_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1, epsilon=self._norm_epsilon))
    # Encoder-decoder attention.
    self.encdec_attention = self._cross_attention_cls(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        output_shape=hidden_size,
        use_bias=self._use_bias,
        name="attention/encdec",
        **common_kwargs)

    self.encdec_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.encdec_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="attention/encdec_output_layer_norm",
            axis=-1, epsilon=self._norm_epsilon))

    # Feed-forward projection.
    self.intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self.intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    self.intermediate_activation_layer = tf.keras.layers.Activation(
        self.intermediate_activation)
    self.output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
    self.output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon)
    super(TransformerDecoderLayer, self).build(input_shape)

  def common_layers_with_encoder(self):
    """Gets layer objects that can make a Transformer encoder block."""
    return [
        self.self_attention, self.self_attention_layer_norm,
        self.intermediate_dense, self.output_dense, self.output_layer_norm
    ]

  def call(self, inputs, cache=None, decode_loop_step=None):
    if self.multi_channel_cross_attention:
      if len(inputs) != 5:
        raise ValueError(
            "TransformerDecoderLayer must have 5 inputs, when it uses "
            "multi_channel_cross_attention. But it got: %d" % len(inputs))
    elif len(inputs) != 4:
      raise ValueError(
          "TransformerDecoderLayer must have 4 inputs, but it got: %d" %
          len(inputs))
    input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]
    source_tensor = input_tensor
    if self._norm_first:
      input_tensor = self.self_attention_layer_norm(input_tensor)
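    # `cache` stores previously computed key/value tensors and
    # `decode_loop_step` indexes into it, so CachedAttention can be invoked
    # one step at a time during autoregressive decoding.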
    self_attention_output, cache = self.self_attention(
        query=input_tensor,
        value=input_tensor,
        attention_mask=self_attention_mask,
        cache=cache,
        decode_loop_step=decode_loop_step)
    self_attention_output = self.self_attention_dropout(self_attention_output)
    if self._norm_first:
      self_attention_output = source_tensor + self_attention_output
    else:
      self_attention_output = self.self_attention_layer_norm(
          input_tensor + self_attention_output)
    if self._norm_first:
      source_self_attention_output = self_attention_output
      self_attention_output = self.encdec_attention_layer_norm(
          self_attention_output)
    cross_attn_inputs = dict(
        query=self_attention_output,
        value=memory,
        attention_mask=attention_mask)
    if self.multi_channel_cross_attention:
      # Accesses the fifth input tensor for the doc-attention probabilities.
      cross_attn_inputs["context_attention_weights"] = inputs[-1]
    attention_output = self.encdec_attention(**cross_attn_inputs)
    attention_output = self.encdec_attention_dropout(attention_output)
    if self._norm_first:
      attention_output = source_self_attention_output + attention_output
    else:
      attention_output = self.encdec_attention_layer_norm(
          self_attention_output + attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self.output_layer_norm(attention_output)

    intermediate_output = self.intermediate_dense(attention_output)
    intermediate_output = self.intermediate_activation_layer(
        intermediate_output)
    layer_output = self.output_dense(intermediate_output)
    layer_output = self.output_dropout(layer_output)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self.output_layer_norm(layer_output + attention_output)
    return layer_output, cache