# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer block layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import gin
import tensorflow as tf

from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import multi_channel_attention
from official.nlp.modeling.layers.util import tf_function_if_eager


@tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(tf.keras.layers.Layer):
  """Transformer layer.

  This layer implements the Transformer from "Attention Is All You Need"
  (https://arxiv.org/abs/1706.03762).

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    output_range: The sequence output range, [0, output_range), obtained by
      slicing the target sequence. `None` means the target sequence is not
      sliced.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use a bias term in the attention layer. If set to
      False, bias is disabled throughout the attention layer.
    norm_first: Whether to normalize inputs to the attention and intermediate
      dense layers. If set to False, the outputs of the attention and
      intermediate dense layers are normalized instead.
    norm_epsilon: Epsilon value passed to the normalization layers.
    intermediate_dropout: Dropout probability for intermediate_dropout_layer.
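
  Example, as an illustrative sketch (the head count, widths, and shapes
  below are arbitrary choices, not defaults):

  ```python
  block = Transformer(
      num_attention_heads=8,
      intermediate_size=256,
      intermediate_activation="relu")
  data = tf.random.uniform((2, 10, 64))  # [batch, sequence, width]
  output = block(data)                   # shape is preserved: (2, 10, 64)
  ```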
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               output_range=None,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               intermediate_dropout=0.0,
               **kwargs):
    super(Transformer, self).__init__(**kwargs)

    self._num_heads = num_attention_heads
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._attention_dropout_rate = attention_dropout_rate
    self._dropout_rate = dropout_rate
    self._output_range = output_range
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon
    self._intermediate_dropout = intermediate_dropout

  def build(self, input_shape):
    input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
    input_tensor_shape = tf.TensorShape(input_tensor)
    if len(input_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    batch_size, sequence_length, hidden_size = input_tensor_shape

    if len(input_shape) == 2:
      mask_tensor_shape = tf.TensorShape(input_shape[1])
      expected_mask_tensor_shape = tf.TensorShape(
          [batch_size, sequence_length, sequence_length])
      if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
        raise ValueError("When passing a mask tensor to TransformerLayer, the "
                         "mask tensor must be of shape [batch, "
                         "sequence_length, sequence_length] (here %s). Got a "
                         "mask tensor of shape %s." %
                         (expected_mask_tensor_shape, mask_tensor_shape))
    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
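    # Self-attention over the (possibly sliced) target sequence; each of the
    # `num_heads` heads projects to `key_size` features, so the concatenated
    # heads recover `hidden_size`.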
    self._attention_layer = attention.MultiHeadAttention(
        num_heads=self._num_heads,
        key_size=self._attention_head_size,
        dropout=self._attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    # It is probably safe in mixed_float16, but we haven't validated this yet.
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon,
            dtype=tf.float32))
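    # Feed-forward projection: in the einsum "abc,cd->abd", `a` and `b` are
    # the batch and sequence dimensions, and `c -> d` maps the input width to
    # the intermediate size.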
    self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self._intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    policy = tf.keras.mixed_precision.experimental.global_policy()
    if policy.name == "mixed_bfloat16":
      # bfloat16 causes BERT with the LAMB optimizer to not converge
      # as well, so we use float32.
      # TODO(b/154538392): Investigate this.
      policy = tf.float32
    self._intermediate_activation_layer = tf.keras.layers.Activation(
        self._intermediate_activation, dtype=policy)
    self._intermediate_dropout_layer = tf.keras.layers.Dropout(
        rate=self._intermediate_dropout)
    self._output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm",
        axis=-1,
        epsilon=self._norm_epsilon,
        dtype=tf.float32)

    super(Transformer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "output_range":
            self._output_range,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint),
        "use_bias":
            self._use_bias,
        "norm_first":
            self._norm_first,
        "norm_epsilon":
            self._norm_epsilon,
        "intermediate_dropout":
            self._intermediate_dropout
    }
    base_config = super(Transformer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
      input_tensor, attention_mask = inputs
    else:
      input_tensor, attention_mask = (inputs, None)

    if self._output_range:
      # Only the first `output_range` positions act as attention queries; the
      # mask, if present, is sliced to match. Handling `norm_first` here too
      # avoids an undefined `source_tensor` in the pre-norm residual below.
      if self._norm_first:
        source_tensor = input_tensor[:, 0:self._output_range, :]
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor[:, 0:self._output_range, :]
      if attention_mask is not None:
        attention_mask = attention_mask[:, 0:self._output_range, :]
    else:
      if self._norm_first:
        source_tensor = input_tensor
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor

    attention_output = self._attention_layer(
        query=target_tensor, value=input_tensor, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
    if self._norm_first:
      attention_output = source_tensor + attention_output
      source_attention_output = attention_output
      attention_output = self._output_layer_norm(attention_output)
    else:
      attention_output = self._attention_layer_norm(target_tensor +
                                                    attention_output)
    intermediate_output = self._intermediate_dense(attention_output)
    intermediate_output = self._intermediate_activation_layer(
        intermediate_output)
    intermediate_output = self._intermediate_dropout_layer(intermediate_output)
    layer_output = self._output_dense(intermediate_output)
    layer_output = self._output_dropout(layer_output)
    # During mixed precision training, attention_output is from layer norm and
    # is always fp32 for now. Cast layer_output to fp32 for the subsequent
    # add.
    layer_output = tf.cast(layer_output, tf.float32)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self._output_layer_norm(layer_output + attention_output)

    return layer_output


@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class CompiledTransformer(Transformer):
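  """Transformer layer whose `call` is XLA-compiled when running eagerly."""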

  @tf_function_if_eager(experimental_compile=True)
  def call(self, inputs):
    return super(CompiledTransformer, self).call(inputs)


@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerDecoderLayer(tf.keras.layers.Layer):
  """Single transformer layer for decoder.

  It has three sub-layers:
  (1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention mechanism.
  (3) a position-wise fully connected feed-forward network.

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    multi_channel_cross_attention: Whether to use `MultiChannelAttention` for
      cross-attention between target sequences and source sequences.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use a bias term in the attention layer. If set to
      False, bias is disabled throughout the attention layer.
    norm_first: Whether to normalize inputs to the attention and intermediate
      dense layers. If set to False, the outputs of the attention and
      intermediate dense layers are normalized instead.
    norm_epsilon: Epsilon value passed to the normalization layers.
    intermediate_dropout: Dropout probability for intermediate_dropout_layer.
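
  Example, as an illustrative sketch (the shapes and all-ones masks below are
  arbitrary assumptions):

  ```python
  decoder_block = TransformerDecoderLayer(
      num_attention_heads=8,
      intermediate_size=256,
      intermediate_activation="relu")
  target = tf.random.uniform((2, 6, 64))   # decoder input
  memory = tf.random.uniform((2, 10, 64))  # encoder output
  cross_mask = tf.ones((2, 6, 10))         # target-to-memory attention mask
  self_mask = tf.ones((2, 6, 6))           # target self-attention mask
  output, cache = decoder_block([target, memory, cross_mask, self_mask])
  ```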
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               multi_channel_cross_attention=False,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               intermediate_dropout=0.0,
               **kwargs):
    super(TransformerDecoderLayer, self).__init__(**kwargs)
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.intermediate_activation = tf.keras.activations.get(
        intermediate_activation)
    self.dropout_rate = dropout_rate
    self.attention_dropout_rate = attention_dropout_rate
    self.multi_channel_cross_attention = multi_channel_cross_attention
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon
    self._intermediate_dropout = intermediate_dropout
    if self.multi_channel_cross_attention:
      self._cross_attention_cls = multi_channel_attention.MultiChannelAttention
    else:
      self._cross_attention_cls = attention.MultiHeadAttention
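    # The cross-attention class is chosen once here: MultiChannelAttention
    # expects an extra `context_attention_weights` input (see `call`), while
    # the default MultiHeadAttention attends over a single memory tensor.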

  def build(self, input_shape):
    target_tensor_shape = tf.TensorShape(input_shape[0])
    if len(target_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    hidden_size = target_tensor_shape[2]
    if hidden_size % self.num_attention_heads != 0:
      raise ValueError(
          "The hidden size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self.num_attention_heads))
    self.attention_head_size = int(hidden_size / self.num_attention_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    # Self attention.
    self.self_attention = attention.CachedAttention(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    self.self_attention_output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.self_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.self_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon))
    # Encoder-decoder attention.
    self.encdec_attention = self._cross_attention_cls(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        output_shape=hidden_size,
        use_bias=self._use_bias,
        name="attention/encdec",
        **common_kwargs)

    self.encdec_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.encdec_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="attention/encdec_output_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon))

    # Feed-forward projection.
    self.intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self.intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    self.intermediate_activation_layer = tf.keras.layers.Activation(
        self.intermediate_activation)
    self._intermediate_dropout_layer = tf.keras.layers.Dropout(
        rate=self._intermediate_dropout)
    self.output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
    self.output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon)
    super(TransformerDecoderLayer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self.num_attention_heads,
        "intermediate_size":
            self.intermediate_size,
        "intermediate_activation":
            tf.keras.activations.serialize(self.intermediate_activation),
        "dropout_rate":
            self.dropout_rate,
        "attention_dropout_rate":
            self.attention_dropout_rate,
        "multi_channel_cross_attention":
            self.multi_channel_cross_attention,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint),
        "use_bias":
            self._use_bias,
        "norm_first":
            self._norm_first,
        "norm_epsilon":
            self._norm_epsilon,
        "intermediate_dropout":
            self._intermediate_dropout
    }
    base_config = super(TransformerDecoderLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def common_layers_with_encoder(self):
    """Gets layer objects that can make a Transformer encoder block."""
    return [
        self.self_attention, self.self_attention_layer_norm,
        self.intermediate_dense, self.output_dense, self.output_layer_norm
    ]

  def call(self, inputs, cache=None, decode_loop_step=None):
    if self.multi_channel_cross_attention:
      if len(inputs) != 5:
        raise ValueError(
            "TransformerDecoderLayer must have 5 inputs when it uses "
            "multi_channel_cross_attention, but it got %d." % len(inputs))
    elif len(inputs) != 4:
      raise ValueError(
          "TransformerDecoderLayer must have 4 inputs, but it got %d." %
          len(inputs))
    input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]
    source_tensor = input_tensor
    if self._norm_first:
      input_tensor = self.self_attention_layer_norm(input_tensor)
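    # `cache`, when provided, carries key/value projections from earlier
    # decoding steps so self-attention only computes projections for new
    # positions; `decode_loop_step` gives the index to update in that cache.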
    self_attention_output, cache = self.self_attention(
        query=input_tensor,
        value=input_tensor,
        attention_mask=self_attention_mask,
        cache=cache,
        decode_loop_step=decode_loop_step)
    self_attention_output = self.self_attention_dropout(self_attention_output)
    if self._norm_first:
      self_attention_output = source_tensor + self_attention_output
      source_self_attention_output = self_attention_output
      self_attention_output = self.encdec_attention_layer_norm(
          self_attention_output)
    else:
      self_attention_output = self.self_attention_layer_norm(
          input_tensor + self_attention_output)
    cross_attn_inputs = dict(
        query=self_attention_output,
        value=memory,
        attention_mask=attention_mask)
    if self.multi_channel_cross_attention:
      # Accesses the fifth input tensor for the doc-attention probabilities.
      cross_attn_inputs["context_attention_weights"] = inputs[-1]
    attention_output = self.encdec_attention(**cross_attn_inputs)
    attention_output = self.encdec_attention_dropout(attention_output)
    if self._norm_first:
      attention_output = source_self_attention_output + attention_output
      source_attention_output = attention_output
      attention_output = self.output_layer_norm(attention_output)
    else:
      attention_output = self.encdec_attention_layer_norm(
          self_attention_output + attention_output)

    intermediate_output = self.intermediate_dense(attention_output)
    intermediate_output = self.intermediate_activation_layer(
        intermediate_output)
    intermediate_output = self._intermediate_dropout_layer(intermediate_output)
    layer_output = self.output_dense(intermediate_output)
    layer_output = self.output_dropout(layer_output)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self.output_layer_norm(layer_output + attention_output)
    return layer_output, cache