# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer block layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import gin
import tensorflow as tf

from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import multi_channel_attention
from official.nlp.modeling.layers.util import tf_function_if_eager


@tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(tf.keras.layers.Layer):
  """Transformer layer.

  This layer implements the Transformer from "Attention Is All You Need".
  (https://arxiv.org/abs/1706.03762).

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    output_range: the sequence output range, [0, output_range), obtained by
      slicing the target sequence. `None` means the target sequence is not
      sliced.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use bias terms in the attention layer. If set to
      False, biases in the attention layer are disabled.
    norm_first: Whether to normalize inputs to the attention and intermediate
      dense layers. If set to False, the output of the attention and
      intermediate dense layers is normalized instead.
    norm_epsilon: Epsilon value to initialize normalization layers.
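
  Example (a minimal usage sketch; the shapes and hyperparameter values below
  are illustrative, not defaults of this layer):

    block = Transformer(
        num_attention_heads=8,
        intermediate_size=2048,
        intermediate_activation="relu")
    data = tf.random.uniform((2, 16, 512))  # [batch, sequence, width]
    mask = tf.ones((2, 16, 16))             # [batch, sequence, sequence]
    output = block([data, mask])            # shape: [2, 16, 512]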
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               output_range=None,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               **kwargs):
    super(Transformer, self).__init__(**kwargs)

    self._num_heads = num_attention_heads
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._attention_dropout_rate = attention_dropout_rate
    self._dropout_rate = dropout_rate
    self._output_range = output_range
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon

  def build(self, input_shape):
    input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
    input_tensor_shape = tf.TensorShape(input_tensor)
    if len(input_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    batch_size, sequence_length, hidden_size = input_tensor_shape

    if len(input_shape) == 2:
      mask_tensor_shape = tf.TensorShape(input_shape[1])
      expected_mask_tensor_shape = tf.TensorShape(
          [batch_size, sequence_length, sequence_length])
      if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
        raise ValueError("When passing a mask tensor to TransformerLayer, the "
                         "mask tensor must be of shape [batch, "
                         "sequence_length, sequence_length] (here %s). Got a "
                         "mask tensor of shape %s." %
                         (expected_mask_tensor_shape, mask_tensor_shape))
    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    self._attention_layer = attention.MultiHeadAttention(
        num_heads=self._num_heads,
        key_size=self._attention_head_size,
        dropout=self._attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    # It is probably safe in mixed_float16, but we haven't validated this yet.
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon,
            dtype=tf.float32))
    self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self._intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    policy = tf.keras.mixed_precision.experimental.global_policy()
    if policy.name == "mixed_bfloat16":
      # bfloat16 causes BERT with the LAMB optimizer to not converge
      # as well, so we use float32.
      # TODO(b/154538392): Investigate this.
      policy = tf.float32
    self._intermediate_activation_layer = tf.keras.layers.Activation(
        self._intermediate_activation, dtype=policy)
    self._output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm",
        axis=-1,
        epsilon=self._norm_epsilon,
        dtype=tf.float32)

    super(Transformer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "output_range":
            self._output_range,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint),
        "use_bias":
            self._use_bias,
        "norm_first":
            self._norm_first,
        "norm_epsilon":
            self._norm_epsilon
    }
    base_config = super(Transformer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
      input_tensor, attention_mask = inputs
    else:
      input_tensor, attention_mask = (inputs, None)

    if self._output_range:
      if self._norm_first:
        source_tensor = input_tensor[:, 0:self._output_range, :]
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor[:, 0:self._output_range, :]
      if attention_mask is not None:
        attention_mask = attention_mask[:, 0:self._output_range, :]
    else:
      if self._norm_first:
        source_tensor = input_tensor
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor

    attention_output = self._attention_layer(
        query=target_tensor, value=input_tensor, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
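    # With norm_first (pre-layer-norm), the residual below adds the
    # un-normalized input back in; otherwise (post-layer-norm) the residual
    # sum is passed through the attention layer norm.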
    if self._norm_first:
      attention_output = source_tensor + attention_output
    else:
      attention_output = self._attention_layer_norm(target_tensor +
                                                    attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self._output_layer_norm(attention_output)
    intermediate_output = self._intermediate_dense(attention_output)
    intermediate_output = self._intermediate_activation_layer(
        intermediate_output)
    layer_output = self._output_dense(intermediate_output)
    layer_output = self._output_dropout(layer_output)
    # During mixed precision training, attention_output is from layer norm and
    # is always fp32 for now. Cast layer_output to fp32 for the subsequent
    # add.
    layer_output = tf.cast(layer_output, tf.float32)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self._output_layer_norm(layer_output + attention_output)

    return layer_output


@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class CompiledTransformer(Transformer):
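  """Transformer layer with `call` wrapped by `tf_function_if_eager`.

  The decorator allows `call` to be compiled with XLA
  (`experimental_compile=True`) when the layer is executed eagerly.
  """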

  @tf_function_if_eager(experimental_compile=True)
  def call(self, inputs):
    return super(CompiledTransformer, self).call(inputs)


@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerDecoderLayer(tf.keras.layers.Layer):
  """Single transformer layer for decoder.

  It has three sub-layers:
  (1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention.
  (3) a position-wise fully connected feed-forward network.

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    multi_channel_cross_attention: Whether to use `MultiChannelAttention` for
      cross-attention between target sequences and source sequences.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use bias terms in the attention layers. If set to
      False, biases in the attention layers are disabled.
    norm_first: Whether to normalize inputs to the attention and intermediate
      dense layers. If set to False, the output of the attention and
      intermediate dense layers is normalized instead.
    norm_epsilon: Epsilon value to initialize normalization layers.
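
  Example (a minimal usage sketch; the shapes and hyperparameter values below
  are illustrative only):

    decoder_block = TransformerDecoderLayer(
        num_attention_heads=8,
        intermediate_size=2048,
        intermediate_activation="relu")
    target = tf.random.uniform((2, 10, 512))  # decoder input
    memory = tf.random.uniform((2, 16, 512))  # encoder output
    cross_mask = tf.ones((2, 10, 16))         # [batch, target_len, source_len]
    self_mask = tf.ones((2, 10, 10))          # [batch, target_len, target_len]
    output, _ = decoder_block([target, memory, cross_mask, self_mask])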
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               multi_channel_cross_attention=False,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               **kwargs):
    super(TransformerDecoderLayer, self).__init__(**kwargs)
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.intermediate_activation = tf.keras.activations.get(
        intermediate_activation)
    self.dropout_rate = dropout_rate
    self.attention_dropout_rate = attention_dropout_rate
    self.multi_channel_cross_attention = multi_channel_cross_attention
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon
    if self.multi_channel_cross_attention:
      self._cross_attention_cls = multi_channel_attention.MultiChannelAttention
    else:
      self._cross_attention_cls = attention.MultiHeadAttention

  def build(self, input_shape):
    target_tensor_shape = tf.TensorShape(input_shape[0])
    if len(target_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    hidden_size = target_tensor_shape[2]
    if hidden_size % self.num_attention_heads != 0:
      raise ValueError(
          "The hidden size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self.num_attention_heads))
    self.attention_head_size = int(hidden_size / self.num_attention_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    # Self attention.
    self.self_attention = attention.CachedAttention(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    self.self_attention_output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.self_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.self_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon))
    # Encoder-decoder attention.
    self.encdec_attention = self._cross_attention_cls(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        output_shape=hidden_size,
        use_bias=self._use_bias,
        name="attention/encdec",
        **common_kwargs)

    self.encdec_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.encdec_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="attention/encdec_output_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon))

    # Feed-forward projection.
    self.intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self.intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    self.intermediate_activation_layer = tf.keras.layers.Activation(
        self.intermediate_activation)
    self.output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
    self.output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon)
    super(TransformerDecoderLayer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self.num_attention_heads,
        "intermediate_size":
            self.intermediate_size,
        "intermediate_activation":
            self.intermediate_activation,
        "dropout_rate":
            self.dropout_rate,
        "attention_dropout_rate":
            self.attention_dropout_rate,
        "multi_channel_cross_attention":
            self.multi_channel_cross_attention,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint),
        "use_bias":
            self._use_bias,
        "norm_first":
            self._norm_first,
        "norm_epsilon":
            self._norm_epsilon
    }
    base_config = super(TransformerDecoderLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def common_layers_with_encoder(self):
    """Gets layer objects that can make a Transformer encoder block."""
    return [
        self.self_attention, self.self_attention_layer_norm,
        self.intermediate_dense, self.output_dense, self.output_layer_norm
    ]

  def call(self, inputs, cache=None, decode_loop_step=None):
    if self.multi_channel_cross_attention:
      if len(inputs) != 5:
        raise ValueError(
            "TransformerDecoderLayer must have 5 inputs when it uses "
            "multi_channel_cross_attention, but it got: %d" % len(inputs))
    elif len(inputs) != 4:
      raise ValueError(
          "TransformerDecoderLayer must have 4 inputs, but it got: %d" %
          len(inputs))
    input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]
    source_tensor = input_tensor
    if self._norm_first:
      input_tensor = self.self_attention_layer_norm(input_tensor)
    self_attention_output, cache = self.self_attention(
        query=input_tensor,
        value=input_tensor,
        attention_mask=self_attention_mask,
        cache=cache,
        decode_loop_step=decode_loop_step)
    self_attention_output = self.self_attention_dropout(self_attention_output)
    if self._norm_first:
      self_attention_output = source_tensor + self_attention_output
    else:
      self_attention_output = self.self_attention_layer_norm(
          input_tensor + self_attention_output)
    if self._norm_first:
      source_self_attention_output = self_attention_output
      self_attention_output = self.encdec_attention_layer_norm(
          self_attention_output)
    cross_attn_inputs = dict(
        query=self_attention_output,
        value=memory,
        attention_mask=attention_mask)
    if self.multi_channel_cross_attention:
      # Accesses the 5th input tensor for the doc-attention probabilities.
      cross_attn_inputs["context_attention_weights"] = inputs[-1]
    attention_output = self.encdec_attention(**cross_attn_inputs)
    attention_output = self.encdec_attention_dropout(attention_output)
    if self._norm_first:
      attention_output = source_self_attention_output + attention_output
    else:
      attention_output = self.encdec_attention_layer_norm(
          self_attention_output + attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self.output_layer_norm(attention_output)

    intermediate_output = self.intermediate_dense(attention_output)
    intermediate_output = self.intermediate_activation_layer(
        intermediate_output)
    layer_output = self.output_dense(intermediate_output)
    layer_output = self.output_dropout(layer_output)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self.output_layer_norm(layer_output + attention_output)
    return layer_output, cache