# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer block layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import gin
import tensorflow as tf

from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import multi_channel_attention
from official.nlp.modeling.layers.util import tf_function_if_eager


@tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(tf.keras.layers.Layer):
  """Transformer layer.

  This layer implements the Transformer from "Attention Is All You Need"
  (https://arxiv.org/abs/1706.03762).

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    output_range: the sequence output range, [0, output_range), obtained by
      slicing the target sequence. `None` means the target sequence is not
      sliced.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use a bias term in the attention layer. If set False,
      the attention layer's dense projections are built without bias.
    norm_first: Whether to normalize inputs to the attention and intermediate
      dense layers (pre-norm). If set False, the outputs of the attention and
      intermediate dense layers are normalized instead (post-norm).
    norm_epsilon: Epsilon value for the normalization layers.
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               output_range=None,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               **kwargs):
    super(Transformer, self).__init__(**kwargs)

    self._num_heads = num_attention_heads
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._attention_dropout_rate = attention_dropout_rate
    self._dropout_rate = dropout_rate
    self._output_range = output_range
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon

  def build(self, input_shape):
    input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
    input_tensor_shape = tf.TensorShape(input_tensor)
    if len(input_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    batch_size, sequence_length, hidden_size = input_tensor_shape

    if len(input_shape) == 2:
      mask_tensor_shape = tf.TensorShape(input_shape[1])
      expected_mask_tensor_shape = tf.TensorShape(
          [batch_size, sequence_length, sequence_length])
      if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
        raise ValueError("When passing a mask tensor to TransformerLayer, the "
                         "mask tensor must be of shape [batch, "
                         "sequence_length, sequence_length] (here %s). Got a "
                         "mask tensor of shape %s." %
                         (expected_mask_tensor_shape, mask_tensor_shape))
    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    self._attention_layer = attention.MultiHeadAttention(
        num_heads=self._num_heads,
        key_size=self._attention_head_size,
        dropout=self._attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    # pylint: disable=protected-access
    # Temporary handling for checkpoint-compatible changes.
    self._attention_layer._build_from_signature(
        query=input_tensor_shape, value=input_tensor_shape)
    self._attention_output_dense = self._attention_layer._output_dense
    # pylint: enable=protected-access
    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    # It is probably safe in mixed_float16, but we haven't validated this yet.
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1,
            epsilon=self._norm_epsilon,
            dtype=tf.float32))
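    # The einsum equation "abc,cd->abd" below applies a dense projection to
    # the last axis of a [batch, sequence, width] input.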
    self._intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self._intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    policy = tf.keras.mixed_precision.experimental.global_policy()
    if policy.name == "mixed_bfloat16":
      # bfloat16 causes BERT with the LAMB optimizer to not converge
      # as well, so we use float32.
      # TODO(b/154538392): Investigate this.
      policy = tf.float32
    self._intermediate_activation_layer = tf.keras.layers.Activation(
        self._intermediate_activation, dtype=policy)
    self._output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    # Use float32 in layernorm for numeric stability.
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon,
        dtype=tf.float32)

    super(Transformer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "output_range":
            self._output_range,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint),
        "use_bias":
            self._use_bias,
        "norm_first":
            self._norm_first,
        "norm_epsilon":
            self._norm_epsilon
    }
    base_config = super(Transformer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
      input_tensor, attention_mask = inputs
    else:
      input_tensor, attention_mask = (inputs, None)

    if self._output_range:
      if self._norm_first:
        source_tensor = input_tensor[:, 0:self._output_range, :]
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor[:, 0:self._output_range, :]
      if attention_mask is not None:
        attention_mask = attention_mask[:, 0:self._output_range, :]
    else:
      if self._norm_first:
        source_tensor = input_tensor
        input_tensor = self._attention_layer_norm(input_tensor)
      target_tensor = input_tensor

    attention_output = self._attention_layer(
        query=target_tensor, value=input_tensor, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
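    # With norm_first (pre-norm), the residual is x + Attention(LayerNorm(x));
    # otherwise (post-norm) it is LayerNorm(x + Attention(x)), as in the
    # original Transformer paper.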
    if self._norm_first:
      attention_output = source_tensor + attention_output
    else:
      attention_output = self._attention_layer_norm(target_tensor +
                                                    attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self._output_layer_norm(attention_output)
    intermediate_output = self._intermediate_dense(attention_output)
    intermediate_output = self._intermediate_activation_layer(
        intermediate_output)
    layer_output = self._output_dense(intermediate_output)
    layer_output = self._output_dropout(layer_output)
    # During mixed precision training, attention_output is from layer norm and
    # is always fp32 for now. Cast layer_output to fp32 for the subsequent
    # add.
    layer_output = tf.cast(layer_output, tf.float32)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self._output_layer_norm(layer_output + attention_output)

    return layer_output


@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class CompiledTransformer(Transformer):
  """Transformer layer that compiles its `call` method with XLA."""

  @tf_function_if_eager(experimental_compile=True)
  def call(self, inputs):
    return super(CompiledTransformer, self).call(inputs)
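
  # The decorator above is roughly equivalent to wrapping `call` in a
  # `tf.function` with XLA enabled (a sketch, assuming eager execution):
  #
  #   compiled_call = tf.function(layer.call, experimental_compile=True)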


@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerDecoderLayer(tf.keras.layers.Layer):
  """Single transformer layer for decoder.

  It has three sub-layers:
  (1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention mechanism.
  (3) a position-wise fully connected feed-forward network.

  Arguments:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout_rate: Dropout probability for the post-attention and output dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    multi_channel_cross_attention: Whether to use `MultiChannelAttention` for
      cross-attention between target sequences and source sequences.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_bias: Whether to use a bias term in the attention layer. If set False,
      the attention layer's dense projections are built without bias.
    norm_first: Whether to normalize inputs to the attention and intermediate
      dense layers (pre-norm). If set False, the outputs of the attention and
      intermediate dense layers are normalized instead (post-norm).
    norm_epsilon: Epsilon value for the normalization layers.
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               multi_channel_cross_attention=False,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_bias=True,
               norm_first=False,
               norm_epsilon=1e-12,
               **kwargs):
    super(TransformerDecoderLayer, self).__init__(**kwargs)
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.intermediate_activation = tf.keras.activations.get(
        intermediate_activation)
    self.dropout_rate = dropout_rate
    self.attention_dropout_rate = attention_dropout_rate
    self.multi_channel_cross_attention = multi_channel_cross_attention
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)
    self._use_bias = use_bias
    self._norm_first = norm_first
    self._norm_epsilon = norm_epsilon
    if self.multi_channel_cross_attention:
      self._cross_attention_cls = multi_channel_attention.MultiChannelAttention
    else:
      self._cross_attention_cls = attention.MultiHeadAttention

  def build(self, input_shape):
    target_tensor_shape = tf.TensorShape(input_shape[0])
    if len(target_tensor_shape) != 3:
      raise ValueError("TransformerLayer expects a three-dimensional input of "
                       "shape [batch, sequence, width].")
    hidden_size = target_tensor_shape[2]
    if hidden_size % self.num_attention_heads != 0:
      raise ValueError(
          "The hidden size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self.num_attention_heads))
    self.attention_head_size = int(hidden_size / self.num_attention_heads)
    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    # Self attention.
    self.self_attention = attention.CachedAttention(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        use_bias=self._use_bias,
        name="self_attention",
        **common_kwargs)
    self.self_attention_output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.self_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.self_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm",
            axis=-1, epsilon=self._norm_epsilon))
    # Encoder-decoder attention.
    self.encdec_attention = self._cross_attention_cls(
        num_heads=self.num_attention_heads,
        key_size=self.attention_head_size,
        dropout=self.attention_dropout_rate,
        output_shape=hidden_size,
        use_bias=self._use_bias,
        name="attention/encdec",
        **common_kwargs)

    self.encdec_attention_dropout = tf.keras.layers.Dropout(
        rate=self.dropout_rate)
    self.encdec_attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="attention/encdec_output_layer_norm",
            axis=-1, epsilon=self._norm_epsilon))

    # Feed-forward projection.
    self.intermediate_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, self.intermediate_size),
        bias_axes="d",
        name="intermediate",
        **common_kwargs)
    self.intermediate_activation_layer = tf.keras.layers.Activation(
        self.intermediate_activation)
    self.output_dense = tf.keras.layers.experimental.EinsumDense(
        "abc,cd->abd",
        output_shape=(None, hidden_size),
        bias_axes="d",
        name="output",
        **common_kwargs)
    self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
    self.output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon)
    super(TransformerDecoderLayer, self).build(input_shape)

  def get_config(self):
    config = {
        "num_attention_heads":
            self.num_attention_heads,
        "intermediate_size":
            self.intermediate_size,
        "intermediate_activation":
            self.intermediate_activation,
        "dropout_rate":
            self.dropout_rate,
        "attention_dropout_rate":
            self.attention_dropout_rate,
        "multi_channel_cross_attention":
            self.multi_channel_cross_attention,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint),
        "use_bias":
            self._use_bias,
        "norm_first":
            self._norm_first,
        "norm_epsilon":
            self._norm_epsilon
    }
    base_config = super(TransformerDecoderLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


  def common_layers_with_encoder(self):
    """Gets layer objects that can make a Transformer encoder block."""
    return [
        self.self_attention, self.self_attention_layer_norm,
        self.intermediate_dense, self.output_dense, self.output_layer_norm
    ]
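
  # A cached-decoding sketch. The cache layout is an assumption about
  # `attention.CachedAttention` (pre-allocated key/value tensors filled in
  # one step at a time), not a documented contract:
  #
  #   cache = {
  #       "key": tf.zeros([batch, max_decode_len, num_heads, head_size]),
  #       "value": tf.zeros([batch, max_decode_len, num_heads, head_size]),
  #   }
  #   output, cache = layer(
  #       [targets, memory, cross_mask, self_mask],
  #       cache=cache, decode_loop_step=step)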

  def call(self, inputs, cache=None, decode_loop_step=None):
    if self.multi_channel_cross_attention:
      if len(inputs) != 5:
        raise ValueError(
            "TransformerDecoderLayer must have 5 inputs, when it uses "
            "multi_channel_cross_attention. But it got: %d" % len(inputs))
    elif len(inputs) != 4:
      raise ValueError(
          "TransformerDecoderLayer must have 4 inputs, but it got: %d" %
          len(inputs))
    input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]
    source_tensor = input_tensor
    if self._norm_first:
      input_tensor = self.self_attention_layer_norm(input_tensor)
    self_attention_output, cache = self.self_attention(
        query=input_tensor,
        value=input_tensor,
        attention_mask=self_attention_mask,
        cache=cache,
        decode_loop_step=decode_loop_step)
    self_attention_output = self.self_attention_dropout(self_attention_output)
    if self._norm_first:
      self_attention_output = source_tensor + self_attention_output
    else:
      self_attention_output = self.self_attention_layer_norm(
          input_tensor + self_attention_output)
    if self._norm_first:
      source_self_attention_output = self_attention_output
      self_attention_output = self.encdec_attention_layer_norm(
          self_attention_output)
    cross_attn_inputs = dict(
        query=self_attention_output,
        value=memory,
        attention_mask=attention_mask)
    if self.multi_channel_cross_attention:
      # Accesses the 5th input tensor for the doc-attention probabilities.
      cross_attn_inputs["context_attention_weights"] = inputs[-1]
    attention_output = self.encdec_attention(**cross_attn_inputs)
    attention_output = self.encdec_attention_dropout(attention_output)
    if self._norm_first:
      attention_output = source_self_attention_output + attention_output
    else:
      attention_output = self.encdec_attention_layer_norm(
          self_attention_output + attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self.output_layer_norm(attention_output)

    intermediate_output = self.intermediate_dense(attention_output)
    intermediate_output = self.intermediate_activation_layer(
        intermediate_output)
    layer_output = self.output_dense(intermediate_output)
    layer_output = self.output_dropout(layer_output)
    if self._norm_first:
      layer_output = source_attention_output + layer_output
    else:
      layer_output = self.output_layer_norm(layer_output + attention_output)
    return layer_output, cache