# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Transformer."""
import math
import torch
import torch.nn.functional as F

from megatron import get_args
from megatron import mpu
from .module import MegatronModule
from megatron.model.enums import AttnMaskType, LayerType, AttnType
from megatron.model import import_layernorm
from megatron.model.fused_softmax import FusedScaleMaskSoftmax
from megatron.model.fused_bias_gelu import bias_gelu_impl
from megatron.model.utils import attention_mask_func, openai_gelu, erf_gelu

# flags required to enable jit fusion kernels
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)

""" We use the following notation throughout this file:
     h: hidden size
     n: number of attention heads
     p: number of model parallel partitions
     np: n/p
     hp: h/p
     hn: h/n
     b: batch size
     s: sequence length
     l: number of layers
    Transformer takes input of size [s, b, h] and returns a
    tensor of the same size. We use the following arguments:
        hyperparameters: transformer hyperparameters
"""

class ParallelMLP(MegatronModule):
    """MLP.

    MLP will take the input with h hidden state, project it to 4*h
    hidden dimension, perform nonlinear transformation, and project the
    state back into h hidden dimension. At the end, dropout is also
    applied.
    """

    def __init__(self, init_method, output_layer_init_method):
        super(ParallelMLP, self).__init__()
        args = get_args()

        # Project to 4h.
        self.dense_h_to_4h = mpu.ColumnParallelLinear(
            args.hidden_size,
            args.ffn_hidden_size,
            gather_output=False,
            init_method=init_method,
            skip_bias_add=True)

        self.bias_gelu_fusion = args.bias_gelu_fusion
        self.activation_func = F.gelu
        if args.openai_gelu:
            self.activation_func = openai_gelu
        elif args.onnx_safe:
            self.activation_func = erf_gelu

        # Project back to h.
        self.dense_4h_to_h = mpu.RowParallelLinear(
            args.ffn_hidden_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True)

    def forward(self, hidden_states):

        # [s, b, 4hp]
        intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)

        if self.bias_gelu_fusion:
            intermediate_parallel = \
                bias_gelu_impl(intermediate_parallel, bias_parallel)
        else:
            intermediate_parallel = \
                self.activation_func(intermediate_parallel + bias_parallel)

        # [s, b, h]
        output, output_bias = self.dense_4h_to_h(intermediate_parallel)
        return output, output_bias


class ParallelAttention(MegatronModule):
    """Parallel self-attention layer abstract class.

    Self-attention layer takes input with size [s, b, h]
    and returns output of the same size.
    """

    def __init__(self, init_method,
                 output_layer_init_method, layer_number,
                 attention_type=AttnType.self_attn,
                 attn_mask_type=AttnMaskType.padding):
        super(ParallelAttention, self).__init__()
        args = get_args()
        self.fp16 = args.fp16

        self.apply_query_key_layer_scaling = args.apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = args.attention_softmax_in_fp32
        if self.apply_query_key_layer_scaling:
            self.attention_softmax_in_fp32 = True
        self.layer_number = max(1, layer_number)
        self.attention_type = attention_type
        self.attn_mask_type = attn_mask_type

        projection_size = args.kv_channels * args.num_attention_heads

        # Per attention head and per partition values.
        world_size = mpu.get_tensor_model_parallel_world_size()
        self.hidden_size_per_partition = mpu.divide(projection_size,
                                                    world_size)
        self.hidden_size_per_attention_head = mpu.divide(
            projection_size, args.num_attention_heads)
        self.num_attention_heads_per_partition = mpu.divide(
            args.num_attention_heads, world_size)

        # Strided linear layer.
        if attention_type == AttnType.self_attn:
            self.query_key_value = mpu.ColumnParallelLinear(
                args.hidden_size,
                3 * projection_size,
                gather_output=False,
                init_method=init_method)
        else:
            assert attention_type == AttnType.cross_attn
            self.query = mpu.ColumnParallelLinear(
                args.hidden_size,
                projection_size,
                gather_output=False,
                init_method=init_method)

            self.key_value = mpu.ColumnParallelLinear(
                args.hidden_size,
                2 * projection_size,
                gather_output=False,
                init_method=init_method)
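
        # Note: for self-attention, the fused query_key_value projection above
        # produces this partition's Q, K and V in a single GEMM; for
        # cross-attention, queries are projected from the decoder hidden
        # states while keys and values come from encoder_output (see forward).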

        coeff = None
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
        if self.apply_query_key_layer_scaling:
            coeff = self.layer_number
            self.norm_factor *= coeff
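
        # With apply_query_key_layer_scaling, the raw scores are computed as
        # QK^T / (sqrt(hn) * layer_number) and FusedScaleMaskSoftmax rescales
        # by coeff = layer_number (in fp32) before the softmax, so the result
        # matches softmax(QK^T / sqrt(hn)) while keeping the fp16 matmul
        # output in a safer numeric range.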

        self.scale_mask_softmax = FusedScaleMaskSoftmax(
            self.fp16,
            self.attn_mask_type,
            args.masked_softmax_fusion,
            attention_mask_func,
            self.attention_softmax_in_fp32,
            coeff)

        # Dropout. Note that for a single iteration, this layer will generate
        # different outputs on different number of parallel partitions but
        # on average it should not be partition dependent.
        self.attention_dropout = torch.nn.Dropout(args.attention_dropout)

        # Output.
        self.dense = mpu.RowParallelLinear(
            projection_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True)

    def forward(self, hidden_states, attention_mask, layer_past=None,
                get_key_value=False, encoder_output=None):
        # hidden_states: [sq, b, h]

        # =====================
        # Query, Key, and Value
        # =====================

        if self.attention_type == AttnType.self_attn:
            # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
            mixed_x_layer, _ = self.query_key_value(hidden_states)

            # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
            new_tensor_shape = mixed_x_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 3 * self.hidden_size_per_attention_head)
            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
            (query_layer,
             key_layer,
             value_layer) = mpu.split_tensor_along_last_dim(mixed_x_layer, 3)
        else:
            # Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
            mixed_kv_layer, _ = self.key_value(encoder_output)

            # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
            new_tensor_shape = mixed_kv_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 2 * self.hidden_size_per_attention_head)
            mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)

            # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
            (key_layer,
             value_layer) = mpu.split_tensor_along_last_dim(mixed_kv_layer, 2)

            # Attention head [sq, b, h] --> [sq, b, hp]
            query_layer, _ = self.query(hidden_states)
            # [sq, b, hp] --> [sq, b, np, hn]
            new_tensor_shape = query_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 self.hidden_size_per_attention_head)
            query_layer = query_layer.view(*new_tensor_shape)

        # ==================================
        # Adjust key and value for inference
        # ==================================

        if layer_past is not None:
            past_key, past_value = layer_past
            key_layer = torch.cat((past_key.type_as(key_layer),
                                   key_layer), dim=0)
            value_layer = torch.cat((past_value.type_as(value_layer),
                                     value_layer), dim=0)
        if get_key_value:
            present = (key_layer, value_layer)

        # ===================================
        # Raw attention scores. [b, np, s, s]
        # ===================================

        # [b, np, sq, sk]
        output_size = (query_layer.size(1),
                       query_layer.size(2),
                       query_layer.size(0),
                       key_layer.size(0))

        # [sq, b, np, hn] -> [sq, b * np, hn]
        query_layer = query_layer.view(output_size[2],
                                       output_size[0] * output_size[1], -1)
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key_layer = key_layer.view(output_size[3],
                                   output_size[0] * output_size[1], -1)

        # preallocating result tensor: [b * np, sq, sk]
        matmul_result = torch.empty(
            output_size[0]*output_size[1],
            output_size[2],
            output_size[3],
            dtype=query_layer.dtype,
            device=torch.cuda.current_device())

        # Raw attention scores. [b * np, sq, sk]
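        # (beta=0.0 means the uninitialized buffer is used only as the output;
        # the call computes alpha * Q @ K^T, batched over the b * np dimension.)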
        matmul_result = torch.baddbmm(
            matmul_result,
            query_layer.transpose(0, 1),   # [b * np, sq, hn]
            key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
            beta=0.0, alpha=(1.0/self.norm_factor))

        # change view to [b, np, sq, sk]
        attention_scores = matmul_result.view(*output_size)

        # ==================================================
        # Update attention mask for inference. [b, np, sq, sk]
        # ==================================================
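        # With layer_past, only the newest token is being processed, so the
        # mask row for the latest position is selected; otherwise the mask is
        # simply cropped to the current key length.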

        if get_key_value:
            with torch.no_grad():
                if layer_past is not None:
                    attention_mask = attention_mask[
                        ...,
                        attention_scores.size(3) - 1,
                        :attention_scores.size(3)].unsqueeze(2)
                else:
                    attention_mask = attention_mask[
                        ...,
                        :attention_scores.size(3),
                        :attention_scores.size(3)]

        # ===========================
        # Attention probs and dropout
        # ===========================

        # attention scores and attention mask [b, np, sq, sk]
        attention_probs = self.scale_mask_softmax(attention_scores,
                                                  attention_mask)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        with mpu.get_cuda_rng_tracker().fork():
            attention_probs = self.attention_dropout(attention_probs)

        # =========================
        # Context layer. [sq, b, hp]
        # =========================

        # value_layer -> context layer.
        # [sk, b, np, hn] --> [b, np, sq, hn]

        # context layer shape: [b, np, sq, hn]
        output_size = (value_layer.size(1),
                       value_layer.size(2),
                       query_layer.size(0),
                       value_layer.size(3))

        # change view [sk, b * np, hn]
        value_layer = value_layer.view(value_layer.size(0),
                                       output_size[0] * output_size[1], -1)

        # change view [b * np, sq, sk]
        attention_probs = attention_probs.view(output_size[0] * output_size[1],
                                               output_size[2], -1)

        # matmul: [b * np, sq, hn]
        context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

        # change view [b, np, sq, hn]
        context_layer = context_layer.view(*output_size)

        # [b, np, sq, hn] --> [sq, b, np, hn]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_layer_shape = context_layer.size()[:-2] + \
            (self.hidden_size_per_partition,)
        context_layer = context_layer.view(*new_context_layer_shape)

        # =================
        # Output. [sq, b, h]
        # =================

        output, bias = self.dense(context_layer)

        if get_key_value:
            output = [output, present]

        return output, bias


def bias_dropout_add(x, bias, residual, prob, training):
    # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
    out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    out = residual + out
    return out


def get_bias_dropout_add(training):
    def _bias_dropout_add(x, bias, residual, prob):
        return bias_dropout_add(x, bias, residual, prob, training)
    return _bias_dropout_add
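
# These bias-dropout-add helpers all compute: out = residual + dropout(x + bias).
# The @torch.jit.script variants below bake the training flag in as a constant
# so that training and inference each get their own fusable graph, while
# get_bias_dropout_add provides the unfused fallback used when
# bias_dropout_fusion is disabled.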


@torch.jit.script
def bias_dropout_add_fused_train(x, bias, residual, prob):
    # type: (Tensor, Tensor, Tensor, float) -> Tensor
    return bias_dropout_add(x, bias, residual, prob, True)


@torch.jit.script
def bias_dropout_add_fused_inference(x, bias, residual, prob):
    # type: (Tensor, Tensor, Tensor, float) -> Tensor
    return bias_dropout_add(x, bias, residual, prob, False)


class ParallelTransformerLayer(MegatronModule):
    """A single transformer layer.

    Transformer layer takes input with size [s, b, h] and returns an
    output of the same size.
    """

    def __init__(self, init_method, output_layer_init_method,
                 layer_number, layer_type=LayerType.encoder,
                 self_attn_mask_type=AttnMaskType.padding):
        args = get_args()

        super(ParallelTransformerLayer, self).__init__()
        self.layer_number = layer_number
        self.layer_type = layer_type

        self.apply_residual_connection_post_layernorm \
            = args.apply_residual_connection_post_layernorm

        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection

        # Layernorm on the input data.
        LayerNorm = import_layernorm(self.fp32_residual_connection, self.bf16)
        self.input_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon)

        # Self attention.
        self.self_attention = ParallelAttention(
            init_method,
            output_layer_init_method,
            layer_number,
            attention_type=AttnType.self_attn,
            attn_mask_type=self_attn_mask_type)
        self.hidden_dropout = args.hidden_dropout
        self.bias_dropout_fusion = args.bias_dropout_fusion

        # Layernorm on the attention output
        self.post_attention_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon)

        if self.layer_type == LayerType.decoder:
            self.inter_attention = ParallelAttention(
                init_method,
                output_layer_init_method,
                layer_number,
                attention_type=AttnType.cross_attn)
            # Layernorm on the attention output.
            self.post_inter_attention_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon)

        # MLP
        self.mlp = ParallelMLP(init_method,
                               output_layer_init_method)

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, enc_dec_attn_mask=None,
                layer_past=None, get_key_value=False):
        # hidden_states: [s, b, h]

        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        if self.bf16 and self.fp32_residual_connection:
            layernorm_output = layernorm_output.bfloat16()
        # Self attention.
        attention_output, attention_bias = \
            self.self_attention(layernorm_output,
                                attention_mask,
                                layer_past=layer_past,
                                get_key_value=get_key_value)

        if get_key_value:
            attention_output, presents = attention_output

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        # jit scripting for a nn.module (with dropout) is not
        # triggering the fusion kernel. For now, we use two
        # different nn.functional routines to account for varying
        # dropout semantics during training and inference phases.
        if self.bias_dropout_fusion:
            if self.training:
                bias_dropout_add_func = bias_dropout_add_fused_train
            else:
                bias_dropout_add_func = bias_dropout_add_fused_inference
        else:
            bias_dropout_add_func = get_bias_dropout_add(self.training)
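
        # Whichever variant was selected, bias_dropout_add_func(x, bias,
        # residual, prob) returns residual + dropout(x + bias); here x is the
        # attention output, bias its skipped bias term, and residual is either
        # the layernorm output or the raw hidden states, depending on
        # apply_residual_connection_post_layernorm.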

        # re-enable torch grad to enable fused optimization.
        with torch.enable_grad():
            layernorm_input = bias_dropout_add_func(
                attention_output,
                attention_bias.expand_as(residual),
                residual,
                self.hidden_dropout)

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)
        if self.bf16 and self.fp32_residual_connection:
            layernorm_output = layernorm_output.bfloat16()

        if self.layer_type == LayerType.decoder:
            attention_output, attention_bias = \
                self.inter_attention(layernorm_output,
                                     enc_dec_attn_mask,
                                     encoder_output=encoder_output)
            # residual connection
            if self.apply_residual_connection_post_layernorm:
                residual = layernorm_output
            else:
                residual = layernorm_input

            # re-enable torch grad to enable fused optimization.
            with torch.enable_grad():
                layernorm_input = bias_dropout_add_func(
                    attention_output,
                    attention_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout)

            # Layer norm post the decoder attention
            layernorm_output = self.post_inter_attention_layernorm(layernorm_input)
            if self.bf16 and self.fp32_residual_connection:
                layernorm_output = layernorm_output.bfloat16()

        # MLP.
        mlp_output, mlp_bias = self.mlp(layernorm_output)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        # re-enable torch grad to enable fused optimization.
        with torch.enable_grad():
            output = bias_dropout_add_func(
                mlp_output,
                mlp_bias.expand_as(residual),
                residual,
                self.hidden_dropout)

        if get_key_value:
            output = [output, presents]

        return output


class ParallelTransformer(MegatronModule):
    """Transformer class."""

    def __init__(self, init_method, output_layer_init_method,
                 layer_type=LayerType.encoder,
                 self_attn_mask_type=AttnMaskType.padding):
        super(ParallelTransformer, self).__init__()
        args = get_args()

        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection

        # Store activation checkpointing flag.
        self.checkpoint_activations = args.checkpoint_activations
        self.checkpoint_num_layers = args.checkpoint_num_layers

        # Number of layers.
        assert args.num_layers % mpu.get_pipeline_model_parallel_world_size() == 0, \
            'num_layers must be divisible by pipeline_model_parallel_size'
        self.num_layers = args.num_layers // mpu.get_pipeline_model_parallel_world_size()

        # Transformer layers.
        def build_layer(layer_number):
            return ParallelTransformerLayer(
                init_method,
                output_layer_init_method,
                layer_number,
                layer_type=layer_type,
                self_attn_mask_type=self_attn_mask_type)
        if args.virtual_pipeline_model_parallel_size is not None:
            assert args.num_layers % args.virtual_pipeline_model_parallel_size == 0, \
                'num_layers_per_stage must be divisible by ' \
                'virtual_pipeline_model_parallel_size'
            # Number of layers in each model chunk is the number of layers in the stage,
            # divided by the number of model chunks in a stage.
            self.num_layers = self.num_layers // args.virtual_pipeline_model_parallel_size
            # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0]  [2]  [4]  [6]
            # Stage 1: [1]  [3]  [5]  [7]
            # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0, 1]  [4, 5]
            # Stage 1: [2, 3]  [6, 7]
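            # As a concrete check of the offset computed below: with 8 layers,
            # 2 stages and 2 virtual stages, each model chunk holds
            # self.num_layers = 2 and offset = virtual_rank * 4 +
            # pipeline_rank * 2, which reproduces the second assignment above.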
            offset = mpu.get_virtual_pipeline_model_parallel_rank() * (
                    args.num_layers // args.virtual_pipeline_model_parallel_size) + \
                (mpu.get_pipeline_model_parallel_rank() * self.num_layers)
        else:
            # Each stage gets a contiguous set of layers.
            offset = mpu.get_pipeline_model_parallel_rank() * self.num_layers
        self.layers = torch.nn.ModuleList(
            [build_layer(i + 1 + offset) for i in range(self.num_layers)])

        if mpu.is_pipeline_last_stage():
            # Final layer norm before output.
            LayerNorm = import_layernorm(self.fp32_residual_connection,
                                         self.bf16)
            self.final_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon)

    def _get_layer(self, layer_number):
        return self.layers[layer_number]

    def _checkpointed_forward(self, hidden_states, attention_mask,
                              encoder_output, enc_dec_attn_mask):
        """Forward method with activation checkpointing."""
        def custom(start, end):
            def custom_forward(*inputs):
                x_ = inputs[0]
                attention_mask = inputs[1]
                encoder_output = inputs[2]
                enc_dec_attn_mask = inputs[3]
                for index in range(start, end):
                    layer = self._get_layer(index)
                    x_ = layer(x_, attention_mask, encoder_output, enc_dec_attn_mask)
                return x_
            return custom_forward

        # Make sure memory is freed.
        mpu.reset_checkpointed_activations_memory_buffer()
        l = 0
        while l < self.num_layers:
            hidden_states = mpu.checkpoint(
                custom(l, l + self.checkpoint_num_layers),
                hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
            l += self.checkpoint_num_layers

        return hidden_states

    def forward(self, hidden_states, attention_mask, layer_past=None,
                get_key_value=False, encoder_output=None, enc_dec_attn_mask=None):

        # Checks.
        if layer_past is not None:
            assert get_key_value, \
                'for not None values in layer_past, ' \
                'expected get_key_value to be set'
        if get_key_value:
            assert not self.checkpoint_activations, \
                'get_key_value does not work with ' \
                'activation checkpointing'

        if mpu.is_pipeline_first_stage():
            # Data format change to avoid explicit transposes: [b s h] --> [s b h].
            # If the input flag for fp32 residual connection is set, convert to float.
            if self.fp32_residual_connection:
                hidden_states = hidden_states.transpose(0, 1).contiguous().float()
            # Otherwise, leave it as is.
            else:
                hidden_states = hidden_states.transpose(0, 1).contiguous()

        if encoder_output is not None:
            encoder_output = encoder_output.transpose(0, 1).contiguous()

        if self.checkpoint_activations:
            hidden_states = self._checkpointed_forward(hidden_states,
                                                       attention_mask,
                                                       encoder_output,
                                                       enc_dec_attn_mask)
        else:
            if get_key_value:
                presents = []
            for index in range(self.num_layers):
                layer = self._get_layer(index)
                past = None
                if layer_past is not None:
                    past = layer_past[index]
                hidden_states = layer(hidden_states,
                                      attention_mask,
                                      encoder_output=encoder_output,
                                      enc_dec_attn_mask=enc_dec_attn_mask,
                                      layer_past=past,
                                      get_key_value=get_key_value)
                if get_key_value:
                    hidden_states, present = hidden_states
                    presents.append(present)

        # Final layer norm.
        if mpu.is_pipeline_last_stage():
            # Reverting data format change [s b h] --> [b s h].
            hidden_states = hidden_states.transpose(0, 1).contiguous()
            output = self.final_layernorm(hidden_states)
            if self.bf16 and self.fp32_residual_connection:
                output = output.bfloat16()
        else:
            output = hidden_states
        if get_key_value:
            output = [output, presents]

        return output