# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Transformer."""
import math
import torch
import torch.nn.functional as F

from megatron import get_args
from megatron import mpu
from .module import MegatronModule
from megatron.model.enums import AttnMaskType, ModelType, LayerType, AttnType
from megatron.model import LayerNorm
from megatron.model.fused_softmax import FusedScaleMaskSoftmax
from megatron.model.fused_bias_gelu import bias_gelu_impl
from megatron.model.utils import attention_mask_func, openai_gelu, erf_gelu


""" We use the following notation throughout this file:
     h: hidden size
     n: number of attention heads
     p: number of model parallel partitions
     np: n/p
     hp: h/p
     hn: h/n
     b: batch size
     s: sequence length
     l: number of layers
    Transformer takes input of size [s, b, h] and returns a
    tensor of the same size. We use the following arguments:
        hyperparameters: transformer hyperparameters
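
    Example (illustrative values only): with h=1024, n=16, and p=2, each
    tensor-parallel partition holds np=8 attention heads and hp=512 of the
    hidden units, and each head operates on hn=64 hidden units.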
"""

class ParallelMLP(MegatronModule):
    """MLP.

    MLP takes the input with h hidden state, projects it to 4*h
    hidden dimension, performs a nonlinear transformation, and projects
    the state back into h hidden dimension.
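
    Illustrative shapes, assuming h=1024 (so 4*h=4096) and tensor-parallel
    size p=2: dense_h_to_4h produces a [s, b, 2048] slice per partition
    (gather_output=False), and dense_4h_to_h reduces the partial products
    back to a full [s, b, 1024] output.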
    """

    def __init__(self, init_method, output_layer_init_method):
        super(ParallelMLP, self).__init__()
        args = get_args()

        # Project to 4h.
        self.dense_h_to_4h = mpu.ColumnParallelLinear(
            args.hidden_size,
            args.ffn_hidden_size,
            gather_output=False,
            init_method=init_method,
            skip_bias_add=True)

        self.bias_gelu_fusion = args.bias_gelu_fusion
        self.activation_func = F.gelu
        if args.openai_gelu:
            self.activation_func = openai_gelu
        elif args.onnx_safe:
            self.activation_func = erf_gelu

        # Project back to h.
        self.dense_4h_to_h = mpu.RowParallelLinear(
            args.ffn_hidden_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True)

    def forward(self, hidden_states):

        # [s, b, 4hp]
        intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)

        if self.bias_gelu_fusion:
            intermediate_parallel = \
                bias_gelu_impl(intermediate_parallel, bias_parallel)
        else:
            intermediate_parallel = \
                self.activation_func(intermediate_parallel + bias_parallel)

        # [s, b, h]
        output, output_bias = self.dense_4h_to_h(intermediate_parallel)
        return output, output_bias


class ParallelAttention(MegatronModule):
    """Parallel self-attention layer abstract class.

    Self-attention layer takes input with size [s, b, h]
    and returns output of the same size.
    """

    def __init__(self, init_method,
                 output_layer_init_method, layer_number,
                 attention_type=AttnType.self_attn,
                 attn_mask_type=AttnMaskType.padding):
        super(ParallelAttention, self).__init__()
        args = get_args()
        self.fp16 = args.fp16
        self.bf16 = args.bf16

        self.apply_query_key_layer_scaling = args.apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = args.attention_softmax_in_fp32
        if self.apply_query_key_layer_scaling:
            self.attention_softmax_in_fp32 = True
        self.layer_number = max(1, layer_number)
        self.attention_type = attention_type
        self.attn_mask_type = attn_mask_type
        self.params_dtype = args.params_dtype

        projection_size = args.kv_channels * args.num_attention_heads

        # Per attention head and per partition values.
        world_size = mpu.get_tensor_model_parallel_world_size()
        self.hidden_size_per_partition = mpu.divide(projection_size,
                                                    world_size)
        self.hidden_size_per_attention_head = mpu.divide(
            projection_size, args.num_attention_heads)
        self.num_attention_heads_per_partition = mpu.divide(
            args.num_attention_heads, world_size)

        # Strided linear layer.
        if attention_type == AttnType.self_attn:
            self.query_key_value = mpu.ColumnParallelLinear(
                args.hidden_size,
                3 * projection_size,
                gather_output=False,
                init_method=init_method)
        else:
            assert attention_type == AttnType.cross_attn
            self.query = mpu.ColumnParallelLinear(
                args.hidden_size,
                projection_size,
                gather_output=False,
                init_method=init_method)

            self.key_value = mpu.ColumnParallelLinear(
                args.hidden_size,
                2 * projection_size,
                gather_output=False,
                init_method=init_method)

        coeff = None
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
        if self.apply_query_key_layer_scaling:
            coeff = self.layer_number
            self.norm_factor *= coeff
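
        # With query-key layer scaling, raw attention scores are computed as
        # (Q @ K^T) / (sqrt(hn) * layer_number); FusedScaleMaskSoftmax below
        # re-applies `coeff` (= layer_number) before its (fp32) softmax, so
        # the math is unchanged while the fp16 matmul stays in a safe range.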

        self.scale_mask_softmax = FusedScaleMaskSoftmax(
            self.fp16, self.bf16,
            self.attn_mask_type,
            args.masked_softmax_fusion,
            attention_mask_func,
            self.attention_softmax_in_fp32,
            coeff)

        # Dropout. Note that for a single iteration, this layer will generate
        # different outputs for different numbers of parallel partitions, but
        # on average it should not be partition dependent.
        self.attention_dropout = torch.nn.Dropout(args.attention_dropout)

        # Output.
        self.dense = mpu.RowParallelLinear(
            projection_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True)

        # Inference key-value memory
        self.inference_key_memory = None
        self.inference_value_memory = None


    def _allocate_memory(self, inference_max_sequence_len, batch_size):
        return torch.empty(
            inference_max_sequence_len,
            batch_size,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
            dtype=self.params_dtype,
            device=torch.cuda.current_device())
        

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, inference_params=None):
        # hidden_states: [sq, b, h]

        # =================================================
        # Pre-allocate memory for key-values for inference.
        # =================================================
        if inference_params:
            if inference_params.allocate_key_value_memory:
                inf_max_seq_len = inference_params.max_sequence_len
                inf_max_batch_size = inference_params.max_batch_size
                self.inference_key_memory = self._allocate_memory(
                    inf_max_seq_len, inf_max_batch_size)
                self.inference_value_memory = self._allocate_memory(
                    inf_max_seq_len, inf_max_batch_size)
        # This is added for safety. In case inference_params
        # is not provided, make sure there is no potential memory left
        # from previous inference.
        else:
            self.inference_key_memory = None
            self.inference_value_memory = None

        # =====================
        # Query, Key, and Value
        # =====================

        if self.attention_type == AttnType.self_attn:
            # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
            mixed_x_layer, _ = self.query_key_value(hidden_states)

            # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
            new_tensor_shape = mixed_x_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 3 * self.hidden_size_per_attention_head)
            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
            (query_layer,
             key_layer,
             value_layer) = mpu.split_tensor_along_last_dim(mixed_x_layer, 3)
        else:
            # Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
            mixed_kv_layer, _ = self.key_value(encoder_output)

            # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
            new_tensor_shape = mixed_kv_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 2 * self.hidden_size_per_attention_head)
            mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)

            # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
            (key_layer,
             value_layer) = mpu.split_tensor_along_last_dim(mixed_kv_layer, 2)

            # Attention head [sq, b, h] --> [sq, b, hp]
            query_layer, _ = self.query(hidden_states)
            # [sq, b, hp] --> [sq, b, np, hn]
            new_tensor_shape = query_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 self.hidden_size_per_attention_head)
            query_layer = query_layer.view(*new_tensor_shape)

        # ==================================
        # Adjust key and value for inference
        # ==================================

        if inference_params:
            batch_start = inference_params.batch_size_offset
            batch_end = batch_start + key_layer.size(1)
            assert batch_end <= self.inference_key_memory.size(1)
            sequence_start = inference_params.sequence_len_offset
            sequence_end = sequence_start + key_layer.size(0)
            assert sequence_end <= self.inference_key_memory.size(0)
            # Copy keys and values.
            self.inference_key_memory[sequence_start:sequence_end,
                                      batch_start:batch_end,
                                      ...] = key_layer
            self.inference_value_memory[sequence_start:sequence_end,
                                        batch_start:batch_end,
                                        ...] = value_layer
            key_layer = self.inference_key_memory[
                :sequence_end, batch_start:batch_end, ...]
            value_layer = self.inference_value_memory[
                :sequence_end, batch_start:batch_end, ...]
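            # After the copy, key_layer/value_layer cover every position
            # generated so far (up to sequence_end), so incremental decoding
            # can feed one token per step while attending to the full cache.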

        # ===================================
        # Raw attention scores. [b, np, s, s]
        # ===================================

        # [b, np, sq, sk]
        output_size = (query_layer.size(1),
                       query_layer.size(2),
                       query_layer.size(0),
                       key_layer.size(0))

        # [sq, b, np, hn] -> [sq, b * np, hn]
        query_layer = query_layer.view(output_size[2],
                                       output_size[0] * output_size[1], -1)
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key_layer = key_layer.view(output_size[3],
                                   output_size[0] * output_size[1], -1)

        # preallocating result tensor: [b * np, sq, sk]
        matmul_result = torch.empty(
            output_size[0]*output_size[1],
            output_size[2],
            output_size[3],
            dtype=query_layer.dtype,
            device=torch.cuda.current_device())

        # Raw attention scores. [b * np, sq, sk]
        matmul_result = torch.baddbmm(
            matmul_result,
            query_layer.transpose(0, 1),   # [b * np, sq, hn]
            key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
            beta=0.0, alpha=(1.0/self.norm_factor))
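        # With beta=0.0, baddbmm writes alpha * (Q @ K^T) straight into the
        # preallocated buffer, folding the 1/norm_factor scaling into the
        # batched GEMM instead of a separate elementwise kernel.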

        # change view to [b, np, sq, sk]
        attention_scores = matmul_result.view(*output_size)

        # ===========================
        # Attention probs and dropout
        # ===========================

        # attention scores and attention mask [b, np, sq, sk]
        attention_probs = self.scale_mask_softmax(attention_scores,
                                                  attention_mask)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
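        # mpu.get_cuda_rng_tracker().fork() switches to the tensor-model-
        # parallel RNG state, giving each partition an independent but
        # reproducible dropout pattern on its own slice of attention heads.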
        with mpu.get_cuda_rng_tracker().fork():
            attention_probs = self.attention_dropout(attention_probs)

        # =========================
        # Context layer. [sq, b, hp]
        # =========================

        # value_layer -> context layer.
        # [sk, b, np, hn] --> [b, np, sq, hn]

        # context layer shape: [b, np, sq, hn]
        output_size = (value_layer.size(1),
                       value_layer.size(2),
                       query_layer.size(0),
                       value_layer.size(3))

        # change view [sk, b * np, hn]
        value_layer = value_layer.view(value_layer.size(0),
                                       output_size[0] * output_size[1], -1)

        # change view [b * np, sq, sk]
        attention_probs = attention_probs.view(output_size[0] * output_size[1],
                                               output_size[2], -1)

        # matmul: [b * np, sq, hn]
        context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

        # change view [b, np, sq, hn]
        context_layer = context_layer.view(*output_size)

        # [b, np, sq, hn] --> [sq, b, np, hn]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_layer_shape = context_layer.size()[:-2] + \
            (self.hidden_size_per_partition,)
        context_layer = context_layer.view(*new_context_layer_shape)

        # =================
        # Output. [sq, b, h]
        # =================

        output, bias = self.dense(context_layer)

        return output, bias


def bias_dropout_add(x, bias, residual, prob, training):
    # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
    out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    out = residual + out
    return out


def get_bias_dropout_add(training):
    def _bias_dropout_add(x, bias, residual, prob):
        return bias_dropout_add(x, bias, residual, prob, training)
    return _bias_dropout_add


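# The two jit-scripted variants below bake the `training` flag in as a
# constant so TorchScript can trigger the fused bias-add/dropout/residual
# path for each phase (see the note in ParallelTransformerLayer.forward).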
@torch.jit.script
def bias_dropout_add_fused_train(x: torch.Tensor,
                                 bias: torch.Tensor,
                                 residual: torch.Tensor,
                                 prob: float) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, True)


@torch.jit.script
def bias_dropout_add_fused_inference(x: torch.Tensor,
                                     bias: torch.Tensor,
                                     residual: torch.Tensor,
                                     prob: float) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, False)


class ParallelTransformerLayer(MegatronModule):
    """A single transformer layer.

    Transformer layer takes input with size [s, b, h] and returns an
    output of the same size.
    """

    def __init__(self, init_method, output_layer_init_method,
                 layer_number, layer_type=LayerType.encoder,
                 self_attn_mask_type=AttnMaskType.padding):
        args = get_args()

        super(ParallelTransformerLayer, self).__init__()
        self.layer_number = layer_number
        self.layer_type = layer_type

        self.apply_residual_connection_post_layernorm \
            = args.apply_residual_connection_post_layernorm

        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection

        # Layernorm on the input data.
        self.input_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon)

        # Self attention.
        self.self_attention = ParallelAttention(
            init_method,
            output_layer_init_method,
            layer_number,
            attention_type=AttnType.self_attn,
            attn_mask_type=self_attn_mask_type)
        self.hidden_dropout = args.hidden_dropout
        self.bias_dropout_fusion = args.bias_dropout_fusion

        # Layernorm on the attention output
        self.post_attention_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon)

        if self.layer_type == LayerType.decoder:
            self.inter_attention = ParallelAttention(
                init_method,
                output_layer_init_method,
                layer_number,
                attention_type=AttnType.cross_attn)
            # Layernorm on the attention output.
            self.post_inter_attention_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon)

        # MLP
        self.mlp = ParallelMLP(init_method,
                               output_layer_init_method)

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, enc_dec_attn_mask=None,
                inference_params=None):
        # hidden_states: [s, b, h]

        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output, attention_bias = \
            self.self_attention(
                layernorm_output,
                attention_mask,
                inference_params=inference_params)

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        # jit scripting for a nn.module (with dropout) is not
        # triggering the fusion kernel. For now, we use two
        # different nn.functional routines to account for varying
        # dropout semantics during training and inference phases.
        if self.bias_dropout_fusion:
            if self.training:
                bias_dropout_add_func = bias_dropout_add_fused_train
            else:
                bias_dropout_add_func = bias_dropout_add_fused_inference
        else:
            bias_dropout_add_func = get_bias_dropout_add(self.training)

        # re-enable torch grad to enable fused optimization.
        with torch.enable_grad():
            layernorm_input = bias_dropout_add_func(
                attention_output,
                attention_bias.expand_as(residual),
                residual,
                self.hidden_dropout)

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)

        if self.layer_type == LayerType.decoder:
            attention_output, attention_bias = \
                self.inter_attention(layernorm_output,
                                     enc_dec_attn_mask,
                                     encoder_output=encoder_output)
            # residual connection
            if self.apply_residual_connection_post_layernorm:
                residual = layernorm_output
            else:
                residual = layernorm_input

            # re-enable torch grad to enable fused optimization.
            with torch.enable_grad():
                layernorm_input = bias_dropout_add_func(
                    attention_output,
                    attention_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout)

            # Layer norm post the decoder attention
            layernorm_output = self.post_inter_attention_layernorm(layernorm_input)

        # MLP.
        mlp_output, mlp_bias = self.mlp(layernorm_output)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        # re-enable torch grad to enable fused optimization.
        with torch.enable_grad():
            output = bias_dropout_add_func(
                mlp_output,
                mlp_bias.expand_as(residual),
                residual,
                self.hidden_dropout)

        return output


class ParallelTransformer(MegatronModule):
    """Transformer class."""

    def __init__(self, init_method, output_layer_init_method,
                 layer_type=LayerType.encoder,
                 self_attn_mask_type=AttnMaskType.padding,
                 pre_process=True, post_process=True):
        super(ParallelTransformer, self).__init__()
        args = get_args()

        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection
        self.pre_process = pre_process
        self.post_process = post_process
        self.input_tensor = None

        # Store activation checkpointing flags.
        self.activations_checkpoint_method = args.activations_checkpoint_method
        self.activations_checkpoint_num_layers = args.activations_checkpoint_num_layers
        self.distribute_checkpointed_activations = args.distribute_checkpointed_activations

        # Number of layers.
        self.num_layers = mpu.get_num_layers(
            args, args.model_type == ModelType.encoder_and_decoder)

        # Transformer layers.
        def build_layer(layer_number):
            return ParallelTransformerLayer(
                init_method,
                output_layer_init_method,
                layer_number,
                layer_type=layer_type,
                self_attn_mask_type=self_attn_mask_type)
        if args.virtual_pipeline_model_parallel_size is not None:
            assert args.num_layers % args.virtual_pipeline_model_parallel_size == 0, \
                'num_layers_per_stage must be divisible by ' \
                'virtual_pipeline_model_parallel_size'
            # Number of layers in each model chunk is the number of layers in the stage,
            # divided by the number of model chunks in a stage.
            self.num_layers = self.num_layers // args.virtual_pipeline_model_parallel_size
            # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0]  [2]  [4]  [6]
            # Stage 1: [1]  [3]  [5]  [7]
            # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0, 1]  [4, 5]
            # Stage 1: [2, 3]  [6, 7]
            offset = mpu.get_virtual_pipeline_model_parallel_rank() * (
                args.num_layers // args.virtual_pipeline_model_parallel_size) + \
                (mpu.get_pipeline_model_parallel_rank() * self.num_layers)
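            # Worked example: with args.num_layers=8, 2 pipeline stages, and
            # virtual_pipeline_model_parallel_size=2, self.num_layers is 2 and
            # the chunk at virtual rank 1 on pipeline rank 1 gets
            # offset = 1 * (8 // 2) + 1 * 2 = 6, i.e. layers [6, 7] above.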
        else:
            # Each stage gets a contiguous set of layers.
            offset = mpu.get_pipeline_model_parallel_rank() * self.num_layers

        self.layers = torch.nn.ModuleList(
            [build_layer(i + 1 + offset) for i in range(self.num_layers)])

        if self.post_process:
            # Final layer norm before output.
            self.final_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon)

    def _get_layer(self, layer_number):
        return self.layers[layer_number]

    def _checkpointed_forward(self, hidden_states, attention_mask,
                              encoder_output, enc_dec_attn_mask):
        """Forward method with activation checkpointing."""
        def custom(start, end):
            def custom_forward(*inputs):
                x_ = inputs[0]
                attention_mask = inputs[1]
                encoder_output = inputs[2]
                enc_dec_attn_mask = inputs[3]
                for index in range(start, end):
                    layer = self._get_layer(index)
                    x_ = layer(x_, attention_mask, encoder_output, enc_dec_attn_mask)
                return x_
            return custom_forward

        def distribute_checkpointed_activations_helper(layer_number):
            """Distribute checkpointed activations across the tensor model
               parallel ranks if `distribute-checkpointed-activations`
               is on and either of the following conditions is met:
                 - it is not the first layer in the pipeline stage.
                   The first layer's input is used in pipeline parallelism,
                   and changing its shape throws an error in the backward pass.
                 - we are at the first pipeline stage, so the input tensor is
                   not used in pipeline parallelism. Note that no pipeline
                   parallelism is a special case of this.
            """
            not_first_layer_in_pipeline_stage = (layer_number > 0)
            is_first_pipeline_stage = (
                mpu.get_pipeline_model_parallel_rank() == 0)
            return self.distribute_checkpointed_activations and \
                (not_first_layer_in_pipeline_stage or is_first_pipeline_stage)

        if self.activations_checkpoint_method == 'uniform':
            # Uniformly divide the total number of Transformer layers and
            # checkpoint the input activation of each divided chunk.
            # A method to further reduce memory usage by storing fewer
            # checkpoints.
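            # For example, with num_layers=8 and
            # activations_checkpoint_num_layers=2, the inputs of chunks
            # [0,1], [2,3], [4,5], [6,7] are saved and each chunk is
            # re-computed as a unit in the backward pass.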
            l = 0
            while l < self.num_layers:
                hidden_states = mpu.checkpoint(
                    custom(l, l + self.activations_checkpoint_num_layers),
                    distribute_checkpointed_activations_helper(l),
                    hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
                l += self.activations_checkpoint_num_layers
        elif self.activations_checkpoint_method == 'block':
            # Checkpoint the input activation of only a set number of
            # individual Transformer layers and skip the rest.
            # A method to make full use of the device memory by removing
            # redundant re-computation.
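            # For example, with num_layers=8 and
            # activations_checkpoint_num_layers=2, only the inputs of layers
            # 0 and 1 are checkpointed; layers 2-7 run without checkpointing.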
            for l in range(self.num_layers):
                if l < self.activations_checkpoint_num_layers:
                    hidden_states = mpu.checkpoint(
                        custom(l, l + 1),
                        distribute_checkpointed_activations_helper(l),
                        hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
                else:
                    hidden_states = custom(l, l + 1)(
                        hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
        else:
            raise ValueError("Invalid activation checkpoint method.")

        return hidden_states

    def set_input_tensor(self, input_tensor):
        """Set input tensor to be used instead of forward()'s input.

        When doing pipeline parallelism the input from the previous
        stage comes from communication, not from the input, so the
        model's forward_step_func won't have it. This function is thus
        used by internal code to bypass the input provided by the
        forward_step_func."""
        self.input_tensor = input_tensor

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, enc_dec_attn_mask=None,
                inference_params=None):

        # Checks.
        if inference_params:
            assert self.activations_checkpoint_method is None, \
                'inference does not work with activation checkpointing'

        if self.pre_process:
            # Data format change to avoid explicit transposes: [b s h] --> [s b h].
            # If the input flag for fp32 residual connection is set, convert to float.
            if self.fp32_residual_connection:
                hidden_states = hidden_states.transpose(0, 1).contiguous().float()
            # Otherwise, leave it as is.
            else:
                hidden_states = hidden_states.transpose(0, 1).contiguous()
        else:
            # See set_input_tensor()
            hidden_states = self.input_tensor

        if encoder_output is not None:
            encoder_output = encoder_output.transpose(0, 1).contiguous()

        if self.activations_checkpoint_method is not None:
            hidden_states = self._checkpointed_forward(hidden_states,
                                                       attention_mask,
                                                       encoder_output,
                                                       enc_dec_attn_mask)
        else:
            for index in range(self.num_layers):
                layer = self._get_layer(index)
                hidden_states = layer(
                    hidden_states,
                    attention_mask,
                    encoder_output=encoder_output,
                    enc_dec_attn_mask=enc_dec_attn_mask,
                    inference_params=inference_params)

        # Final layer norm.
        if self.post_process:
            # Reverting data format change [s b h] --> [b s h].
            hidden_states = hidden_states.transpose(0, 1).contiguous()
            output = self.final_layernorm(hidden_states)
        else:
            output = hidden_states
        
        return output