# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Transformer."""
import math
from contextlib import nullcontext
import torch
import torch.nn.functional as F

from megatron import get_timers, get_args, core, get_num_microbatches
from .module import MegatronModule
from megatron.core import mpu, tensor_parallel
from megatron.core.enums import ModelType
from megatron.model import LayerNorm
from megatron.model.enums import AttnMaskType, LayerType, AttnType
from megatron.model.fused_softmax import FusedScaleMaskSoftmax
from megatron.model.fused_bias_gelu import bias_gelu_impl
from megatron.model.utils import attention_mask_func, openai_gelu, erf_gelu

try:
    from einops import rearrange
except ImportError:
    rearrange = None

try:
    from flash_attn.flash_attn_interface import flash_attn_unpadded_func
except ImportError:
    flash_attn_unpadded_func = None

""" We use the following notation throughout this file:
     h: hidden size
     n: number of attention heads
     p: number of model parallel partitions
     np: n/p
     hp: h/p
     hn: h/n
     b: batch size
     s: sequence length
     l: number of layers
    Transformer takes input of size [s, b, h] and returns a
    tensor of the same size. We use the following arguments:
        hyperparameters: transformer hyperparameters
"""

class DropPath(MegatronModule):
    """Drop paths (Stochastic Depth) per sample
    (when applied in main path of residual blocks).
    """

    def __init__(self, drop_prob=0.):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_state):
        if self.drop_prob == 0. or not self.training:
            return hidden_state
        keep_prob = 1 - self.drop_prob
        # work with diff dim tensors, not just 2D ConvNets
        # hidden_state: [s, b, h]
        shape = (1,) + (hidden_state.shape[1],) + (1,) * (hidden_state.ndim - 2)
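        # Illustrative note: for a 3-D hidden_state of shape [s, b, h], `shape`
        # becomes (1, b, 1), so one Bernoulli(keep_prob) draw is made per sample
        # in the batch and broadcast over the sequence and hidden dimensions;
        # surviving samples are rescaled by 1/keep_prob below so the expected
        # activation is unchanged.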
        random_tensor = keep_prob + \
            torch.rand(shape, dtype=hidden_state.dtype, device=hidden_state.device)
        random_tensor.floor_()  # binarize
        output = hidden_state.div(keep_prob) * random_tensor
        return output

def _args_to_kwargs():
    args = get_args()

    common_kwargs = {
        "params_dtype": args.params_dtype,
        "use_cpu_initialization": args.use_cpu_initialization,
        "perform_initialization": args.perform_initialization,
        "gradient_accumulation_fusion": args.gradient_accumulation_fusion,
        "sequence_parallel_enabled": args.sequence_parallel,
    }
    return common_kwargs

class ParallelMLP(MegatronModule):
    """MLP.

    MLP will take the input with h hidden state, project it to 4*h
    hidden dimension, perform nonlinear transformation, and project the
    state back into h hidden dimension.
    """

    def __init__(self, init_method, output_layer_init_method):
        super(ParallelMLP, self).__init__()
        args = get_args()

        # Project to 4h.
        self.dense_h_to_4h = tensor_parallel.ColumnParallelLinear(
            args.hidden_size,
            args.ffn_hidden_size,
            gather_output=False,
            init_method=init_method,
            skip_bias_add=True,
            async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce,
            **_args_to_kwargs())

        self.bias_gelu_fusion = args.bias_gelu_fusion
        self.activation_func = F.gelu
        if args.openai_gelu:
            self.activation_func = openai_gelu
        elif args.onnx_safe:
            self.activation_func = erf_gelu

        # Project back to h.
        self.dense_4h_to_h = tensor_parallel.RowParallelLinear(
            args.ffn_hidden_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True,
            **_args_to_kwargs())

    def forward(self, hidden_states):

        # [s, b, 4hp]
        intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
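        # Note (sketch): with tensor-model-parallel size p, the 4h output
        # dimension is split across ranks, so intermediate_parallel is
        # [s, b, 4h/p]; the bias comes back separately (skip_bias_add=True)
        # so it can be folded into the fused bias+GeLU below.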

        if self.bias_gelu_fusion:
            intermediate_parallel = \
                bias_gelu_impl(intermediate_parallel, bias_parallel)
        else:
            intermediate_parallel = \
                self.activation_func(intermediate_parallel + bias_parallel)

        # [s, b, h]
        output, output_bias = self.dense_4h_to_h(intermediate_parallel)
        return output, output_bias

class SwitchMLP(MegatronModule):
    """
    Routes input to one of N MLP "experts"
    """
    def __init__(self, init_method, output_layer_init_method):
        super(SwitchMLP, self).__init__()
        args = get_args()
        self.router = torch.nn.Linear(args.hidden_size, args.num_experts)
        self.experts = torch.nn.ModuleList()
        for i in range(args.num_experts):
            self.experts.append(ParallelMLP(init_method, output_layer_init_method))

    def forward(self, hidden_states):
        # hidden_states: [s, b, h]
        s = hidden_states.size(0)
        b = hidden_states.size(1)
        h = hidden_states.size(2)
        route = self.router(hidden_states)
        route = torch.nn.functional.softmax(route, dim=2)
        max_prob, max_ind = torch.max(route, dim=2)
        max_prob = torch.unsqueeze(max_prob, 2) # [s b 1]
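        # Illustrative example (hypothetical sizes): with num_experts=4, a token
        # whose router softmax is [0.1, 0.6, 0.2, 0.1] is dispatched only to
        # expert 1 (top-1 "switch" routing), and its expert output is later
        # scaled by the winning probability 0.6, which keeps the router
        # differentiable.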

        # TODO (rprenger): this could be made easier to read
        # Converting [s, b, h] to [s*b, h].
        # Each vector could be routed differently
        hidden_states = hidden_states.view(-1, hidden_states.size(2)) # [s*b h]
        max_prob = max_prob.view(-1, max_prob.size(2)) # [s*b 1]
        max_ind = max_ind.view(-1) # [s*b]

        output_total = torch.empty_like(hidden_states)
        output_bias_total = torch.empty_like(hidden_states)
        # TODO (rprenger): this does each expert in serial, but it could be parallelized

        for expert_num, expert in enumerate(self.experts):
            local_indices = (max_ind == expert_num).nonzero()
            hidden = hidden_states[local_indices,:]
            output, output_bias = expert(hidden)
            output_bias = output_bias.expand_as(output)
            output_total[local_indices,:] = output
            output_bias_total[local_indices,:] = output_bias

        output_total = output_total*max_prob
        output_bias_total = output_bias_total*max_prob
        output_total = output_total.view(s, b, h)
        output_bias_total = output_bias_total.view(s, b, h)

        return output_total, output_bias_total


class CoreAttention(MegatronModule):

    def __init__(self, layer_number,
                 attn_mask_type=AttnMaskType.padding):
        super(CoreAttention, self).__init__()
        args = get_args()
        self.fp16 = args.fp16
        self.bf16 = args.bf16

        self.apply_query_key_layer_scaling = args.apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = args.attention_softmax_in_fp32
        if self.apply_query_key_layer_scaling:
            self.attention_softmax_in_fp32 = True
        self.layer_number = max(1, layer_number)
        self.attn_mask_type = attn_mask_type
        self.sequence_parallel = args.sequence_parallel

        projection_size = args.kv_channels * args.num_attention_heads

        # Per attention head and per partition values.
        world_size = mpu.get_tensor_model_parallel_world_size()
        self.hidden_size_per_partition = core.utils.divide(projection_size,
                                                           world_size)
        self.hidden_size_per_attention_head = core.utils.divide(
            projection_size, args.num_attention_heads)
        self.num_attention_heads_per_partition = core.utils.divide(
            args.num_attention_heads, world_size)

        coeff = None
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
        if self.apply_query_key_layer_scaling:
            coeff = self.layer_number
            self.norm_factor *= coeff
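        # Sketch of the scaling: raw scores are computed as Q @ K^T / norm_factor.
        # With query-key layer scaling, norm_factor = layer_number * sqrt(hn) and
        # `coeff` (= layer_number) is handed to the fused softmax as its scale, so
        # the effective scaling stays 1/sqrt(hn) while the intermediate fp16
        # matmul values are kept smaller.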

        self.scale_mask_softmax = FusedScaleMaskSoftmax(
            self.fp16, self.bf16,
            self.attn_mask_type,
            args.masked_softmax_fusion,
            attention_mask_func,
            self.attention_softmax_in_fp32,
            coeff)

        # Dropout. Note that for a single iteration, this layer will generate
        # different outputs on different numbers of parallel partitions, but
        # on average it should not be partition dependent.
        self.attention_dropout = torch.nn.Dropout(args.attention_dropout)

    def forward(self, query_layer, key_layer,
                value_layer, attention_mask):

        # ===================================
        # Raw attention scores. [b, np, s, s]
        # ===================================

        # [b, np, sq, sk]
        output_size = (query_layer.size(1),
                       query_layer.size(2),
                       query_layer.size(0),
                       key_layer.size(0))

        # [sq, b, np, hn] -> [sq, b * np, hn]
        query_layer = query_layer.view(output_size[2],
                                       output_size[0] * output_size[1], -1)
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key_layer = key_layer.view(output_size[3],
                                   output_size[0] * output_size[1], -1)

        # preallocating input tensor: [b * np, sq, sk]
        matmul_input_buffer = mpu.get_global_memory_buffer().get_tensor(
            (output_size[0]*output_size[1], output_size[2], output_size[3]),
            query_layer.dtype, "mpu")

        # Raw attention scores. [b * np, sq, sk]
        matmul_result = torch.baddbmm(
            matmul_input_buffer,
            query_layer.transpose(0, 1),   # [b * np, sq, hn]
            key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
            beta=0.0, alpha=(1.0/self.norm_factor))
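        # Equivalently (sketch): matmul_result[i] = alpha * Q[i] @ K[i]^T with
        # alpha = 1/norm_factor; since beta=0.0, the contents of the preallocated
        # buffer do not contribute to the result.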

        # change view to [b, np, sq, sk]
        attention_scores = matmul_result.view(*output_size)

        # ===========================
        # Attention probs and dropout
        # ===========================

        # attention scores and attention mask [b, np, sq, sk]
        attention_probs = self.scale_mask_softmax(attention_scores,
                                                  attention_mask)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        if not self.sequence_parallel:
            with tensor_parallel.get_cuda_rng_tracker().fork():
                attention_probs = self.attention_dropout(attention_probs)
        else:
            attention_probs = self.attention_dropout(attention_probs)

        # =========================
        # Context layer. [sq, b, hp]
        # =========================

        # value_layer -> context layer.
        # [sk, b, np, hn] --> [b, np, sq, hn]

        # context layer shape: [b, np, sq, hn]
        output_size = (value_layer.size(1),
                       value_layer.size(2),
                       query_layer.size(0),
                       value_layer.size(3))

        # change view [sk, b * np, hn]
        value_layer = value_layer.view(value_layer.size(0),
                                       output_size[0] * output_size[1], -1)

        # change view [b * np, sq, sk]
        attention_probs = attention_probs.view(output_size[0] * output_size[1],
                                               output_size[2], -1)

        # matmul: [b * np, sq, hn]
        context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

        # change view [b, np, sq, hn]
        context_layer = context_layer.view(*output_size)

        # [b, np, sq, hn] --> [sq, b, np, hn]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_layer_shape = context_layer.size()[:-2] + \
            (self.hidden_size_per_partition,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer


class FlashSelfAttention(torch.nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """
    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
                 device=None, dtype=None):
        super().__init__()
        assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
                                                      'e.g., with pip install flash-attn')
        assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, q, k, v):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
        """
        assert q.dtype in [torch.float16, torch.bfloat16]
        assert q.is_cuda
        batch_size, seqlen = q.shape[0], q.shape[1]
        q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
        max_s = seqlen
        cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                  device=q.device)
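        # Illustrative example: for batch_size=2 and seqlen=4, cu_seqlens is
        # [0, 4, 8]; the kernel consumes the packed (b*s, heads, head_dim)
        # tensors plus these cumulative sequence offsets instead of a padding
        # mask.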
        output = flash_attn_unpadded_func(
            q, k, v, cu_seqlens, cu_seqlens, max_s, max_s,
            self.dropout_p if self.training else 0.0,
            softmax_scale=self.softmax_scale, causal=self.causal
        )
        output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
        return output


class ParallelAttention(MegatronModule):
    """Parallel self-attention layer abstract class.

    Self-attention layer takes input with size [s, b, h]
    and returns output of the same size.
    """

    def __init__(self, init_method,
                 output_layer_init_method, layer_number,
                 attention_type=AttnType.self_attn,
                 attn_mask_type=AttnMaskType.padding):
        super(ParallelAttention, self).__init__()
        args = get_args()
        self.layer_number = max(1, layer_number)
        self.attention_type = attention_type
        self.attn_mask_type = attn_mask_type
        self.params_dtype = args.params_dtype
        self.sequence_parallel = args.sequence_parallel

        self.use_flash_attn = args.use_flash_attn
        if self.use_flash_attn:
            if flash_attn_unpadded_func is None:
                raise ImportError('FlashAttention is not installed, please install with '
                                  'pip install flash-attn')
            assert attention_type == AttnType.self_attn, ('FlashAttention code path only supports '
                                                          'self-attention for now')
            assert self.attn_mask_type == AttnMaskType.causal, ('FlashAttention code path only '
                                                                'supports causal mask for now')
            if rearrange is None:
                raise ImportError('einops is not installed, please install with pip install einops')

        projection_size = args.kv_channels * args.num_attention_heads

        # Per attention head and per partition values.
        world_size = mpu.get_tensor_model_parallel_world_size()
        self.hidden_size_per_attention_head = core.utils.divide(
            projection_size, args.num_attention_heads)
        self.num_attention_heads_per_partition = core.utils.divide(
            args.num_attention_heads, world_size)

        # Strided linear layer.
        if attention_type == AttnType.self_attn:
            self.query_key_value = tensor_parallel.ColumnParallelLinear(
                args.hidden_size,
                3 * projection_size,
                gather_output=False,
                init_method=init_method,
                async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce,
                **_args_to_kwargs())
        else:
            assert attention_type == AttnType.cross_attn
            self.query = tensor_parallel.ColumnParallelLinear(
                args.hidden_size,
                projection_size,
                gather_output=False,
                init_method=init_method,
                async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce,
                **_args_to_kwargs())

            self.key_value = tensor_parallel.ColumnParallelLinear(
                args.hidden_size,
                2 * projection_size,
                gather_output=False,
                init_method=init_method,
                async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce,
                **_args_to_kwargs())

        self.core_attention = CoreAttention(self.layer_number,
                                            self.attn_mask_type)
        self.checkpoint_core_attention = args.recompute_granularity == 'selective'
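        # With 'selective' recompute granularity, only this core attention block
        # (the memory-heavy softmax/dropout/bmm part) is checkpointed and
        # recomputed in the backward pass instead of storing its activations.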

        if self.use_flash_attn:
            self.core_attention_flash = FlashSelfAttention(
                causal=True, attention_dropout=args.attention_dropout
            )

        # Output.
        self.dense = tensor_parallel.RowParallelLinear(
            projection_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True,
            **_args_to_kwargs())

    def _checkpointed_attention_forward(self, query_layer, key_layer,
                                        value_layer, attention_mask):
        """Forward method with activation checkpointing."""
        def custom_forward(*inputs):
            query_layer = inputs[0]
            key_layer = inputs[1]
            value_layer = inputs[2]
            attention_mask = inputs[3]
            output_ = self.core_attention(query_layer, key_layer,
                                          value_layer, attention_mask)
            return output_

        hidden_states = tensor_parallel.checkpoint(
            custom_forward,
            False, query_layer, key_layer, value_layer, attention_mask)

        return hidden_states

    def _allocate_memory(self, inference_max_sequence_len, batch_size):
        return torch.empty(
            inference_max_sequence_len,
            batch_size,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
            dtype=self.params_dtype,
            device=torch.cuda.current_device())

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, inference_params=None):
        # hidden_states: [sq, b, h]

        # =================================================
        # Pre-allocate memory for key-values for inference.
        # =================================================

        if inference_params:
            if self.layer_number not in inference_params.key_value_memory_dict:
                inf_max_seq_len = inference_params.max_sequence_len
                inf_max_batch_size = inference_params.max_batch_size
                inference_key_memory = self._allocate_memory(
                    inf_max_seq_len, inf_max_batch_size)
                inference_value_memory = self._allocate_memory(
                    inf_max_seq_len, inf_max_batch_size)
                inference_params.key_value_memory_dict[self.layer_number] = (
                    inference_key_memory, inference_value_memory)
            else:
                inference_key_memory, inference_value_memory = \
                    inference_params.key_value_memory_dict[self.layer_number]

        # =====================
        # Query, Key, and Value
        # =====================

        if self.attention_type == AttnType.self_attn:
            # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
            mixed_x_layer, _ = self.query_key_value(hidden_states)

            # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
            new_tensor_shape = mixed_x_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 3 * self.hidden_size_per_attention_head)
            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
            (query_layer,
             key_layer,
             value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_x_layer, 3)
        else:
            # Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
            mixed_kv_layer, _ = self.key_value(encoder_output)

            # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
            new_tensor_shape = mixed_kv_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 2 * self.hidden_size_per_attention_head)
            mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)

            # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
            (key_layer,
             value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_kv_layer, 2)

            # Attention head [sq, b, h] --> [sq, b, hp]
            query_layer, _ = self.query(hidden_states)
            # [sq, b, hp] --> [sq, b, np, hn]
            new_tensor_shape = query_layer.size()[:-1] + \
                (self.num_attention_heads_per_partition,
                 self.hidden_size_per_attention_head)
            query_layer = query_layer.view(*new_tensor_shape)

        # ==================================
        # Adjust key and value for inference
        # ==================================

        if inference_params:
            batch_start = inference_params.batch_size_offset
            batch_end = batch_start + key_layer.size(1)
            assert batch_end <= inference_key_memory.size(1)
            sequence_start = inference_params.sequence_len_offset
            sequence_end = sequence_start + key_layer.size(0)
            assert sequence_end <= inference_key_memory.size(0)
            # Copy key and values.
            inference_key_memory[sequence_start:sequence_end,
                                 batch_start:batch_end, ...] = key_layer
            inference_value_memory[sequence_start:sequence_end,
                                   batch_start:batch_end, ...] = value_layer
            key_layer = inference_key_memory[
                :sequence_end, batch_start:batch_end, ...]
            value_layer = inference_value_memory[
                :sequence_end, batch_start:batch_end, ...]

        # ==================================
        # core attention computation
        # ==================================

        if not self.use_flash_attn:
            if self.checkpoint_core_attention:
                context_layer = self._checkpointed_attention_forward(
                    query_layer, key_layer, value_layer, attention_mask)
            else:
                context_layer = self.core_attention(
                    query_layer, key_layer, value_layer, attention_mask)
        else:
            q, k, v = [rearrange(x, 's b ... -> b s ...').contiguous()
                       for x in (query_layer, key_layer, value_layer)]
            if not self.sequence_parallel:
                with tensor_parallel.get_cuda_rng_tracker().fork():
                    context_layer = self.core_attention_flash(q, k, v)
            else:
                context_layer = self.core_attention_flash(q, k, v)
            context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous()

        # =================
        # Output. [sq, b, h]
        # =================

        output, bias = self.dense(context_layer)

        return output, bias


def bias_dropout_add(x, bias, residual, prob, training):
    # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
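    # In short: out = residual + dropout(x + bias). Keeping this as one small
    # function lets the @torch.jit.script wrappers below fuse the elementwise
    # add, dropout, and residual add.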
    out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    out = residual + out
    return out


def get_bias_dropout_add(training):
    def _bias_dropout_add(x, bias, residual, prob):
        return bias_dropout_add(x, bias, residual, prob, training)
    return _bias_dropout_add


@torch.jit.script
def bias_dropout_add_fused_train(x: torch.Tensor,
                                 bias: torch.Tensor,
                                 residual: torch.Tensor,
                                 prob: float) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, True)


@torch.jit.script
def bias_dropout_add_fused_inference(x: torch.Tensor,
                                     bias: torch.Tensor,
                                     residual: torch.Tensor,
                                     prob: float) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, False)


class ParallelTransformerLayer(MegatronModule):
    """A single transformer layer.

    Transformer layer takes input with size [s, b, h] and returns an
    output of the same size.
    """

    def __init__(self, init_method, output_layer_init_method,
                 layer_number, layer_type=LayerType.encoder,
                 self_attn_mask_type=AttnMaskType.padding,
                 drop_path_rate=0.):
        args = get_args()

        super(ParallelTransformerLayer, self).__init__()
        self.layer_number = layer_number
        self.layer_type = layer_type

        self.apply_residual_connection_post_layernorm \
            = args.apply_residual_connection_post_layernorm

        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection

        apply_layernorm_1p = False
        if args.apply_layernorm_1p:
            apply_layernorm_1p = True

        # Layernorm on the input data.
        self.input_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon,
            no_persist_layer_norm=args.no_persist_layer_norm,
            sequence_parallel=args.sequence_parallel,
            apply_layernorm_1p=apply_layernorm_1p)

        # Self attention.
        self.self_attention = ParallelAttention(
            init_method,
            output_layer_init_method,
            layer_number,
            attention_type=AttnType.self_attn,
            attn_mask_type=self_attn_mask_type)
        self.hidden_dropout = args.hidden_dropout
        self.bias_dropout_fusion = args.bias_dropout_fusion
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else None

        # Layernorm on the attention output
        self.post_attention_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon,
            no_persist_layer_norm=args.no_persist_layer_norm,
            sequence_parallel=args.sequence_parallel,
            apply_layernorm_1p=apply_layernorm_1p)

        if self.layer_type == LayerType.decoder:
            self.inter_attention = ParallelAttention(
                init_method,
                output_layer_init_method,
                layer_number,
                attention_type=AttnType.cross_attn)
            # Layernorm on the attention output.
            self.post_inter_attention_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon,
                no_persist_layer_norm=args.no_persist_layer_norm,
                sequence_parallel=args.sequence_parallel,
                apply_layernorm_1p=apply_layernorm_1p)

        # MLP
        if args.num_experts is not None:
            self.mlp = SwitchMLP(init_method, output_layer_init_method)
        else:
            self.mlp = ParallelMLP(init_method, output_layer_init_method)

        # Set bias+dropout+add fusion grad_enable execution handler.
        TORCH_MAJOR = int(torch.__version__.split('.')[0])
        TORCH_MINOR = int(torch.__version__.split('.')[1])
        use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)
        self.bias_dropout_add_exec_handler = \
                nullcontext if use_nvfuser else torch.enable_grad

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, enc_dec_attn_mask=None,
                inference_params=None):
        # hidden_states: [s, b, h]

        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output, attention_bias = \
            self.self_attention(
                layernorm_output,
                attention_mask,
                inference_params=inference_params)

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        if self.drop_path is None:
            # jit scripting for an nn.Module (with dropout) does not
            # trigger the fusion kernel. For now, we use two
            # different nn.functional routines to account for varying
            # dropout semantics during training and inference phases.
            if self.bias_dropout_fusion:
                if self.training:
                    bias_dropout_add_func = bias_dropout_add_fused_train
                else:
                    bias_dropout_add_func = bias_dropout_add_fused_inference
            else:
                bias_dropout_add_func = get_bias_dropout_add(self.training)

            with self.bias_dropout_add_exec_handler():
                layernorm_input = bias_dropout_add_func(
                    attention_output,
                    attention_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout)
        else:
            out = torch.nn.functional.dropout(attention_output + attention_bias,
                                              p=self.hidden_dropout,
                                              training=self.training)
            layernorm_input = residual + self.drop_path(out)

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)

        if self.layer_type == LayerType.decoder:
            attention_output, attention_bias = \
                self.inter_attention(layernorm_output,
                                     enc_dec_attn_mask,
                                     encoder_output=encoder_output)
            # residual connection
            if self.apply_residual_connection_post_layernorm:
                residual = layernorm_output
            else:
                residual = layernorm_input

            with self.bias_dropout_add_exec_handler():
                layernorm_input = bias_dropout_add_func(
                    attention_output,
                    attention_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout)

            # Layer norm post the decoder attention
            layernorm_output = self.post_inter_attention_layernorm(layernorm_input)

        # MLP.
        mlp_output, mlp_bias = self.mlp(layernorm_output)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        if self.drop_path is None:
            with self.bias_dropout_add_exec_handler():
                output = bias_dropout_add_func(
                    mlp_output,
                    mlp_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout)

            # Jit compiled function creates 'view' tensor. This tensor
            # potentially gets saved in the MPU checkpoint function context,
            # which rejects view tensors. While making a viewless tensor here
            # won't result in memory savings (like the data loader, or
            # p2p_communication), it serves to document the origin of this
            # 'view' tensor.
            output = core.utils.make_viewless_tensor(inp = output,
                                                     requires_grad = output.requires_grad,
                                                     keep_graph = True)

        else:
            out = torch.nn.functional.dropout(mlp_output + mlp_bias,
                                              p=self.hidden_dropout,
                                              training=self.training)
            output = residual + self.drop_path(out)

        return output


class NoopTransformerLayer(MegatronModule):
    """A single 'no-op' transformer layer.

    The sole purpose of this layer is for when a standalone embedding layer
    is used (i.e., args.standalone_embedding_stage == True). In this case,
    zero transformer layers are assigned when pipeline rank == 0. Additionally,
    when virtual pipeline rank >= 1, zero total model parameters are created
    (virtual rank 0 contains the input embedding). This results in the model's
    input and output tensors being the same, which causes an error when
    performing certain memory optimizations on the output tensor (e.g.,
    deallocating it). Thus, this layer disconnects the input from the output
    via a clone. Since ranks containing a no-op layer are generally under-
    utilized (both compute and memory), there's no worry of any performance
    degradation.
    """

    def __init__(self, layer_number):
        super().__init__()
        self.layer_number = layer_number

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, enc_dec_attn_mask=None,
                inference_params=None):
        return hidden_states.clone()


def _get_num_layers(args, is_encoder_and_decoder_model, is_decoder=False):
    """Compute the number of transformer layers resident on the current rank."""
    if mpu.get_pipeline_model_parallel_world_size() > 1:
        if is_encoder_and_decoder_model:
            assert args.pipeline_model_parallel_split_rank is not None

            # When a standalone embedding stage is used, a rank is taken from
            # the encoder's ranks, to be used for the encoder's embedding
            # layer. This way, the rank referenced by the 'split rank' remains
            # the same whether or not a standalone embedding stage is used.
            num_ranks_in_encoder = (
                args.pipeline_model_parallel_split_rank - 1
                if args.standalone_embedding_stage else
                args.pipeline_model_parallel_split_rank
            )
            num_ranks_in_decoder = args.transformer_pipeline_model_parallel_size - num_ranks_in_encoder
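            # Worked example (hypothetical sizes): with a pipeline of 8 ranks,
            # split rank 4, and no standalone embedding stage, the encoder gets
            # 4 ranks and the decoder gets 8 - 4 = 4 ranks.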
            assert args.encoder_num_layers % num_ranks_in_encoder == 0, \
                    'encoder_num_layers (%d) must be divisible by number of ranks given to encoder (%d)' % (args.encoder_num_layers, num_ranks_in_encoder)
            assert args.decoder_num_layers % num_ranks_in_decoder == 0, \
                    'decoder_num_layers (%d) must be divisible by number of ranks given to decoder (%d)' % (args.decoder_num_layers, num_ranks_in_decoder)
            if mpu.is_pipeline_stage_before_split():
                num_layers = (
                    0
                    if args.standalone_embedding_stage
                    and mpu.get_pipeline_model_parallel_rank() == 0 else
                    args.encoder_num_layers // num_ranks_in_encoder
                )
            else:
                num_layers = args.decoder_num_layers // num_ranks_in_decoder
        else:
            assert args.num_layers == args.encoder_num_layers
            assert args.num_layers % args.transformer_pipeline_model_parallel_size == 0, \
                'num_layers must be divisible by transformer_pipeline_model_parallel_size'

            # When a standalone embedding stage is used, all transformer layers
            # are divided among pipeline rank >= 1, while on pipeline rank 0,
            # ranks either contain the input embedding layer (virtual pp rank 0),
            # or no layers at all (virtual pp rank >= 1).
            num_layers = (
                0
                if args.standalone_embedding_stage
                and mpu.get_pipeline_model_parallel_rank() == 0 else
                args.num_layers // args.transformer_pipeline_model_parallel_size
            )
    else:
        if not is_decoder:
            num_layers = args.encoder_num_layers
        else:
            num_layers = args.decoder_num_layers
    return num_layers


class ParallelTransformer(MegatronModule):
    """Transformer class."""

    def __init__(self, init_method, output_layer_init_method,
                 layer_type=LayerType.encoder,
                 self_attn_mask_type=AttnMaskType.padding,
                 post_layer_norm=True,
                 pre_process=True, post_process=True,
                 drop_path_rate=0.0):
        super(ParallelTransformer, self).__init__()
        args = get_args()

        self.layer_type = layer_type
        self.model_type = args.model_type
        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection
        self.post_layer_norm = post_layer_norm
897
898
        self.pre_process = pre_process
        self.post_process = post_process
        self.input_tensor = None
        self.drop_path_rate = drop_path_rate
        self.transformer_impl = args.transformer_impl

        # Store activation checkpointing flag.
        self.recompute_granularity = args.recompute_granularity
        self.recompute_method = args.recompute_method
        self.recompute_num_layers = args.recompute_num_layers
        self.distribute_saved_activations = \
            args.distribute_saved_activations and not args.sequence_parallel

        self.sequence_parallel = args.sequence_parallel

        # Transformer Engine Init.
        if self.transformer_impl == 'transformer_engine':
            global transformer_engine
            import transformer_engine
        self.use_fp8 = args.fp8_e4m3 or args.fp8_hybrid
        self.fp8_recipe = None
        self.fp8_group = mpu.get_data_parallel_group()
        if self.use_fp8:
            if args.fp8_e4m3:
                fp8_format = transformer_engine.common.recipe.Format.E4M3
            elif args.fp8_hybrid:
                fp8_format = transformer_engine.common.recipe.Format.HYBRID
            self.fp8_recipe = transformer_engine.common.recipe.DelayedScaling(
                margin=args.fp8_margin,
                interval=args.fp8_interval,
                fp8_format=fp8_format,
                amax_history_len=args.fp8_amax_history_len,
                amax_compute_algo=args.fp8_amax_compute_algo,
                override_linear_precision=(False, False, not args.fp8_wgrad),
            )
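            # Note: with the HYBRID format, forward tensors use E4M3 and
            # backward gradients use E5M2; DelayedScaling keeps an amax history
            # to derive per-tensor scaling factors.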

        self.num_microbatches_in_previous_step = -1
        self.microbatch_count = 0
        self.checkpoint_core_attention = args.recompute_granularity == 'selective'

        # Number of layers.
        self.num_layers = _get_num_layers(
            args,
            args.model_type == ModelType.encoder_and_decoder,
            layer_type == LayerType.decoder)

        self.drop_path_rates = [rate.item() for rate in torch.linspace(0, self.drop_path_rate, args.num_layers)]
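        # Stochastic-depth schedule (example): with 24 layers and
        # drop_path_rate=0.1, layer 1 gets rate 0.0 and layer 24 gets 0.1,
        # increasing linearly in between.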

        # Transformer layers.
        def build_layer(layer_number):
            if args.transformer_impl == 'local':
                return ParallelTransformerLayer(
                    init_method,
                    output_layer_init_method,
                    layer_number,
                    layer_type=layer_type,
                    self_attn_mask_type=self_attn_mask_type,
                    drop_path_rate=self.drop_path_rates[layer_number - 1])
            else:
                return transformer_engine.pytorch.TransformerLayer(
                    args.hidden_size,
                    args.ffn_hidden_size,
                    args.num_attention_heads,
                    layernorm_epsilon=args.layernorm_epsilon,
                    hidden_dropout=args.hidden_dropout,
                    attention_dropout=args.attention_dropout,
                    init_method=init_method,
                    output_layer_init_method=output_layer_init_method,
                    layer_number=layer_number,
                    kv_channels=args.kv_channels,
                    self_attn_mask_type=self_attn_mask_type.name,
                    tp_group=mpu.get_tensor_model_parallel_group(),
                    get_rng_state_tracker=tensor_parallel.get_cuda_rng_tracker,
                    fuse_wgrad_accumulation=args.gradient_accumulation_fusion,
                    apply_query_key_layer_scaling=args.apply_query_key_layer_scaling,
                    attention_softmax_in_fp32=args.attention_softmax_in_fp32,
                    seq_length=args.seq_length,
                    micro_batch_size=args.micro_batch_size,
                    sequence_parallel=args.sequence_parallel,
                    params_dtype=args.params_dtype,
                    apply_residual_connection_post_layernorm=args.apply_residual_connection_post_layernorm,
                    output_layernorm=False,
                    layer_type="encoder",
                    drop_path_rate=self.drop_path_rates[layer_number - 1],
                    set_parallel_mode=True,
                    fuse_qkv_params=True)

        if args.virtual_pipeline_model_parallel_size is not None:
            assert args.num_layers % args.virtual_pipeline_model_parallel_size == 0, \
                'num_layers_per_stage must be divisible by ' \
                'virtual_pipeline_model_parallel_size'
            assert args.model_type != ModelType.encoder_and_decoder
            # Number of layers in each model chunk is the number of layers in the stage,
            # divided by the number of model chunks in a stage.
            self.num_layers = self.num_layers // args.virtual_pipeline_model_parallel_size
            # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0]  [2]  [4]  [6]
            # Stage 1: [1]  [3]  [5]  [7]
            # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0, 1]  [4, 5]
            # Stage 1: [2, 3]  [6, 7]
            offset = mpu.get_virtual_pipeline_model_parallel_rank() * (
                args.num_layers // args.virtual_pipeline_model_parallel_size) + \
                (mpu.get_pipeline_model_parallel_rank() * self.num_layers)
        else:
            # Each stage gets a contiguous set of layers.
            if args.model_type == ModelType.encoder_and_decoder and \
                    mpu.get_pipeline_model_parallel_world_size() > 1:
                pipeline_rank = mpu.get_pipeline_model_parallel_rank()
                if layer_type == LayerType.encoder:
                    offset = pipeline_rank * self.num_layers
                else:
                    num_ranks_in_enc = args.pipeline_model_parallel_split_rank
                    offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
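                    # e.g. (hypothetical sizes): with a 4/4 encoder/decoder
                    # split and 3 decoder layers per rank, decoder pipeline
                    # rank 5 gets offset (5 - 4) * 3 = 3.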
            else:
                offset = mpu.get_pipeline_model_parallel_rank() * self.num_layers

        if self.num_layers == 0:
            # When a standalone embedding stage is used (e.g.,
            # args.standalone_embedding_stage == True), virtual pipeline ranks
            # on pipeline rank 0 will have zero transformer layers assigned to
            # them. This results in the model's input and output tensors being
            # the same, which will cause failures for certain output tensor
            # optimizations (e.g., pipeline output deallocation). To remedy
            # this, we assign a 'no-op' layer on these ranks, which will
            # disconnect the input tensor from the output tensor.
            self.num_layers = 1
            self.layers = torch.nn.ModuleList([ NoopTransformerLayer(1) ])
        else:
            self.layers = torch.nn.ModuleList(
                [build_layer(i + 1 + offset) for i in range(self.num_layers)])

        apply_layernorm_1p = False
        if args.apply_layernorm_1p:
            apply_layernorm_1p = True

        if self.post_process and self.post_layer_norm:
            # Final layer norm before output.
            self.final_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon,
                no_persist_layer_norm=args.no_persist_layer_norm,
                sequence_parallel=args.sequence_parallel,
                apply_layernorm_1p=apply_layernorm_1p)

    def _get_layer(self, layer_number):
        return self.layers[layer_number]

    def _checkpointed_forward(self, hidden_states, attention_mask,
                              encoder_output, enc_dec_attn_mask, is_first_microbatch):
        """Forward method with activation checkpointing."""
        def custom(start, end, is_transformer_engine=False):
            def custom_forward(*args, **kwargs):
                for index in range(start, end):
                    layer = self._get_layer(index)
                    x_ = layer(*args, **kwargs)
                return x_
            def custom_forward_transformer_engine(*args, **kwargs):
                return custom_forward(*args, is_first_microbatch=is_first_microbatch, **kwargs)
            if not is_transformer_engine:
                return custom_forward
            else:
                return custom_forward_transformer_engine

        if self.recompute_method == 'uniform':
            # Uniformly divide the total number of Transformer layers and checkpoint
            # the input activation of each divided chunk.
            # Checkpointing per chunk (rather than per layer) stores fewer
            # activations, further reducing memory usage.
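            # e.g. recompute_num_layers=2 checkpoints the input of every
            # consecutive 2-layer chunk and recomputes that chunk during the
            # backward pass.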
            l = 0
            while l < self.num_layers:
                if self.transformer_impl == 'transformer_engine':
                    hidden_states = transformer_engine.pytorch.distributed.checkpoint(
                        custom(l, l + self.recompute_num_layers, is_transformer_engine=True),
                        self.distribute_saved_activations,
                        tensor_parallel.get_cuda_rng_tracker,
                        mpu.get_tensor_model_parallel_group(),
                        hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
                else:
                    hidden_states = tensor_parallel.checkpoint(
                        custom(l, l + self.recompute_num_layers),
                        self.distribute_saved_activations,
                        hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)

                l += self.recompute_num_layers

        elif self.recompute_method == 'block':
            # Checkpoint the input activation of only a set number of individual
            # Transformer layers and skip the rest.
            # This makes fuller use of device memory and avoids redundant
            # re-computation for the layers that are not checkpointed.
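            # For example, with recompute_num_layers = 2, only the first two layers
            # on this pipeline stage are checkpointed and re-run in the backward
            # pass; the remaining layers keep their activations in memory as usual.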
            for l in range(self.num_layers):
                if l < self.recompute_num_layers:
                    if self.transformer_impl == 'transformer_engine':
                        hidden_states = transformer_engine.pytorch.distributed.checkpoint(
                            custom(l, l + 1, is_transformer_engine=True),
                            self.distribute_saved_activations,
                            tensor_parallel.get_cuda_rng_tracker,
                            mpu.get_tensor_model_parallel_group(),
                            hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
                    else:
                        hidden_states = tensor_parallel.checkpoint(
                            custom(l, l + 1),
                            self.distribute_saved_activations,
                            hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
                else:
                    if self.transformer_impl == 'transformer_engine':
                        hidden_states = custom(l, l + 1, is_transformer_engine=True)(
                            hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
                    else:
                        hidden_states = custom(l, l + 1)(
                            hidden_states, attention_mask, encoder_output, enc_dec_attn_mask)
        else:
            raise ValueError("Invalid activation recompute method.")

        return hidden_states

    def set_input_tensor(self, input_tensor):
        """Set input tensor to be used instead of forward()'s input.

        When doing pipeline parallelism the input from the previous
        stage comes from communication, not from the arguments of
        forward(), so the model's forward_step_func won't have it. This
        function is thus used by internal code to bypass the input
        provided by the forward_step_func."""
        self.input_tensor = input_tensor

    def forward(self, hidden_states, attention_mask,
                encoder_output=None, enc_dec_attn_mask=None,
                inference_params=None):
        # hidden_states: [s, b, h]

        # Checks.
        if inference_params:
            assert self.recompute_granularity is None, \
                'inference does not work with activation checkpointing'

        if not self.pre_process:
            # See set_input_tensor()
            hidden_states = self.input_tensor

        # Viewless tensor.
        # - We only need to create a viewless tensor in the case of micro batch
        #   size (mbs) == 1, since in this case, 'hidden_states.transpose()'
        #   above creates a view tensor, and '.contiguous()' is a pass-through.
        #   For mbs >= 2, '.contiguous()' creates a new tensor, eliminating
        #   the need to make it viewless.
        #
        #   However, we don't explicitly check mbs == 1 here because
        #   make_viewless_tensor() has negligible overhead when its input
        #   is already viewless.
        #
        # - For the 'else' case above, calling make_viewless_tensor() here is
        #   likely redundant, since p2p_communication.py (likely originator)
        #   already creates viewless tensors. That said, make_viewless_tensor()
        #   is called here to be future-proof and corner-case-proof.
        hidden_states = core.utils.make_viewless_tensor(
            hidden_states,
            requires_grad=True,
            keep_graph=True,
        )

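        # With sequence parallelism, each tensor-parallel rank holds a different
        # slice of the sequence, so dropout inside the layers must draw from the
        # per-rank model-parallel RNG state that is forked here rather than the
        # default state shared across ranks.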
        if self.sequence_parallel:
            rng_context = tensor_parallel.get_cuda_rng_tracker().fork()
        else:
            rng_context = nullcontext()

        with rng_context:
            # The fp8_autocast context manager is a no-op when enabled=False.
            # The if...else serves to short circuit name resolution for fp8_autocast
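            # (likely because transformer_engine is imported lazily only when the TE
            # implementation is selected, so the name may be undefined here otherwise).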
            with transformer_engine.pytorch.fp8_autocast(
                enabled=self.use_fp8,
                fp8_recipe=self.fp8_recipe,
                fp8_group=self.fp8_group
            ) if self.use_fp8 else nullcontext():
                # Determine whether the current iteration is the first microbatch of the step.
                if self.num_microbatches_in_previous_step != get_num_microbatches():
                    self.microbatch_count = 0 # Reset count on new batch size rampup interval
                self.num_microbatches_in_previous_step = get_num_microbatches()
                is_first_microbatch = self.microbatch_count % get_num_microbatches() == 0
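                # Transformer Engine uses is_first_microbatch as a hint to cache the
                # fp8-cast weights across the microbatches of a global batch, so the
                # cast is only redone when the weights have actually been updated
                # (based on TE's documented is_first_microbatch behaviour).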

                # Forward pass.
                if self.recompute_granularity == 'full':
                    hidden_states = self._checkpointed_forward(hidden_states,
                                                               attention_mask,
                                                               encoder_output,
                                                               enc_dec_attn_mask,
                                                               is_first_microbatch)
                else:
                    forward_kwargs = {
                        'encoder_output': encoder_output,
                        'enc_dec_attn_mask': enc_dec_attn_mask,
                        'inference_params': inference_params,
                    }

                    if self.transformer_impl == 'transformer_engine':
                        forward_kwargs['is_first_microbatch'] = is_first_microbatch
                        forward_kwargs['checkpoint_core_attention'] = self.checkpoint_core_attention
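                        # checkpoint_core_attention corresponds to selective recompute
                        # granularity: only the memory-heavy core attention is
                        # recomputed in the backward pass, not the whole layer.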

                    for index in range(self.num_layers):
                        layer = self._get_layer(index)

                        hidden_states = layer(
                            hidden_states,
                            attention_mask,
                            **forward_kwargs)

                # Skip counter update for eval and activation checkpointing
                if torch.is_grad_enabled() and self.training:
                    self.microbatch_count += 1

        # Final layer norm.
        if self.post_process and self.post_layer_norm:
            hidden_states = self.final_layernorm(hidden_states)

        return hidden_states