# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Linear API"""
import os
import logging
from typing import Any, Callable, Dict, Optional, Tuple, Union

import torch

import transformer_engine_torch as tex

from .base import (
    get_workspace,
    get_ub,
    TransformerEngineBaseModule,
    _2X_ACC_FPROP,
    _2X_ACC_DGRAD,
    _2X_ACC_WGRAD,
)
from ._common import _noop_cat
from ..fp8 import get_fp8_te_dtype, FP8GlobalStateManager
from ..utils import (
    divide,
    cast_if_needed,
    assert_dim_for_fp8_exec,
    clear_tensor_data,
    init_method_constant,
    requires_grad,
)
from ..distributed import (
    set_tensor_model_parallel_attributes,
    get_distributed_world_size,
    allreduce,
    reduce_scatter_along_first_dim,
    gather_along_first_dim,
    is_fp8_activation_recompute_enabled,
    in_fp8_activation_recompute_phase,
    _fsdp_scatter_tensors,
    _fsdp_gather_tensors,
)
from ..cpp_extensions import (
    fp8_gemm,
    gemm,
    fp8_cast_transpose_fused,
    cast_to_fp8,
)
from ..constants import GemmParallelModes, dist_group_type
from ..jit import no_torch_dynamo
from ..graph import is_graph_capturing
from ..float8_tensor import Float8Tensor

# NVTE_DEBUG = 0/1 # disables/enables debug mode, default = 0
_NVTE_DEBUG = int(os.getenv("NVTE_DEBUG", "0"))
# NVTE_DEBUG_LEVEL = 0/1/2 # controls verbosity of debug logging (higher is more verbose), default = 0
_NVTE_DEBUG_LEVEL = int(os.getenv("NVTE_DEBUG_LEVEL", "0"))
log_level = _NVTE_DEBUG * _NVTE_DEBUG_LEVEL
log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
logging.basicConfig(
    format="[%(levelname)-8s | %(name)-19s]: %(message)s",
    level=log_levels[log_level if log_level in [0, 1, 2] else 2],
)
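# Example (illustrative): enable verbose logging from this module by exporting both
# variables before launching the process, e.g.
#   NVTE_DEBUG=1 NVTE_DEBUG_LEVEL=2 python train.py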

__all__ = ["Linear"]


class _Linear(torch.autograd.Function):
    """Linear semi-top level module
    Calls custom cuda extensions.
    """

    @staticmethod
    def forward(
        ctx,
        weight: Union[Float8Tensor, torch.Tensor],
        weight_fp8: Optional[Float8Tensor],
        inp: torch.Tensor,
        bias: torch.Tensor,
        use_bias: bool,
        is_first_microbatch: Union[bool, None],
        fp8: bool,
        fp8_calibration: bool,
        fp8_meta: Dict[str, Any],
        fuse_wgrad_accumulation: bool,
        cpu_offloading: bool,
        tp_group: Union[dist_group_type, None],
        tp_size: int,
        sequence_parallel: bool,
        tensor_parallel: bool,
        activation_dtype: torch.dtype,
        parallel_mode: Union[str, None],
        is_grad_enabled: bool,
        ub_overlap_rs: bool,
        ub_overlap_ag: bool,
        ub_name: str,
        is_first_module_in_mha: bool,
        fsdp_group: Union[dist_group_type, None],
    ) -> torch.Tensor:
        logger = logging.getLogger("Linear")
        is_input_fp8 = isinstance(inp, Float8Tensor)
        if is_input_fp8:
            fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_INPUT] = inp._scale_inv[0]

        # Make sure input dimensions are compatible
        in_features = weight.shape[-1]
        assert inp.shape[-1] == in_features, "GEMM not possible"
        inputmat = inp.view(-1, in_features)
        if fp8:
            assert_dim_for_fp8_exec(inputmat)
            assert_dim_for_fp8_exec(weight)

        tp_world_size = get_distributed_world_size(tp_group)
        ub_overlap_rs = False if tp_world_size == 1 else ub_overlap_rs

        # Cast input to expected dtype
        inputmat = cast_if_needed(inputmat, activation_dtype)
        inputmat_t = None
        inputmat_no_fp8 = inputmat

        if fp8:
            fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
            if isinstance(inputmat, Float8Tensor):
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat_t = inputmat.transpose_2d()
            else:
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat, inputmat_t = fp8_cast_transpose_fused(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                    )
                else:
                    # FP8 input for forward
                    inputmat = cast_to_fp8(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                    )

        # Column Parallel Linear
        if parallel_mode == "column" and sequence_parallel:
            inputmat_total, _ = gather_along_first_dim(inputmat, tp_group)
        else:
            inputmat_total = inputmat
        if fp8:
            logger.debug("Running forward in FP8")

            bias_dtype = torch.bfloat16 if activation_dtype == torch.float32 else activation_dtype
            bias = cast_if_needed(bias, bias_dtype) if use_bias else bias

            # Use FP8 weights
            if weight_fp8 is None:
                weight_fp8 = weight

            assert isinstance(weight_fp8, Float8Tensor)

            if is_first_module_in_mha:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_meta["scaling_fwd"],
                    fp8_dtype_forward,
                    torch.uint8,
                )
            else:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    None,
                    None,
                    None,
                    activation_dtype,
                )

            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name + "_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // tp_world_size
                dim_size[1] = weight_fp8.size(0)
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_RS_P2P
                    else:
                        ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_RS
                    else:
                        ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS
                if ub_obj_projout.is_fp8_ubuf():
                    proj_out_index = tex.FP8FwdTensors.GEMM1_OUTPUT
                    meta_tensor = fp8_meta["scaling_fwd"]
                    proj_out_tetype = fp8_dtype_forward
                    proj_out_pttype = torch.uint8
                    ub_obj_projout.set_ubuf_scale_inv(meta_tensor.scale_inv[proj_out_index])
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = weight_fp8.size(0)
                out = torch.empty(dim_size, dtype=proj_out_pttype, device=inputmat_total.device)

            _ = fp8_gemm(
                weight_fp8._data,
                weight_fp8._scale_inv,
                0,
                weight_fp8._fp8_dtype,
                (
                    inputmat_total._data
                    if isinstance(inputmat_total, Float8Tensor)
                    else inputmat_total
                ),
                fp8_meta["scaling_fwd"].scale_inv,
                tex.FP8FwdTensors.GEMM1_INPUT,
                fp8_dtype_forward,
                proj_out_pttype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                use_split_accumulator=_2X_ACC_FPROP,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
                out_index=proj_out_index,
                fp8_meta_tensor=meta_tensor,
                D_dtype=proj_out_tetype,
            )
            if is_first_module_in_mha:
                out = Float8Tensor(
                    data=out,
                    fp8_meta=fp8_meta,
                    fp8_meta_forward=True,
                    fp8_meta_index=tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_dtype=fp8_dtype_forward,
                    dtype=activation_dtype,
                )
        else:
            logger.debug("Running forward in %s", activation_dtype)

            # Cast for native AMP
            weight = cast_if_needed(weight, activation_dtype)
            bias = cast_if_needed(bias, activation_dtype) if use_bias else bias

            if fp8_calibration:
                # amax of input
                amin, amax = inputmat_total.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = torch.max(
                    -amin, amax
                ).float()
                # amax of weight
                amin, amax = weight.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = torch.max(
                    -amin, amax
                ).float()

            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name + "_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // get_distributed_world_size(tp_group)
                dim_size[1] = weight.size(0)
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = weight.size(0)
                out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)

            _ = gemm(
                weight,
                inputmat_total,
                activation_dtype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
            )

        if is_grad_enabled:
            saved_inputmat = None
            saved_inputmat_t = None
            if weight.requires_grad:
                if fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad:
                    if inputmat_t is None:
                        saved_inputmat = inputmat
                    else:
                        saved_inputmat_t = inputmat_t
                        if cpu_offloading:
                            saved_inputmat_t.activation_offloading = True
                else:
                    saved_inputmat = inputmat_no_fp8

                if cpu_offloading:
                    if fp8 and weight_fp8 is not None:
                        weight_fp8.weight_offloading = True
                    weight.weight_offloading = True

                    if saved_inputmat is not None:
                        saved_inputmat.activation_offloading = True

            # Scatter intermediate/activation tensors saved for the backward pass
            # NOTE: FSDP sharding is not valid for models initialized with primary Fp8 weights
            ctx.fsdp_group = fsdp_group
            ctx.fsdp_shapes = _fsdp_scatter_tensors(
                fsdp_group,
                saved_inputmat,  # None if fp8 == False
                saved_inputmat_t,  # None if fp8 == False AND not is_grad_enabled
                weight_fp8 if fp8 and not isinstance(weight, Float8Tensor) else None,
            )

            ctx.save_for_backward(
                saved_inputmat,
                saved_inputmat_t,
                weight,
                weight_fp8,
                weight.main_grad if cpu_offloading and fuse_wgrad_accumulation else None,
                fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None,
            )

            ctx.activation_dtype = activation_dtype
            ctx.fp8 = fp8
            ctx.fp8_meta = fp8_meta
            ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
            ctx.cpu_offloading = cpu_offloading
            ctx.is_first_microbatch = is_first_microbatch
            ctx.use_bias = use_bias
            ctx.sequence_parallel = sequence_parallel
            ctx.tensor_parallel = tensor_parallel
            ctx.inp_shape = inp.shape
            ctx.parallel_mode = parallel_mode
            ctx.tp_group = tp_group
            ctx.ub_overlap_ag = ub_overlap_ag
            ctx.ub_name = ub_name
            ctx.tp_size = tp_size
            ctx.requires_dgrad = inp.requires_grad
            ctx.is_input_fp8 = is_input_fp8
            ctx.reduce_and_update_bwd_fp8_tensors = False
            if ctx.fp8 and requires_grad(inp, weight, bias):
                ctx.reduce_and_update_bwd_fp8_tensors = (
                    ctx.reduce_and_update_bwd_fp8_tensors
                    or FP8GlobalStateManager.is_first_fp8_module()
                )

        # Row Parallel Linear
        if ub_overlap_rs:
            out = rs_out
        elif parallel_mode == "row" and sequence_parallel:
            out, _ = reduce_scatter_along_first_dim(out, tp_group)
        elif parallel_mode == "row" and tensor_parallel:
            out, _ = allreduce(out, tp_group)

        # [*, in_features] -> [*, out_features] except first dimension changes for SP
        return out.view(-1, *inp.shape[1:-1], out.shape[-1])

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> Tuple[Union[torch.Tensor, None], ...]:
        logger = logging.getLogger("Linear")
        if isinstance(grad_output, Float8Tensor):
            ctx.fp8_meta["scaling_bwd"].scale_inv[
                tex.FP8BwdTensors.GRAD_OUTPUT1
            ] = grad_output._scale_inv

        with torch.cuda.nvtx.range("_Linear_backward"):
            (
                inputmat,
                inputmat_t,
                weight,
                weight_fp8,
                main_grad,
                fwd_scale_inverses,
            ) = ctx.saved_tensors

            # Gather intermediate/activation tensors if needed
            # NOTE: weight_fp8 = weight when ctx.fp8 == False and torch.distributed.FSDP already
            #       shards/unshards the base weights so we don't do it ourselves
            _fsdp_gather_tensors(
                ctx.fsdp_group,
                ctx.fsdp_shapes,
                inputmat,
                inputmat_t,
                weight_fp8 if ctx.fp8 and not isinstance(weight, Float8Tensor) else None,
            )

            if ctx.cpu_offloading and ctx.fuse_wgrad_accumulation:
                weight = torch.nn.Parameter(weight, weight.requires_grad)
                weight.main_grad = main_grad

            tp_world_size = get_distributed_world_size(ctx.tp_group)
            ctx.ub_overlap_ag = False if tp_world_size == 1 else ctx.ub_overlap_ag
            if ctx.ub_overlap_ag:
                dim_size = list(grad_output.size())
                dim_size[0] = dim_size[0] * tp_world_size
                ctx.ub_obj_gradout = get_ub(ctx.ub_name + "_dgrad")
                if ctx.ub_obj_gradout.is_atomic_gemm():
                    ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_AG_P2P
                else:
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG_P2P

            (
                grad_output,
                grad_output_c,
                grad_output_t,
                grad_bias,
            ) = TransformerEngineBaseModule.grad_output_preprocess(
                ctx, grad_output, ctx.parallel_mode == "row"
            )

            # Column Parallel Linear
            # Overlap input AG with dgrad
            inputmat_total = None
            inputmat_t_total = None
            handle = None
            if weight.requires_grad and ctx.parallel_mode == "column" and ctx.sequence_parallel:
                inputmat_total, handle = gather_along_first_dim(
                    inputmat, ctx.tp_group, async_op=ctx.requires_dgrad
                )
            else:
                inputmat_total = inputmat
                inputmat_t_total = inputmat_t

            if ctx.is_first_microbatch is not None:
                accumulate_wgrad_into_param_main_grad = (
                    ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
                )
            else:
                accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation

            if ctx.fp8:
                fp8_dtype_forward = get_fp8_te_dtype(ctx.fp8_meta["recipe"], fprop_tensor=True)
                fp8_dtype_backward = get_fp8_te_dtype(ctx.fp8_meta["recipe"], fprop_tensor=False)

            if ctx.requires_dgrad:
                if ctx.fp8:
                    logger.debug("Running backward in FP8")

                    if ctx.is_input_fp8:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            tex.FP8BwdTensors.GRAD_INPUT1,
                            ctx.fp8_meta["scaling_bwd"],
                            fp8_dtype_backward,
                            torch.uint8,
                        )
                    else:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            None,
                            None,
                            None,
                            ctx.activation_dtype,
                        )
                    dgrad, _ = fp8_gemm(
                        weight_fp8.transpose_2d(),
                        weight_fp8._scale_inv,
                        0,
                        weight_fp8._fp8_dtype,
                        grad_output_c,
                        ctx.fp8_meta["scaling_bwd"].scale_inv,
                        tex.FP8BwdTensors.GRAD_OUTPUT1,
                        fp8_dtype_backward,
                        output_dtype,
                        get_workspace(),
                        use_split_accumulator=_2X_ACC_DGRAD,
                        ub_algo=ub_algo if ctx.ub_overlap_ag else None,
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                        out_index=out_index,
                        fp8_meta_tensor=meta_tensor,
                        D_dtype=output_te_dtype,
                    )
                    if output_dtype == torch.uint8:
                        dgrad = Float8Tensor(
                            data=dgrad,
                            fp8_meta=ctx.fp8_meta,
                            fp8_meta_forward=False,
                            fp8_meta_index=tex.FP8BwdTensors.GRAD_INPUT1,
                            fp8_dtype=fp8_dtype_backward,
                            dtype=ctx.activation_dtype,
                        )
                else:
                    logger.debug("Running backward in %s", ctx.activation_dtype)

                    dgrad, _, _ = gemm(
                        weight,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NN",
                        grad=True,
                        ub_algo=(
                            tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG_P2P
                            if ctx.ub_overlap_ag
                            else None
                        ),
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                    )

                # Overlap dgrad-RS/AR with wgrad
                if ctx.parallel_mode == "column" and ctx.sequence_parallel:
                    if handle is not None:
                        handle.wait()
                    dgrad, handle = reduce_scatter_along_first_dim(
                        dgrad, ctx.tp_group, async_op=True
                    )
                elif ctx.parallel_mode == "column" and ctx.tensor_parallel:
                    dgrad, handle = allreduce(dgrad, ctx.tp_group, async_op=True)

            if weight.requires_grad:
                if ctx.fp8:
                    # WGRAD
                    if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
                        if ctx.ub_overlap_ag:
                            if isinstance(grad_output_c, Float8Tensor):
                                grad_output_t = grad_output_c.transpose_2d()
                            else:
                                grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
                        if inputmat_t_total is None:
                            if isinstance(inputmat_total, Float8Tensor):
                                inputmat_t_total = inputmat_total.transpose_2d()
                            else:
                                inputmat_t_total = tex.fp8_transpose(
                                    inputmat_total, fp8_dtype_backward
                                )
                        wgrad, _ = fp8_gemm(
                            (
                                inputmat_t_total._data
                                if isinstance(inputmat_t_total, Float8Tensor)
                                else inputmat_t_total
                            ),
                            fwd_scale_inverses,
                            tex.FP8FwdTensors.GEMM1_INPUT,
                            fp8_dtype_forward,
                            grad_output_t,
                            ctx.fp8_meta["scaling_bwd"].scale_inv,
                            tex.FP8BwdTensors.GRAD_OUTPUT1,
                            fp8_dtype_backward,
                            ctx.activation_dtype,
                            get_workspace(),
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                            use_split_accumulator=_2X_ACC_WGRAD,
                        )
                    else:
                        wgrad, _, _ = gemm(
                            inputmat_total,
                            grad_output,
                            ctx.activation_dtype,
                            get_workspace(),
                            layout="NT",
                            grad=True,
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                        )
                else:
                    # WGRAD
                    wgrad, grad_bias, _ = gemm(
                        inputmat_total,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NT",
                        grad=True,
                        use_bias=ctx.use_bias,
                        accumulate=accumulate_wgrad_into_param_main_grad,
                        out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                    )

                # Deallocate input tensor
                clear_tensor_data(inputmat_total)
                clear_tensor_data(inputmat_t_total)

            # Column Parallel Linear
            if ctx.parallel_mode == "column" and ctx.tensor_parallel and handle is not None:
                handle.wait()

            if not ctx.use_bias:
                grad_bias = None

        if weight.requires_grad:
            # Handle custom DDP from mcore.
            if ctx.fuse_wgrad_accumulation and hasattr(weight, "grad_added_to_main_grad"):
                weight.grad_added_to_main_grad = True
                if getattr(weight, "zero_out_wgrad", False):
                    wgrad = torch.zeros(
                        weight.main_grad.shape,
                        dtype=weight.dtype,
                        device=torch.cuda.current_device(),
                        requires_grad=False,
                    )
                else:
                    wgrad = torch.empty(
                        weight.main_grad.shape,
                        dtype=weight.dtype,
                        device=torch.cuda.current_device(),
                        requires_grad=False,
                    )
            elif ctx.fuse_wgrad_accumulation:
                wgrad = None
        else:
            wgrad = None

        if ctx.reduce_and_update_bwd_fp8_tensors and not is_graph_capturing():
            FP8GlobalStateManager.reduce_and_update_fp8_tensors(forward=False)

        # Scatter fp8 weight buffers
        if ctx.fp8 and not isinstance(weight, Float8Tensor):
            _fsdp_scatter_tensors(ctx.fsdp_group, weight_fp8)

        return (
            wgrad,
            None,  # weight_fp8
            dgrad.view(ctx.inp_shape) if ctx.requires_dgrad else None,
            grad_bias,
            None,  # use_bias
            None,  # is_first_microbatch
            None,  # fp8
            None,  # fp8_calibration
            None,  # fp8_meta
            None,  # fuse_wgrad_accumulation
            None,  # cpu_offloading
            None,  # tp_group
            None,  # tp_size
            None,  # sequence_parallel
            None,  # tensor_parallel
            None,  # activation_dtype
            None,  # parallel_mode
            None,  # is_grad_enabled
            None,  # ub_overlap_rs
            None,  # ub_overlap_ag
            None,  # ub_name
            None,  # is_first_module_in_mha
            None,  # fsdp_group
        )


class Linear(TransformerEngineBaseModule):
    """Applies a linear transformation to the incoming data :math:`y = xA^T + b`

    On NVIDIA GPUs it is a drop-in replacement for `torch.nn.Linear`.

    Parameters
    ----------
    in_features : int
                 size of each input sample.
    out_features : int
                  size of each output sample.
    bias : bool, default = `True`
          if set to `False`, the layer will not learn an additive bias.
    init_method : Callable, default = `None`
                 used for initializing weights in the following way: `init_method(weight)`.
                 When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
    get_rng_state_tracker : Callable, default = `None`
                 used to get the random number generator state tracker for initializing weights.
    rng_tracker_name : str, default = `None`
                 the param passed to get_rng_state_tracker to get the specific rng tracker.
    parameters_split : Optional[Union[Tuple[str, ...], Dict[str, int]]], default = None
                      Configuration for splitting the weight and bias tensors along dim 0 into
                      multiple PyTorch parameters. If a list or tuple of strings is provided,
                      they are used to make the names of equally-sized parameters. If a dict
                      (preferably an OrderedDict) is provided, the keys are used as names and
                      values as split sizes along dim 0. The resulting parameters will have
                      names that end in `_weight` or `_bias`, so trailing underscores are
                      stripped from any provided names.
    device : Union[torch.device, str], default = "cuda"
          The device on which the parameters of the model will be allocated. It is the user's
          responsibility to ensure all parameters are moved to the GPU before running the
          forward pass.

    Parallelism parameters
    ----------------------
    sequence_parallel : bool, default = `False`
                       if set to `True`, uses sequence parallelism.
    tp_group : ProcessGroup, default = `None`
              tensor parallel process group.
    tp_size : int, default = 1
             used as TP (tensor parallel) world size when TP groups are not formed during
             initialization. In this case, users must call the
             `set_tensor_parallel_group(tp_group)` method on the initialized module before the
             forward pass to supply the tensor parallel group needed for tensor and sequence
             parallel collectives.
    parallel_mode : {None, 'column', 'row'}, default = `None`
                   used to decide whether this Linear layer is Column Parallel Linear or Row
                   Parallel Linear as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
                   When set to `None`, no communication is performed.

    Optimization parameters
    -----------------------
    fuse_wgrad_accumulation : bool, default = `False`
                             if set to `True`, enables fusing of creation and accumulation of
                             the weight gradient. When enabled, it is assumed that the weights
                             have an additional `main_grad` attribute (used instead of the
                             regular `grad`) which is a pre-allocated buffer of the correct
                             size to accumulate gradients in.
    return_bias : bool, default = `False`
                 when set to `True`, this module will not apply the additive bias itself, but
                 instead return the bias value during the forward pass together with the
                 output of the linear transformation :math:`y = xA^T`. This is useful when
                 the bias addition can be fused to subsequent operations.
    params_dtype : torch.dtype, default = `torch.get_default_dtype()`
                  it controls the type used to allocate the initial parameters. Useful when
                  the model is trained with lower precision and the original FP32 parameters
                  would not fit in GPU memory.
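
    A minimal usage sketch (illustrative only; assumes TransformerEngine is
    installed and importable as `transformer_engine.pytorch`, and that a CUDA
    device is available):

    .. code-block:: python

        import torch
        import transformer_engine.pytorch as te

        # Same call pattern as torch.nn.Linear; parameters are allocated on
        # the "cuda" device by default.
        layer = te.Linear(1024, 1024, bias=True)
        inp = torch.randn(8, 1024, device="cuda")
        out = layer(inp)

        # FP8 execution (sketch): wrap the forward pass in fp8_autocast.
        with te.fp8_autocast(enabled=True):
            out = layer(inp)

        # Splitting the weight/bias buffers into multiple named parameters
        # (produces parameters named query_weight, key_weight, value_weight, ...).
        qkv = te.Linear(1024, 3 * 1024, parameters_split=("query", "key", "value"))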

    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        sequence_parallel: bool = False,
        fuse_wgrad_accumulation: bool = False,
        tp_group: Optional[dist_group_type] = None,
        tp_size: int = 1,
        get_rng_state_tracker: Optional[Callable] = None,
        rng_tracker_name: Optional[str] = None,
        init_method: Optional[Callable] = None,
        bias: bool = True,
        return_bias: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        parallel_mode: Optional[str] = None,
        parameters_split: Optional[Union[Tuple[str, ...], Dict[str, int]]] = None,
        device: Union[torch.device, str] = "cuda",
        ub_overlap_rs: bool = False,
        ub_overlap_ag: bool = False,
        ub_name: Optional[str] = None,
    ) -> None:
        super().__init__()

        params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
        self.in_features = in_features
        self.out_features = out_features
        self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
        self.use_bias = bias
        self.return_bias = return_bias
        self.apply_bias = bias and not return_bias
        self.ub_overlap_rs = ub_overlap_rs
        self.ub_overlap_ag = ub_overlap_ag
        if ub_overlap_rs or ub_overlap_ag:
            assert ub_name is not None, "Userbuffer name [string] is not set."
        self.ub_name = ub_name
        self.get_rng_state_tracker = get_rng_state_tracker
        self.rng_tracker_name = rng_tracker_name

        if device == "meta":
            assert parameters_split is None, "Cannot split module parameters on 'meta' device."
        if tp_group is None:
            self.tp_size = tp_size
            if tp_size == 1:
                self.set_tensor_parallel_group(tp_group)
        else:
            self.tp_size = get_distributed_world_size(tp_group)
            self.set_tensor_parallel_group(tp_group)
        self.set_nccl_overlap_warning_if_tp()

        self.parallel_mode = parallel_mode
        assert (
            self.parallel_mode in GemmParallelModes
        ), f"parallel_mode {parallel_mode} not supported"

        if self.parallel_mode == "column":
            self.out_features = divide(self.out_features, self.tp_size)
        elif self.parallel_mode == "row":
            self.in_features = divide(self.in_features, self.tp_size)

        self.sequence_parallel = (self.tp_size > 1) and sequence_parallel

        # Initialize params in FP8
        with_fp8_params = FP8GlobalStateManager.with_fp8_parameters()

        # Contiguous buffers for params
        weight_tensor = torch.empty(
            self.out_features,
            self.in_features,
            device=device,
            dtype=params_dtype,
        )
        bias_tensor = None
        if self.use_bias:
            bias_tensor = torch.empty(
                self.out_features,
                device=device,
                dtype=params_dtype,
            )

        # Configure parameter splits
        self.weight_names = []
        self.bias_names = []
        self.parameter_split_sizes = []
        if parameters_split is None:
            # Split into a single parameter by default
            self.weight_names = ["weight"]
            self.bias_names = ["bias"]
            self.parameter_split_sizes = [out_features]
        elif not parameters_split:
            raise ValueError("Cannot split weight buffer into 0 parameters")
        elif isinstance(parameters_split, dict):
            # Split parameters with provided sizes
            for name, split_size in parameters_split.items():
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        elif all(isinstance(name, str) for name in parameters_split):
            # Split parameters evenly
            split_size = out_features // len(parameters_split)
            for name in parameters_split:
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        else:
            raise TypeError("Invalid configuration for parameters split")

        # Make sure parameter splits are valid
        if sum(self.parameter_split_sizes) != out_features:
            raise ValueError(
                f"Trying to split weight buffer ({out_features=}) "
                f"with split sizes {self.parameter_split_sizes}"
            )

        # Adjust parameter splits for tensor-parallel distribution
        if self.parallel_mode == "column":
            for i, size in enumerate(self.parameter_split_sizes):
                if size % self.tp_size != 0:
                    raise RuntimeError(
                        f"Attempting to distribute a parameter with out_features={size} "
                        f"between {self.tp_size} tensor-parallel processes"
                    )
                self.parameter_split_sizes[i] = size // self.tp_size

        # Construct weight parameters
        # Note: Register weights together so that they are adjacent to
        # each other in Linear.parameters(). This makes it more likely
        # that they will stay contiguous if the weights are
        # manipulated externally, e.g. by FSDP.
        offset = 0
        for i, split_size in enumerate(self.parameter_split_sizes):
            split_start = offset
            offset += split_size
            split_end = offset

            # Check if parameters are subviews of buffers
            is_subview = (split_start, split_end) != (0, self.out_features)
            if is_subview and with_fp8_params:
                raise RuntimeError("Splitting Float8Tensor into multiple params is not supported")

            # Construct weight parameter
            self.register_parameter(
                self.weight_names[i],
                torch.nn.Parameter(weight_tensor[split_start:split_end]),
                init_fn=init_method,
                get_rng_state_tracker=get_rng_state_tracker,
                fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
            )

        # Construct bias parameters if needed
        if self.use_bias:
            offset = 0
            for i, split_size in enumerate(self.parameter_split_sizes):
                split_start = offset
                offset += split_size
                split_end = offset
                self.register_parameter(
                    self.bias_names[i],
                    torch.nn.Parameter(bias_tensor[split_start:split_end]),
                    init_fn=init_method_constant(0.0),
                )
        else:
            for name in self.bias_names:
                bias = torch.Tensor().to(dtype=params_dtype, device=device)
                setattr(self, name, bias)

        if with_fp8_params:
            self.init_fp8_metadata()

        self.reset_parameters(defer_init=(device == "meta"))

        # For RPL, bias has to be added after TP collectives
        # So it cannot be fused with the GEMM
        if self.parallel_mode == "row" and self.apply_bias:
            self.gemm_bias_unfused_add = True
        else:
            self.gemm_bias_unfused_add = False

    def reset_parameters(self, defer_init=False):
        super().reset_parameters(defer_init=defer_init)

        if not defer_init:
            # Set parallelism attributes for linear weights
            for weight in self.weight_names:
                set_tensor_model_parallel_attributes(
                    tensor=getattr(self, weight),
                    is_parallel=True,
                    dim=1 if self.parallel_mode == "row" else 0,
                    stride=1,
                )

            # Set parallelism attributes for linear biases
            if self.use_bias:
                for bias in self.bias_names:
                    if self.parallel_mode == "row":
                        setattr(getattr(self, bias), "sequence_parallel", self.sequence_parallel)
                    elif self.parallel_mode == "column":
                        set_tensor_model_parallel_attributes(getattr(self, bias), True, 0, 1)

    @no_torch_dynamo()
    def forward(
        self,
        inp: torch.Tensor,
        is_first_microbatch: Optional[bool] = None,
        is_first_module_in_mha: Optional[bool] = False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        """
        Apply the linear transformation to the input.

        Parameters
        ----------
        inp : torch.Tensor
             Input tensor.
        is_first_microbatch : {True, False, None}, default = None
                             During training using either gradient accumulation or
                             pipeline parallelism, a minibatch of data is further split
                             into microbatches. Between the microbatches of the same minibatch
                             the model weights are not updated. Setting this parameter indicates
                             whether the current microbatch is the first in a minibatch or not.
                             When set, this parameter enables additional optimizations:

                             * during FP8 training, it allows caching of the FP8 versions of
                               the weights
                             * it also allows skipping gradient accumulation during the
                               first microbatch (since it is the first gradient being
                               produced)
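
        A gradient-accumulation sketch (illustrative; `layer`, `microbatches`,
        and `optimizer` are placeholders supplied by the caller):

        .. code-block:: python

            for i, microbatch in enumerate(microbatches):
                out = layer(microbatch, is_first_microbatch=(i == 0))
                loss = out.sum()
                loss.backward()
            optimizer.step()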
        """

        skip_fp8_weight_update = FP8GlobalStateManager.get_skip_fp8_weight_update_tensor()
        if skip_fp8_weight_update is not None:
            is_first_microbatch = False

        with self.prepare_forward(
            inp,
            is_first_microbatch,
            allow_non_contiguous=isinstance(inp, Float8Tensor),
        ) as inp:

            is_first_module_in_mha = is_first_module_in_mha and self.fp8_meta["recipe"].fp8_mha

            # Get concatenated weight and bias tensors
            unfused_weights = [getattr(self, name) for name in self.weight_names]
            if any(isinstance(w, Float8Tensor) for w in unfused_weights):
                if self.fp8:
                    if len(unfused_weights) != 1:
                        raise RuntimeError(
                            "Splitting Float8Tensor into multiple params is not supported"
                        )
                else:
                    unfused_weights = [w.from_float8() for w in unfused_weights]
            weight_tensor = _noop_cat(unfused_weights)
            if self.use_bias:
                bias_tensor = _noop_cat(
                    [getattr(self, name) for name in self.bias_names],
                )
            else:
                bias_tensor = getattr(self, self.bias_names[0])  # Unused

            # Initialize FP8 weights if needed
            weight_fp8 = None
            if self.fp8:
                with_transpose = torch.is_grad_enabled()
                if (
                    not with_transpose
                    and is_fp8_activation_recompute_enabled()
                    and not in_fp8_activation_recompute_phase()
                ):
                    with_transpose = True
                if isinstance(weight_tensor, Float8Tensor):
                    # Fill transpose cache in FP8 tensor if needed
                    update_transpose_cache = with_transpose
                    if update_transpose_cache:
                        update_transpose_cache = (
                            is_first_microbatch or skip_fp8_weight_update is not None
                        )
                    if update_transpose_cache:
                        weight_tensor.transpose_2d(
                            fill_cache=True,
                            noop_flag=skip_fp8_weight_update,
                        )
                else:
                    # FP8 cast to workspace buffer
                    update_workspace = is_first_microbatch is None or is_first_microbatch
                    weight_fp8 = self.get_fp8_workspace(
                        tensor=weight_tensor,
                        fp8_meta_forward=True,
                        fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
                        cache_name=(None if is_first_microbatch is None else "weight"),
                        update_workspace=update_workspace,
                        skip_update_flag=skip_fp8_weight_update,
                        with_transpose=with_transpose,
                        fsdp_group=self.fsdp_group,
                    )

            from ..cpu_offload import CPUOffloadEnabled

            if torch.is_grad_enabled():
                linear_fn = _Linear.apply
                args = []
            else:
                linear_fn = _Linear.forward
                args = [None]
            args += (
                weight_tensor,
                weight_fp8,
                inp,
                bias_tensor,
                self.apply_bias and not self.gemm_bias_unfused_add,
                is_first_microbatch,
                self.fp8,
                self.fp8_calibration,
                self.fp8_meta,
                self.fuse_wgrad_accumulation,
                CPUOffloadEnabled,
                self.tp_group,
                self.tp_size,
                self.sequence_parallel,
                self.tp_size > 1,
                self.activation_dtype,
                self.parallel_mode,
                torch.is_grad_enabled(),
                self.ub_overlap_rs,
                self.ub_overlap_ag,
                self.ub_name,
                is_first_module_in_mha,
                self.fsdp_group,
            )
            out = linear_fn(*args)

        if self.gemm_bias_unfused_add:
            out = out + cast_if_needed(bias_tensor, self.activation_dtype)

        if self.return_bias:
            return out, cast_if_needed(bias_tensor, self.activation_dtype)
        return out