# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Linear API"""
from typing import Any, Callable, Dict, Optional, Tuple, Union

import torch

import transformer_engine_torch as tex

from .base import (
    get_workspace,
    get_ub,
    TransformerEngineBaseModule,
    _2X_ACC_FPROP,
    _2X_ACC_DGRAD,
    _2X_ACC_WGRAD,
)
from ._common import _noop_cat
from ..fp8 import get_fp8_te_dtype, FP8GlobalStateManager
from ..utils import (
    divide,
    cast_if_needed,
    assert_dim_for_fp8_exec,
    clear_tensor_data,
    init_method_constant,
    requires_grad,
)
from ..distributed import (
    set_tensor_model_parallel_attributes,
    get_distributed_world_size,
    allreduce,
    reduce_scatter_along_first_dim,
    gather_along_first_dim,
    in_fp8_activation_recompute_phase,
    _fsdp_scatter_tensors,
    _fsdp_gather_tensors,
)
from ..cpp_extensions import (
    fp8_gemm,
    gemm,
    fp8_cast_transpose_fused,
    cast_to_fp8,
)
from ..constants import GemmParallelModes, dist_group_type
from ..jit import no_torch_dynamo
from ..graph import is_graph_capturing
from ..float8_tensor import Float8Tensor
from ..export import is_in_onnx_export_mode
from ..tensor import QuantizedTensor
from ..cpu_offload import is_cpu_offload_enabled

__all__ = ["Linear"]


class _Linear(torch.autograd.Function):
    """Linear semi-top level module
    Calls custom CUDA extensions.
    """

    @staticmethod
    def forward(
        ctx,
        weight: Union[Float8Tensor, torch.Tensor],
        weight_fp8: Optional[Float8Tensor],
        inp: torch.Tensor,
        bias: torch.Tensor,
        use_bias: bool,
        is_first_microbatch: Union[bool, None],
        fp8: bool,
        fp8_calibration: bool,
        fp8_meta: Dict[str, Any],
        fuse_wgrad_accumulation: bool,
        cpu_offloading: bool,
        tp_group: Union[dist_group_type, None],
        tp_size: int,
        sequence_parallel: bool,
        tensor_parallel: bool,
        activation_dtype: torch.dtype,
        parallel_mode: Union[str, None],
        is_grad_enabled: bool,
        ub_overlap_rs: bool,
        ub_overlap_ag: bool,
        ub_name: str,
        fp8_output: bool,
        fsdp_group: Union[dist_group_type, None],
    ) -> torch.Tensor:
        # pylint: disable=missing-function-docstring
        is_input_fp8 = isinstance(inp, Float8Tensor)

        # Make sure input dimensions are compatible
        out_features, in_features = weight.shape
        inp_shape = inp.shape
        assert inp_shape[-1] == in_features, "GEMM not possible"
        inputmat = inp.view(-1, in_features)
        if fp8:
            assert_dim_for_fp8_exec(inputmat)
            assert_dim_for_fp8_exec(weight)

        tp_world_size = get_distributed_world_size(tp_group)
        ub_overlap_rs = False if tp_world_size == 1 else ub_overlap_rs

        # Cast input to expected dtype
        inputmat = cast_if_needed(inputmat, activation_dtype)
        inputmat_t = None
        inputmat_no_fp8 = inputmat
        inputmat_scale_inv = None

        if fp8:
            fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
            if isinstance(inputmat, Float8Tensor):
                inputmat_scale_inv = inputmat._scale_inv
            else:
                inputmat_scale_inv = torch.empty([1], dtype=torch.float32, device=inputmat.device)
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat, inputmat_t = fp8_cast_transpose_fused(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                        scale_inv=inputmat_scale_inv,
                    )
                else:
                    # FP8 input for forward
                    inputmat = cast_to_fp8(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                        scale_inv=inputmat_scale_inv,
                    )

            # Hack for ONNX export
            # Note: ONNX models are represented as a graph of tensor
            # operations, so the in-place scale-inv update doesn't fit
            # very well. We work around this by making it look like
            # the scale-inv tensor is initialized with a copy.
            # Note: ONNX export expects FP8 scales can be represented
            # with constant ops. However, copying into a buffer
            # involves an expand op for array broadcasting. We work
            # around this by filling the buffer instead.
            if is_in_onnx_export_mode():
                inputmat_scale_inv.fill_(inputmat_scale_inv.item())

        # Column Parallel Linear
        if parallel_mode == "column" and sequence_parallel:
            inputmat_total, _ = gather_along_first_dim(inputmat, tp_group)
        else:
            inputmat_total = inputmat
        if fp8:
            bias_dtype = torch.bfloat16 if activation_dtype == torch.float32 else activation_dtype
            bias = cast_if_needed(bias, bias_dtype) if use_bias else bias

            # Use FP8 weights
            if weight_fp8 is None:
                weight_fp8 = weight

            assert isinstance(weight_fp8, Float8Tensor)

            if fp8_output:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_meta["scaling_fwd"],
                    fp8_dtype_forward,
                    torch.uint8,
                )
            else:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    None,
                    None,
                    None,
                    activation_dtype,
                )

            ub_algo = None
            rs_out = None
            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name + "_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // tp_world_size
                dim_size[1] = out_features
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.CommOverlapAlgo.ATOMIC_GEMM_RS_P2P
                    else:
                        ub_algo = tex.CommOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.CommOverlapAlgo.ATOMIC_GEMM_RS
                    else:
                        ub_algo = tex.CommOverlapAlgo.SPLIT_PIPELINED_RS
                if ub_obj_projout.is_fp8_ubuf():
                    proj_out_index = tex.FP8FwdTensors.GEMM1_OUTPUT
                    meta_tensor = fp8_meta["scaling_fwd"]
                    proj_out_tetype = fp8_dtype_forward
                    proj_out_pttype = torch.uint8
                    ub_obj_projout.set_ubuf_scale_inv(meta_tensor.scale_inv[proj_out_index])
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = out_features
                out = torch.empty(dim_size, dtype=proj_out_pttype, device=inputmat_total.device)

            _ = fp8_gemm(
                weight_fp8._data,
                weight_fp8._scale_inv,
                0,
                weight_fp8._fp8_dtype,
                (
                    inputmat_total._data
                    if isinstance(inputmat_total, Float8Tensor)
                    else inputmat_total
                ),
                inputmat_scale_inv,
                0,
                fp8_dtype_forward,
                proj_out_pttype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                use_split_accumulator=_2X_ACC_FPROP,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
                out_index=proj_out_index,
                fp8_meta_tensor=meta_tensor,
                D_dtype=proj_out_tetype,
            )
            if fp8_output:
                out = Float8Tensor(
                    data=out,
                    fp8_meta=fp8_meta,
                    fp8_meta_forward=True,
                    fp8_meta_index=tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_dtype=fp8_dtype_forward,
                    dtype=activation_dtype,
                )
        else:
            # Cast for native AMP
            weight = cast_if_needed(weight, activation_dtype)
            bias = cast_if_needed(bias, activation_dtype) if use_bias else bias

            if fp8_calibration:
                # amax of input
                amin, amax = inputmat_total.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = torch.max(
                    -amin, amax
                ).float()
                # amax of weight
                amin, amax = weight.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = torch.max(
                    -amin, amax
                ).float()

            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name + "_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // get_distributed_world_size(tp_group)
                dim_size[1] = out_features
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    ub_algo = tex.CommOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    ub_algo = tex.CommOverlapAlgo.SPLIT_PIPELINED_RS
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = out_features
                out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)

            _ = gemm(
                weight,
                inputmat_total,
                activation_dtype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
            )

        if is_grad_enabled:
            saved_inputmat = None
            saved_inputmat_t = None
            if weight.requires_grad:
                if fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad:
                    if inputmat_t is None:
                        saved_inputmat = inputmat
                    else:
                        saved_inputmat_t = inputmat_t
                        if cpu_offloading:
                            saved_inputmat_t.activation_offloading = True
                else:
                    saved_inputmat = inputmat_no_fp8

                if cpu_offloading:
                    if fp8 and weight_fp8 is not None:
                        weight_fp8.weight_offloading = True
                    weight.weight_offloading = True

                    if saved_inputmat is not None:
                        saved_inputmat.activation_offloading = True

            # Scatter intermediate/activation tensors saved for the backward pass
            # NOTE: FSDP sharding is not valid for models initialized with primary Fp8 weights
            ctx.fsdp_group = fsdp_group
            ctx.fsdp_shapes = _fsdp_scatter_tensors(
                fsdp_group,
                saved_inputmat,  # None if fp8 == False
                saved_inputmat_t,  # None if fp8 == False AND not is_grad_enabled
                weight_fp8 if fp8 and not isinstance(weight, Float8Tensor) else None,
            )

            ctx.save_for_backward(
                saved_inputmat,
                saved_inputmat_t,
                inputmat_scale_inv,
                weight,
                weight_fp8,
                weight.main_grad if cpu_offloading and fuse_wgrad_accumulation else None,
            )

            ctx.activation_dtype = activation_dtype
            ctx.fp8 = fp8
            ctx.fp8_meta = fp8_meta
            ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
            ctx.cpu_offloading = cpu_offloading
            ctx.is_first_microbatch = is_first_microbatch
            ctx.use_bias = use_bias
            ctx.sequence_parallel = sequence_parallel
            ctx.tensor_parallel = tensor_parallel
            ctx.inp_shape = inp_shape
            ctx.parallel_mode = parallel_mode
            ctx.tp_group = tp_group
            ctx.ub_overlap_ag = ub_overlap_ag
            ctx.ub_name = ub_name
            ctx.tp_size = tp_size
            ctx.requires_dgrad = inp.requires_grad
            ctx.is_input_fp8 = is_input_fp8
            ctx.reduce_and_update_bwd_fp8_tensors = False
            if ctx.fp8 and requires_grad(inp, weight, bias):
                _first_fp8_module = FP8GlobalStateManager.IS_FIRST_FP8_MODULE
                ctx.reduce_and_update_bwd_fp8_tensors = FP8GlobalStateManager.is_first_fp8_module()
                if in_fp8_activation_recompute_phase():
                    FP8GlobalStateManager.IS_FIRST_FP8_MODULE = _first_fp8_module

        # Row Parallel Linear
        if ub_overlap_rs:
            out = rs_out
        elif parallel_mode == "row" and sequence_parallel:
            out, _ = reduce_scatter_along_first_dim(out, tp_group)
        elif parallel_mode == "row" and tensor_parallel:
            out, _ = allreduce(out, tp_group)

        # [*, in_features] -> [*, out_features] except first dimension changes for SP
        return out.view(-1, *inp_shape[1:-1], out_features)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> Tuple[Union[torch.Tensor, None], ...]:
        # pylint: disable=missing-function-docstring
        if isinstance(grad_output, Float8Tensor):
            ctx.fp8_meta["scaling_bwd"].scale_inv[
                tex.FP8BwdTensors.GRAD_OUTPUT1
            ] = grad_output._scale_inv

        with torch.cuda.nvtx.range("_Linear_backward"):
            (
                inputmat,
                inputmat_t,
                inputmat_scale_inv,
                weight,
                weight_fp8,
                main_grad,
            ) = ctx.saved_tensors

            # Gather intermediate/activation tensors if needed
            # NOTE: weight_fp8 = weight when ctx.fp8 == False and torch.distributed.FSDP already
            #       shards/unshards the base weights so we don't do it ourselves
            _fsdp_gather_tensors(
                ctx.fsdp_group,
                ctx.fsdp_shapes,
                inputmat,
                inputmat_t,
                weight_fp8 if ctx.fp8 and not isinstance(weight, Float8Tensor) else None,
            )

            if ctx.cpu_offloading and ctx.fuse_wgrad_accumulation:
                weight = torch.nn.Parameter(weight, weight.requires_grad)
                weight.main_grad = main_grad

            tp_world_size = get_distributed_world_size(ctx.tp_group)
            ctx.ub_overlap_ag = False if tp_world_size == 1 else ctx.ub_overlap_ag
            ub_algo = None
            if ctx.ub_overlap_ag:
                dim_size = list(grad_output.size())
                dim_size[0] = dim_size[0] * tp_world_size
                ctx.ub_obj_gradout = get_ub(ctx.ub_name + "_dgrad")
                if ctx.ub_obj_gradout.is_atomic_gemm():
                    ub_algo = tex.CommOverlapAlgo.ATOMIC_GEMM_AG_P2P
                else:
                    ub_algo = tex.CommOverlapAlgo.SPLIT_PIPELINED_AG_P2P

            (
                grad_output,
                grad_output_c,
                grad_output_t,
                grad_bias,
            ) = TransformerEngineBaseModule.grad_output_preprocess(
                ctx, grad_output, ctx.parallel_mode == "row"
            )

            # Column Parallel Linear
            # Overlap input AG with dgrad
            inputmat_total = None
            inputmat_t_total = None
            handle = None
            if weight.requires_grad and ctx.parallel_mode == "column" and ctx.sequence_parallel:
                inputmat_total, handle = gather_along_first_dim(
                    inputmat, ctx.tp_group, async_op=ctx.requires_dgrad
                )
            else:
                inputmat_total = inputmat
                inputmat_t_total = inputmat_t

            if ctx.is_first_microbatch is not None:
                accumulate_wgrad_into_param_main_grad = (
                    ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
                )
            else:
                accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation

            if ctx.fp8:
                fp8_dtype_forward = get_fp8_te_dtype(ctx.fp8_meta["recipe"], fprop_tensor=True)
                fp8_dtype_backward = get_fp8_te_dtype(ctx.fp8_meta["recipe"], fprop_tensor=False)

            if ctx.requires_dgrad:
                if ctx.fp8:
                    if ctx.is_input_fp8:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            tex.FP8BwdTensors.GRAD_INPUT1,
                            ctx.fp8_meta["scaling_bwd"],
                            fp8_dtype_backward,
                            torch.uint8,
                        )
                    else:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            None,
                            None,
                            None,
                            ctx.activation_dtype,
                        )
                    dgrad, _ = fp8_gemm(
                        weight_fp8.transpose_2d(),
                        weight_fp8._scale_inv,
                        0,
                        weight_fp8._fp8_dtype,
                        grad_output_c,
                        ctx.fp8_meta["scaling_bwd"].scale_inv,
                        tex.FP8BwdTensors.GRAD_OUTPUT1,
                        fp8_dtype_backward,
                        output_dtype,
                        get_workspace(),
                        use_split_accumulator=_2X_ACC_DGRAD,
                        ub_algo=ub_algo if ctx.ub_overlap_ag else None,
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                        out_index=out_index,
                        fp8_meta_tensor=meta_tensor,
                        D_dtype=output_te_dtype,
                    )
                    if output_dtype == torch.uint8:
                        dgrad = Float8Tensor(
                            data=dgrad,
                            fp8_meta=ctx.fp8_meta,
                            fp8_meta_forward=False,
                            fp8_meta_index=tex.FP8BwdTensors.GRAD_INPUT1,
                            fp8_dtype=fp8_dtype_backward,
                            dtype=ctx.activation_dtype,
                        )
                else:
                    dgrad, _, _ = gemm(
                        weight,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NN",
                        grad=True,
                        ub_algo=(
                            tex.CommOverlapAlgo.SPLIT_PIPELINED_AG_P2P
                            if ctx.ub_overlap_ag
                            else None
                        ),
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                    )

                # Overlap dgrad-RS/AR with wgrad
                if ctx.parallel_mode == "column" and ctx.sequence_parallel:
                    if handle is not None:
                        handle.wait()
                    dgrad, handle = reduce_scatter_along_first_dim(
                        dgrad, ctx.tp_group, async_op=True
                    )
                elif ctx.parallel_mode == "column" and ctx.tensor_parallel:
                    dgrad, handle = allreduce(dgrad, ctx.tp_group, async_op=True)

            wgrad = None
            if weight.requires_grad:
                if ctx.fp8:
                    # WGRAD
                    if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
                        if ctx.ub_overlap_ag:
                            if isinstance(grad_output_c, Float8Tensor):
                                grad_output_t = grad_output_c.transpose_2d()
                            else:
                                grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
                        if inputmat_t_total is None:
                            if isinstance(inputmat_total, Float8Tensor):
                                inputmat_t_total = inputmat_total.transpose_2d()
                            else:
                                inputmat_t_total = tex.fp8_transpose(
                                    inputmat_total, fp8_dtype_backward
                                )
                        wgrad, _ = fp8_gemm(
                            (
                                inputmat_t_total._data
                                if isinstance(inputmat_t_total, Float8Tensor)
                                else inputmat_t_total
                            ),
                            inputmat_scale_inv,
                            0,
                            fp8_dtype_forward,
                            grad_output_t,
                            ctx.fp8_meta["scaling_bwd"].scale_inv,
                            tex.FP8BwdTensors.GRAD_OUTPUT1,
                            fp8_dtype_backward,
                            ctx.activation_dtype,
                            get_workspace(),
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                            use_split_accumulator=_2X_ACC_WGRAD,
                        )
                    else:
                        wgrad, _, _ = gemm(
                            inputmat_total,
                            grad_output,
                            ctx.activation_dtype,
                            get_workspace(),
                            layout="NT",
                            grad=True,
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                        )
                else:
                    # WGRAD
                    wgrad, grad_bias, _ = gemm(
                        inputmat_total,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NT",
                        grad=True,
                        use_bias=ctx.use_bias,
                        accumulate=accumulate_wgrad_into_param_main_grad,
                        out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                    )

                # Deallocate input tensor
                clear_tensor_data(inputmat_total)
                clear_tensor_data(inputmat_t_total)

            # Column Parallel Linear
            if ctx.parallel_mode == "column" and ctx.tensor_parallel and handle is not None:
                handle.wait()

            if not ctx.use_bias:
                grad_bias = None

        if weight.requires_grad:
            # Handle custom DDP from mcore.
            if ctx.fuse_wgrad_accumulation and hasattr(weight, "grad_added_to_main_grad"):
                weight.grad_added_to_main_grad = True
                if getattr(weight, "zero_out_wgrad", False):
                    wgrad = torch.zeros(
                        weight.main_grad.shape,
                        dtype=weight.dtype,
                        device=torch.cuda.current_device(),
                        requires_grad=False,
                    )
                else:
                    wgrad = torch.empty(
                        weight.main_grad.shape,
                        dtype=weight.dtype,
                        device=torch.cuda.current_device(),
                        requires_grad=False,
                    )
            elif ctx.fuse_wgrad_accumulation:
                wgrad = None
        else:
            wgrad = None

        if ctx.reduce_and_update_bwd_fp8_tensors and not is_graph_capturing():
            FP8GlobalStateManager.reduce_and_update_fp8_tensors(forward=False)

        # Scatter fp8 weight buffers
        if ctx.fp8 and not isinstance(weight, Float8Tensor):
            _fsdp_scatter_tensors(ctx.fsdp_group, weight_fp8)

        return (
            wgrad,
            None,  # weight_fp8
            dgrad.view(ctx.inp_shape) if ctx.requires_dgrad else None,
            grad_bias,
            None,  # use_bias
            None,  # is_first_microbatch
            None,  # fp8
            None,  # fp8_calibration
            None,  # fp8_meta
            None,  # fuse_wgrad_accumulation
            None,  # cpu_offloading
            None,  # tp_group
            None,  # tp_size
            None,  # sequence_parallel
            None,  # tensor_parallel
            None,  # activation_dtype
            None,  # parallel_mode
            None,  # is_grad_enabled
            None,  # ub_overlap_rs
            None,  # ub_overlap_ag
            None,  # ub_name
            None,  # fp8_output
            None,  # fsdp_group
        )


class Linear(TransformerEngineBaseModule):
    """Applies a linear transformation to the incoming data :math:`y = xA^T + b`

    On NVIDIA GPUs it is a drop-in replacement for `torch.nn.Linear`.

    Parameters
    ----------
    in_features : int
                 size of each input sample.
    out_features : int
                  size of each output sample.
    bias : bool, default = `True`
          if set to `False`, the layer will not learn an additive bias.
    init_method : Callable, default = `None`
                 used for initializing weights in the following way: `init_method(weight)`.
                 When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
    get_rng_state_tracker : Callable, default = `None`
                 used to get the random number generator state tracker for initializing weights.
    rng_tracker_name : str, default = `None`
                 the param passed to get_rng_state_tracker to get the specific rng tracker.
    parameters_split : Optional[Union[Tuple[str, ...], Dict[str, int]]], default = None
                      Configuration for splitting the weight and bias tensors along dim 0 into
                      multiple PyTorch parameters. If a list or tuple of strings is provided,
                      they are used to make the names of equally-sized parameters. If a dict
                      (preferably an OrderedDict) is provided, the keys are used as names and
                      values as split sizes along dim 0. The resulting parameters will have
                      names that end in `_weight` or `_bias`, so trailing underscores are
                      stripped from any provided names.
    device : Union[torch.device, str], default = "cuda"
          The device on which the parameters of the model will be allocated. It is the user's
          responsibility to ensure all parameters are moved to the GPU before running the
          forward pass.

    Parallelism parameters
    ----------------------
    sequence_parallel : bool, default = `False`
                       if set to `True`, uses sequence parallelism.
    tp_group : ProcessGroup, default = `None`
              tensor parallel process group.
    tp_size : int, default = 1
             used as TP (tensor parallel) world size when TP groups are not formed during
             initialization. In this case, users must call the
             `set_tensor_parallel_group(tp_group)` method on the initialized module before the
             forward pass to supply the tensor parallel group needed for tensor and sequence
             parallel collectives.
    parallel_mode : {None, 'column', 'row'}, default = `None`
                   used to decide whether this Linear layer is Column Parallel Linear or Row
                   Parallel Linear as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
                   When set to `None`, no communication is performed.

    Optimization parameters
    -----------------------
    fuse_wgrad_accumulation : bool, default = `False`
                             if set to `True`, enables fusing of creation and accumulation of
                             the weight gradient. When enabled, it is assumed that the weights
                             have an additional `main_grad` attribute (used instead of the
                             regular `grad`) which is a pre-allocated buffer of the correct
                             size to accumulate gradients in.
    return_bias : bool, default = `False`
                 when set to `True`, this module will not apply the additive bias itself, but
                 instead return the bias value during the forward pass together with the
                 output of the linear transformation :math:`y = xA^T`. This is useful when
                 the bias addition can be fused to subsequent operations.
    params_dtype : torch.dtype, default = `torch.get_default_dtype()`
                  it controls the type used to allocate the initial parameters. Useful when
                  the model is trained with lower precision and the original FP32 parameters
                  would not fit in GPU memory.

    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        sequence_parallel: bool = False,
        fuse_wgrad_accumulation: bool = False,
        tp_group: Optional[dist_group_type] = None,
        tp_size: int = 1,
        get_rng_state_tracker: Optional[Callable] = None,
        rng_tracker_name: Optional[str] = None,
        init_method: Optional[Callable] = None,
        bias: bool = True,
        return_bias: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        parallel_mode: Optional[str] = None,
        parameters_split: Optional[Union[Tuple[str, ...], Dict[str, int]]] = None,
        device: Union[torch.device, str] = "cuda",
        ub_overlap_rs: bool = False,
        ub_overlap_ag: bool = False,
        ub_name: Optional[str] = None,
    ) -> None:
        super().__init__()

        params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
        self.in_features = in_features
        self.out_features = out_features
        self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
        self.use_bias = bias
        self.return_bias = return_bias
        self.apply_bias = bias and not return_bias
        self.ub_overlap_rs = ub_overlap_rs
        self.ub_overlap_ag = ub_overlap_ag
        if ub_overlap_rs or ub_overlap_ag:
            assert ub_name is not None, "Userbuffer name [string] is not set."
        self.ub_name = ub_name
        self.get_rng_state_tracker = get_rng_state_tracker
        self.rng_tracker_name = rng_tracker_name

        if device == "meta":
            assert parameters_split is None, "Cannot split module parameters on 'meta' device."
        if tp_group is None:
            self.tp_size = tp_size
            if tp_size == 1:
                self.set_tensor_parallel_group(tp_group)
        else:
            self.tp_size = get_distributed_world_size(tp_group)
            self.set_tensor_parallel_group(tp_group)
        self.set_nccl_overlap_warning_if_tp()

        self.parallel_mode = parallel_mode
        assert (
            self.parallel_mode in GemmParallelModes
        ), f"parallel_mode {parallel_mode} not supported"

        if self.parallel_mode == "column":
            self.out_features = divide(self.out_features, self.tp_size)
        elif self.parallel_mode == "row":
            self.in_features = divide(self.in_features, self.tp_size)

        self.sequence_parallel = (self.tp_size > 1) and sequence_parallel

        # Initialize params in FP8
        with_fp8_params = FP8GlobalStateManager.with_fp8_parameters()

        # Contiguous buffers for params
        weight_tensor = torch.empty(
            self.out_features,
            self.in_features,
            device=device,
            dtype=params_dtype,
        )
        bias_tensor = None
        if self.use_bias:
            bias_tensor = torch.empty(
                self.out_features,
                device=device,
                dtype=params_dtype,
            )

        # Configure parameter splits
        self.weight_names = []
        self.bias_names = []
        self.parameter_split_sizes = []
        if parameters_split is None:
            # Split into a single parameter by default
            self.weight_names = ["weight"]
            self.bias_names = ["bias"]
            self.parameter_split_sizes = [out_features]
        elif not parameters_split:
            raise ValueError("Cannot split weight buffer into 0 parameters")
        elif isinstance(parameters_split, dict):
            # Split parameters with provided sizes
            for name, split_size in parameters_split.items():
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        elif all(isinstance(name, str) for name in parameters_split):
            # Split parameters evenly
            split_size = out_features // len(parameters_split)
            for name in parameters_split:
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        else:
            raise TypeError("Invalid configuration for parameters split")

        # Make sure parameter splits are valid
        if sum(self.parameter_split_sizes) != out_features:
            raise ValueError(
                f"Trying to split weight buffer ({out_features=}) "
                f"with split sizes {self.parameter_split_sizes}"
            )

        # Adjust parameter splits for tensor-parallel distribution
        if self.parallel_mode == "column":
            for i, size in enumerate(self.parameter_split_sizes):
                if size % self.tp_size != 0:
                    raise RuntimeError(
                        f"Attempting to distribute a parameter with out_features={size} "
                        f"between {self.tp_size} tensor-parallel processes"
                    )
                self.parameter_split_sizes[i] = size // self.tp_size

        # Construct weight parameters
        # Note: Register weights together so that they are adjacent to
        # each other in Linear.parameters(). This makes it more likely
        # that they will stay contiguous if the weights are
        # manipulated externally, e.g. by FSDP.
        offset = 0
        for i, split_size in enumerate(self.parameter_split_sizes):
            split_start = offset
            offset += split_size
            split_end = offset

            # Check if parameters are subviews of buffers
            is_subview = (split_start, split_end) != (0, self.out_features)
            if is_subview and with_fp8_params:
                raise RuntimeError("Splitting Float8Tensor into multiple params is not supported")

            # Construct weight parameter
            self.register_parameter(
                self.weight_names[i],
                torch.nn.Parameter(weight_tensor[split_start:split_end]),
                init_fn=init_method,
                get_rng_state_tracker=get_rng_state_tracker,
                fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
            )

        # Construct bias parameters if needed
        if self.use_bias:
            offset = 0
            for i, split_size in enumerate(self.parameter_split_sizes):
                split_start = offset
                offset += split_size
                split_end = offset
                self.register_parameter(
                    self.bias_names[i],
                    torch.nn.Parameter(bias_tensor[split_start:split_end]),
                    init_fn=init_method_constant(0.0),
                )
        else:
            for name in self.bias_names:
                bias = torch.Tensor().to(dtype=params_dtype, device=device)
                setattr(self, name, bias)

        if with_fp8_params:
            self.init_fp8_metadata()

        self.reset_parameters(defer_init=device == "meta")

        # For RPL, bias has to be added after TP collectives
        # So it cannot be fused with the GEMM
        if self.parallel_mode == "row" and self.apply_bias:
            self.gemm_bias_unfused_add = True
        else:
            self.gemm_bias_unfused_add = False

    def reset_parameters(self, defer_init=False):
        super().reset_parameters(defer_init=defer_init)

        if not defer_init:
            # Set parallelism attributes for linear weights
            for weight in self.weight_names:
                set_tensor_model_parallel_attributes(
                    tensor=getattr(self, weight),
                    is_parallel=True,
                    dim=1 if self.parallel_mode == "row" else 0,
                    stride=1,
                )

            # Set parallelism attributes for linear biases
            if self.use_bias:
                for bias in self.bias_names:
                    if self.parallel_mode == "row":
                        setattr(getattr(self, bias), "sequence_parallel", self.sequence_parallel)
                    elif self.parallel_mode == "column":
                        set_tensor_model_parallel_attributes(getattr(self, bias), True, 0, 1)

    @no_torch_dynamo()
    def forward(
        self,
        inp: torch.Tensor,
        is_first_microbatch: Optional[bool] = None,
        fp8_output: Optional[bool] = False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        """
        Apply the linear transformation to the input.

        Parameters
        ----------
        inp : torch.Tensor
             Input tensor.
        is_first_microbatch : {True, False, None}, default = None
                             During training using either gradient accumulation or
                             pipeline parallelism a minibatch of data is further split
                             into microbatches. Between the microbatches of the same minibatch
                             the model weights are not updated. Setting this parameter indicates
                             whether the current microbatch is the first in a minibatch or not.
                             When set, this parameter enables additional optimizations:

                             * during FP8 training, it allows caching of the FP8 versions of
                               the weights
                             * it also allows skipping gradient accumulation during the
                               first microbatch (since it is the first gradient being
                               produced)
        """
        if FP8GlobalStateManager.fp8_graph_capturing():
            skip_fp8_weight_update = FP8GlobalStateManager.get_skip_fp8_weight_update_tensor()
        else:
            skip_fp8_weight_update = None
        if skip_fp8_weight_update is not None:
            is_first_microbatch = False

        with self.prepare_forward(
            inp,
            is_first_microbatch,
            allow_non_contiguous=isinstance(inp, QuantizedTensor),
        ) as inp:

            # Get concatenated weight and bias tensors
            unfused_weights = [getattr(self, name) for name in self.weight_names]
            if any(isinstance(w, QuantizedTensor) for w in unfused_weights):
                if self.fp8:
                    if len(unfused_weights) != 1:
                        raise RuntimeError(
                            "Splitting QuantizedTensor into multiple params is not supported"
                        )
                else:
                    unfused_weights = [w.dequantize() for w in unfused_weights]
            weight_tensor = _noop_cat(unfused_weights)
            if self.use_bias:
                bias_tensor = _noop_cat([getattr(self, name) for name in self.bias_names])
            else:
                bias_tensor = getattr(self, self.bias_names[0])  # Unused

            # Initialize FP8 weights if needed
            weight_fp8 = None
            if self.fp8:
                if isinstance(weight_tensor, Float8Tensor):
                    # Make sure transpose cache is valid, if present
                    # Note: Transpose cache may have been invalidated
                    # externally, e.g. by optimizer.
                    if weight_tensor._transpose is not None:
                        weight_tensor.transpose_2d(
                            fill_cache=True,
                            noop_flag=skip_fp8_weight_update,
                        )
                else:
                    # FP8 cast to workspace buffer
                    update_workspace = is_first_microbatch is None or is_first_microbatch
                    weight_fp8 = self.get_fp8_workspace(
                        tensor=weight_tensor,
                        fp8_meta_forward=True,
                        fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
                        cache_name=(None if is_first_microbatch is None else "weight"),
                        update_workspace=update_workspace,
                        skip_update_flag=skip_fp8_weight_update,
                        fsdp_group=self.fsdp_group,
                    )

            if torch.is_grad_enabled():
                linear_fn = _Linear.apply
                args = []
            else:
                linear_fn = _Linear.forward
                args = [None]
            args += (
                weight_tensor,
                weight_fp8,
                inp,
                bias_tensor,
                self.apply_bias and not self.gemm_bias_unfused_add,
                is_first_microbatch,
                self.fp8,
                self.fp8_calibration,
                self.fp8_meta,
                self.fuse_wgrad_accumulation,
                is_cpu_offload_enabled(),
                self.tp_group,
                self.tp_size,
                self.sequence_parallel,
                self.tp_size > 1,
                self.activation_dtype,
                self.parallel_mode,
                torch.is_grad_enabled(),
                self.ub_overlap_rs,
                self.ub_overlap_ag,
                self.ub_name,
                fp8_output,
                self.fsdp_group,
            )
            out = linear_fn(*args)

        if self.gemm_bias_unfused_add:
            out = out + cast_if_needed(bias_tensor, self.activation_dtype)

        if self.return_bias:
            return out, cast_if_needed(bias_tensor, self.activation_dtype)
        return out