# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Linear API"""
import os
from typing import Union, Optional, Callable, Tuple, List, Dict, Any

import torch

import transformer_engine_extensions as tex

from .base import (
    get_workspace,
    get_ub,
    TransformerEngineBaseModule,
    _2X_ACC_FPROP,
    _2X_ACC_DGRAD,
    _2X_ACC_WGRAD,
)
from ._common import _noop_cat
from ..fp8 import get_fp8_te_dtype, FP8GlobalStateManager
from ..utils import (
    divide,
    cast_if_needed,
    assert_dim_for_fp8_exec,
    clear_tensor_data,
    init_method_constant,
    requires_grad,
)
from ..distributed import (
    set_tensor_model_parallel_attributes,
    get_distributed_world_size,
    allreduce,
    reduce_scatter_along_first_dim,
    gather_along_first_dim,
    is_fp8_activation_recompute_enabled,
    in_fp8_activation_recompute_phase,
)
from ..cpp_extensions import (
    fp8_gemm,
    gemm,
    fp8_cast_transpose_fused,
    cast_to_fp8,
)
from ..constants import GemmParallelModes, dist_group_type
from ..jit import no_torch_dynamo
from ..graph import is_graph_capturing
from ..float8_tensor import Float8Tensor

_NVTE_DEBUG = int(os.getenv("NVTE_DEBUG", "0"))

__all__ = ["Linear"]


class _Linear(torch.autograd.Function):
    """Linear semi-top level module
    Calls custom cuda extensions.
    """

    @staticmethod
    def forward(
        ctx,
        weight: Union[Float8Tensor, torch.Tensor],
        weight_fp8: Union[Float8Tensor, None],
        weight_t_fp8: Union[Float8Tensor, None],
        inp: torch.Tensor,
        bias: torch.Tensor,
        use_bias: bool,
        is_first_microbatch: Union[bool, None],
        skip_fp8_weight_update: Union[torch.Tensor, None],
        fp8: bool,
        fp8_calibration: bool,
        fp8_meta: Dict[str, Any],
        fuse_wgrad_accumulation: bool,
        cpu_offloading: bool,
        tp_group: Union[dist_group_type, None],
        tp_size: int,
        sequence_parallel: bool,
        tensor_parallel: bool,
        activation_dtype: torch.dtype,
        parallel_mode: Union[str, None],
        is_grad_enabled: bool,
        primary_weights_in_fp8: bool,
        ub_overlap_rs: bool,
        ub_overlap_ag: bool,
        ub_name: str,
        is_first_module_in_mha: bool,
    ) -> torch.Tensor:
        is_input_fp8 = isinstance(inp, Float8Tensor)
        if is_input_fp8:
            fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_INPUT] = inp._scale_inv[0]

        # Make sure input dimensions are compatible
        in_features = weight.shape[-1]
        assert inp.shape[-1] == in_features, "GEMM not possible"
        inputmat = inp.view(-1, in_features)
        if fp8:
            assert_dim_for_fp8_exec(inputmat)
            assert_dim_for_fp8_exec(weight)

        update_fp8_weights = (
            is_first_microbatch is None
            or is_first_microbatch
            or skip_fp8_weight_update is not None
        )

        tp_world_size = get_distributed_world_size(tp_group)
        ub_overlap_rs = False if tp_world_size == 1 else ub_overlap_rs

        # Cast input to expected dtype
        inputmat = cast_if_needed(inputmat, activation_dtype)
        inputmat_t = None
        inputmat_no_fp8 = inputmat

        if fp8:
            fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
            if isinstance(inputmat, Float8Tensor):
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat_t = inputmat.transpose_2d()
            else:
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat, inputmat_t = fp8_cast_transpose_fused(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                    )
                else:
                    # FP8 input for forward
                    inputmat = cast_to_fp8(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                    )

        # Column Parallel Linear
        if parallel_mode == "column" and sequence_parallel:
            inputmat_total, _ = gather_along_first_dim(inputmat, tp_group)
        else:
            inputmat_total = inputmat
        if fp8:
            if _NVTE_DEBUG:
                print('[Linear]: using FP8 forward')

            bias_dtype = (
                torch.bfloat16
                if activation_dtype == torch.float32
                else activation_dtype
            )
            bias = cast_if_needed(bias, bias_dtype) if use_bias else bias

            if primary_weights_in_fp8:
                # Weight is already in FP8
                weight.reset_fp8_meta_scale_inv()
                weight_fp8 = weight
            elif update_fp8_weights:
                # Need to cast weights to FP8
                weight_fp8 = Float8Tensor(
                    data=weight_fp8._data,
                    fp8_meta=fp8_meta,
                    fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
                )
                if (is_grad_enabled
                    or (is_fp8_activation_recompute_enabled()
                        and not in_fp8_activation_recompute_phase())):
                    # FP8 weight for forward, FP8 weight transpose for backward dgrad
                    fp8_cast_transpose_fused(
                        weight,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_WEIGHT,
                        fp8_dtype_forward,
                        cast_out=weight_fp8._data,
                        transpose_out=weight_t_fp8._data,
                        noop_flag=skip_fp8_weight_update,
                    )
                else:
                    cast_to_fp8(
                        weight,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_WEIGHT,
                        fp8_dtype_forward,
                        out=weight_fp8._data,
                    )
                    weight_t_fp8 = None

            if is_first_module_in_mha:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_meta["scaling_fwd"],
                    fp8_dtype_forward,
                    torch.uint8)
            else:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    None, None, None, activation_dtype)

            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name+"_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // tp_world_size
                dim_size[1] = weight.size(0)
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_RS_P2P
                    else:
                        ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_RS
                    else:
                        ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS
                if ub_obj_projout.is_fp8_ubuf():
                    proj_out_index = tex.FP8FwdTensors.GEMM1_OUTPUT
                    meta_tensor = fp8_meta["scaling_fwd"]
                    proj_out_tetype = fp8_dtype_forward
                    proj_out_pttype = torch.uint8
                    ub_obj_projout.set_ubuf_scale_inv(meta_tensor.scale_inv[proj_out_index])
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = weight.size(0)
                out = torch.empty(dim_size, dtype=proj_out_pttype, device=inputmat_total.device)

            _ = fp8_gemm(
                weight_fp8._data,
                fp8_meta["scaling_fwd"].scale_inv,
                tex.FP8FwdTensors.GEMM1_WEIGHT,
                fp8_dtype_forward,
                inputmat_total._data
                if isinstance(inputmat_total, Float8Tensor) else inputmat_total,
                fp8_meta["scaling_fwd"].scale_inv,
                tex.FP8FwdTensors.GEMM1_INPUT,
                fp8_dtype_forward,
                proj_out_pttype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                use_split_accumulator=_2X_ACC_FPROP,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
                out_index=proj_out_index,
                fp8_meta_tensor=meta_tensor,
                D_dtype=proj_out_tetype,
            )
            if is_first_module_in_mha:
                out = Float8Tensor(
                    data=out,
                    fp8_meta=fp8_meta,
                    fp8_meta_forward=True,
                    fp8_meta_index=tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_dtype=fp8_dtype_forward,
                    dtype=activation_dtype,
                )
        else:
            if _NVTE_DEBUG:
                print('[Linear]: using non-FP8 forward')

            # Cast for native AMP
            weight = cast_if_needed(weight, activation_dtype)
            bias = cast_if_needed(bias, activation_dtype) if use_bias else bias

            if fp8_calibration:
                # amax of input
                amin, amax = inputmat_total.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = \
                    torch.max(-amin, amax).float()
                # amax of weight
                amin, amax = weight.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = \
                    torch.max(-amin, amax).float()

            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name+"_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // get_distributed_world_size(tp_group)
                dim_size[1] = weight.size(0)
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = weight.size(0)
                out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)

            _ = gemm(
                weight,
                inputmat_total,
                activation_dtype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
            )

        if is_grad_enabled:
            saved_inputmat = None
            saved_inputmat_t = None
            if weight.requires_grad:
                if fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad:
                    if inputmat_t is None:
                        saved_inputmat = inputmat
                    else:
                        saved_inputmat_t = inputmat_t
                        if cpu_offloading:
                            saved_inputmat_t.activation_offloading = True
                else:
                    saved_inputmat = inputmat_no_fp8

                if cpu_offloading:
                    if fuse_wgrad_accumulation:
                        weight.main_grad.weight_offloading = True
                    if fp8 and weight_t_fp8 is not None:
                        weight_t_fp8.weight_offloading = True
                    weight.weight_offloading = True

                    if saved_inputmat is not None:
                        saved_inputmat.activation_offloading = True

            ctx.save_for_backward(
                saved_inputmat,
                saved_inputmat_t,
                weight,
                weight.main_grad if cpu_offloading and fuse_wgrad_accumulation else None,
                weight_t_fp8 if fp8 else None,
                fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None,
                skip_fp8_weight_update.clone() if skip_fp8_weight_update is not None else None,
            )
            ctx.activation_dtype = activation_dtype
            ctx.fp8 = fp8
            ctx.fp8_meta = fp8_meta
            ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
            ctx.cpu_offloading = cpu_offloading
            ctx.is_first_microbatch = is_first_microbatch
            ctx.use_bias = use_bias
            ctx.sequence_parallel = sequence_parallel
            ctx.tensor_parallel = tensor_parallel
            ctx.inp_shape = inp.shape
            ctx.parallel_mode = parallel_mode
            ctx.tp_group = tp_group
            ctx.ub_overlap_ag = ub_overlap_ag
            ctx.ub_name = ub_name
            ctx.tp_size = tp_size
            ctx.requires_dgrad = inp.requires_grad
            ctx.is_input_fp8 = is_input_fp8
            ctx.primary_weights_in_fp8 = primary_weights_in_fp8
            ctx.reduce_and_update_bwd_fp8_tensors = False
            if ctx.fp8 and requires_grad(inp, weight, bias):
                ctx.reduce_and_update_bwd_fp8_tensors = (
                    ctx.reduce_and_update_bwd_fp8_tensors or
                    FP8GlobalStateManager.is_first_fp8_module())

        # Row Parallel Linear
        if ub_overlap_rs:
            out = rs_out
        elif parallel_mode == "row" and sequence_parallel:
            out, _ = reduce_scatter_along_first_dim(out, tp_group)
        elif parallel_mode == "row" and tensor_parallel:
            out, _ = allreduce(out, tp_group)

        # [*, in_features] -> [*, out_features] except first dimension changes for SP
        return out.view(-1, *inp.shape[1:-1], out.shape[-1])


    @staticmethod
    def backward(
        ctx, grad_output: torch.Tensor
    ) -> Tuple[Union[torch.Tensor, None], ...]:
        if isinstance(grad_output, Float8Tensor):
            ctx.fp8_meta["scaling_bwd"].scale_inv[
                tex.FP8BwdTensors.GRAD_OUTPUT1] = grad_output._scale_inv

        with torch.cuda.nvtx.range("_Linear_backward"):
            (
                inputmat,
                inputmat_t,
                weight,
                main_grad,
                weight_t_fp8,
                fwd_scale_inverses,
                skip_fp8_weight_update,
            ) = ctx.saved_tensors

            if ctx.cpu_offloading and ctx.fuse_wgrad_accumulation:
                weight = torch.nn.Parameter(weight, False)
                weight.main_grad = main_grad

            # Primary weights are in FP8.
            if ctx.primary_weights_in_fp8:
                weight_t_fp8 = weight.transpose_2d(
                    cache=ctx.is_first_microbatch is not None,
                    noop_flag=skip_fp8_weight_update,
                )
            elif ctx.fp8:
                weight_t_fp8 = weight_t_fp8._data

            tp_world_size = get_distributed_world_size(ctx.tp_group)
            ctx.ub_overlap_ag = False if tp_world_size == 1 else ctx.ub_overlap_ag
            if ctx.ub_overlap_ag:
                dim_size = list(grad_output.size())
                dim_size[0] = dim_size[0] * tp_world_size
                ctx.ub_obj_gradout = get_ub(ctx.ub_name+"_dgrad")
                if ctx.ub_obj_gradout.is_atomic_gemm():
                    ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_AG_P2P
                else:
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG_P2P

            (
                grad_output,
                grad_output_c,
                grad_output_t,
                grad_bias,
            ) = TransformerEngineBaseModule.grad_output_preprocess(
                ctx, grad_output, ctx.parallel_mode == "row"
            )

            # Column Parallel Linear
            # Overlap input AG with dgrad
            inputmat_total = None
            inputmat_t_total = None
            handle = None
            if weight.requires_grad and ctx.parallel_mode == "column" and ctx.sequence_parallel:
                inputmat_total, handle = gather_along_first_dim(
                    inputmat, ctx.tp_group, async_op=ctx.requires_dgrad
                )
            else:
                inputmat_total = inputmat
                inputmat_t_total = inputmat_t

            if ctx.is_first_microbatch is not None:
                accumulate_wgrad_into_param_main_grad = (
                    ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
                )
            else:
                accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation

            if ctx.fp8:
                fp8_dtype_forward = get_fp8_te_dtype(
                    ctx.fp8_meta["recipe"], fprop_tensor=True
                )
                fp8_dtype_backward = get_fp8_te_dtype(
                    ctx.fp8_meta["recipe"], fprop_tensor=False
                )

            if ctx.requires_dgrad:
                if ctx.fp8:
                    if _NVTE_DEBUG:
                        print('[Linear]: using FP8 backward')

                    if ctx.is_input_fp8:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            tex.FP8BwdTensors.GRAD_INPUT1,
                            ctx.fp8_meta["scaling_bwd"],
                            fp8_dtype_backward,
                            torch.uint8)
                    else:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            None, None, None, ctx.activation_dtype)
                    dgrad, _ = fp8_gemm(
                        weight_t_fp8,
                        fwd_scale_inverses,
                        tex.FP8FwdTensors.GEMM1_WEIGHT,
                        fp8_dtype_forward,
                        grad_output_c,
                        ctx.fp8_meta["scaling_bwd"].scale_inv,
                        tex.FP8BwdTensors.GRAD_OUTPUT1,
                        fp8_dtype_backward,
                        output_dtype,
                        get_workspace(),
                        use_split_accumulator=_2X_ACC_DGRAD,
                        ub_algo=ub_algo if ctx.ub_overlap_ag else None,
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                        out_index=out_index,
                        fp8_meta_tensor=meta_tensor,
                        D_dtype=output_te_dtype,
                    )
                    if output_dtype == torch.uint8:
                        dgrad = Float8Tensor(
                            data=dgrad,
                            fp8_meta=ctx.fp8_meta,
                            fp8_meta_forward=False,
                            fp8_meta_index=tex.FP8BwdTensors.GRAD_INPUT1,
                            fp8_dtype=fp8_dtype_backward,
                            dtype=ctx.activation_dtype,
                        )
                else:
                    if _NVTE_DEBUG:
                        print('[Linear]: using non-FP8 backward')

                    dgrad, _, _ = gemm(
                        weight,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NN",
                        grad=True,
                        ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG_P2P \
                            if ctx.ub_overlap_ag else None,
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                    )

                # Overlap dgrad-RS/AR with wgrad
                if ctx.parallel_mode == "column" and ctx.sequence_parallel:
                    if handle is not None:
                        handle.wait()
                    dgrad, handle = reduce_scatter_along_first_dim(
                        dgrad, ctx.tp_group, async_op=True
                    )
                elif ctx.parallel_mode == "column" and ctx.tensor_parallel:
                    dgrad, handle = allreduce(dgrad, ctx.tp_group, async_op=True)

            if weight.requires_grad:
                if ctx.fp8:
                    # WGRAD
                    if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
                        if ctx.ub_overlap_ag:
                            if isinstance(grad_output_c, Float8Tensor):
                                grad_output_t = grad_output_c.transpose_2d()
                            else:
                                grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
                        if inputmat_t_total is None:
                            if isinstance(inputmat_total, Float8Tensor):
                                inputmat_t_total = inputmat_total.transpose_2d()
                            else:
                                inputmat_t_total = tex.fp8_transpose(
                                    inputmat_total, fp8_dtype_backward)
                        wgrad, _ = fp8_gemm(
                            inputmat_t_total._data
                            if isinstance(inputmat_t_total, Float8Tensor) else inputmat_t_total,
                            fwd_scale_inverses,
                            tex.FP8FwdTensors.GEMM1_INPUT,
                            fp8_dtype_forward,
                            grad_output_t,
                            ctx.fp8_meta["scaling_bwd"].scale_inv,
                            tex.FP8BwdTensors.GRAD_OUTPUT1,
                            fp8_dtype_backward,
                            ctx.activation_dtype,
                            get_workspace(),
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                            use_split_accumulator=_2X_ACC_WGRAD,
                        )
                    else:
                        wgrad, _, _ = gemm(
                            inputmat_total,
                            grad_output,
                            ctx.activation_dtype,
                            get_workspace(),
                            layout="NT",
                            grad=True,
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                        )
                else:
                    # WGRAD
                    wgrad, grad_bias, _ = gemm(
                        inputmat_total,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NT",
                        grad=True,
                        use_bias=ctx.use_bias,
                        accumulate=accumulate_wgrad_into_param_main_grad,
                        out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                    )

                # Deallocate input tensor
                clear_tensor_data(inputmat_total)
                clear_tensor_data(inputmat_t_total)

            # Column Parallel Linear
            if ctx.parallel_mode == "column" and ctx.tensor_parallel and handle is not None:
                handle.wait()

            if not ctx.use_bias:
                grad_bias = None

        if weight.requires_grad:
            # Handle custom DDP from mcore.
            if ctx.fuse_wgrad_accumulation and hasattr(weight, 'grad_added_to_main_grad'):
                weight.grad_added_to_main_grad = True
                if getattr(weight, 'zero_out_wgrad', False):
                    wgrad = torch.zeros(weight.main_grad.shape,
                                        dtype=weight.dtype,
                                        device=torch.cuda.current_device(),
                                        requires_grad=False
                                       )
                else:
                    wgrad = torch.empty(weight.main_grad.shape,
                                        dtype=weight.dtype,
                                        device=torch.cuda.current_device(),
                                        requires_grad=False
                                       )
            elif ctx.fuse_wgrad_accumulation:
                wgrad = None
        else:
            wgrad = None

        if ctx.reduce_and_update_bwd_fp8_tensors and not is_graph_capturing():
            FP8GlobalStateManager.reduce_and_update_fp8_tensors(forward=False)

        return (
            wgrad,  # weight
            None,   # weight_fp8
            None,   # weight_t_fp8
            dgrad.view(ctx.inp_shape) if ctx.requires_dgrad else None,  # inp
            grad_bias,  # bias
            None,  # use_bias
            None,  # is_first_microbatch
            None,  # skip_fp8_weight_update
            None,  # fp8
            None,  # fp8_calibration
            None,  # fp8_meta
            None,  # fuse_wgrad_accumulation
            None,  # cpu_offloading
            None,  # tp_group
            None,  # tp_size
            None,  # sequence_parallel
            None,  # tensor_parallel
            None,  # activation_dtype
            None,  # parallel_mode
            None,  # is_grad_enabled
            None,  # primary_weights_in_fp8
            None,  # ub_overlap_rs
            None,  # ub_overlap_ag
            None,  # ub_name
            None,  # is_first_module_in_mha
        )


class Linear(TransformerEngineBaseModule):
    """Applies a linear transformation to the incoming data :math:`y = xA^T + b`

    On NVIDIA GPUs it is a drop-in replacement for `torch.nn.Linear`.

    Parameters
    ----------
    in_features : int
                 size of each input sample.
    out_features : int
                  size of each output sample.
    bias : bool, default = `True`
          if set to `False`, the layer will not learn an additive bias.
    init_method : Callable, default = `None`
                 used for initializing weights in the following way: `init_method(weight)`.
                 When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
    get_rng_state_tracker : Callable, default = `None`
                 used to get the random number generator state tracker for initializing weights.
    rng_tracker_name : str, default = `None`
                 the param passed to get_rng_state_tracker to get the specific rng tracker.
    parameters_split : Optional[Union[Tuple[str, ...], Dict[str, int]]], default = None
                      Configuration for splitting the weight and bias tensors along dim 0 into
                      multiple PyTorch parameters. If a list or tuple of strings is provided,
                      they are used to make the names of equally-sized parameters. If a dict
                      (preferably an OrderedDict) is provided, the keys are used as names and
                      values as split sizes along dim 0. The resulting parameters will have
                      names that end in `_weight` or `_bias`, so trailing underscores are
                      stripped from any provided names.
    device : Union[torch.device, str], default = "cuda"
          The device on which the parameters of the model will be allocated. It is the user's
          responsibility to ensure all parameters are moved to the GPU before running the
          forward pass.

    Parallelism parameters
    ----------------------
    sequence_parallel : bool, default = `False`
                       if set to `True`, uses sequence parallelism.
    tp_group : ProcessGroup, default = `None`
              tensor parallel process group.
    tp_size : int, default = 1
             used as TP (tensor parallel) world size when TP groups are not formed during
             initialization. In this case, users must call the
             `set_tensor_parallel_group(tp_group)` method on the initialized module before the
             forward pass to supply the tensor parallel group needed for tensor and sequence
             parallel collectives.
    parallel_mode : {None, 'column', 'row'}, default = `None`
                   used to decide whether this Linear layer is Column Parallel Linear or Row
                   Parallel Linear as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
                   When set to `None`, no communication is performed.

    Optimization parameters
    -----------------------
    fuse_wgrad_accumulation : bool, default = `False`
                             if set to `True`, enables fusing of creation and accumulation of
                             the weight gradient. When enabled, it is assumed that the weights
                             have an additional `main_grad` attribute (used instead of the
                             regular `grad`) which is a pre-allocated buffer of the correct
                             size to accumulate gradients in.
    return_bias : bool, default = `False`
                 when set to `True`, this module will not apply the additive bias itself, but
                 instead return the bias value during the forward pass together with the
                 output of the linear transformation :math:`y = xA^T`. This is useful when
                 the bias addition can be fused to subsequent operations.
    params_dtype : torch.dtype, default = `torch.get_default_dtype()`
                  it controls the type used to allocate the initial parameters. Useful when
                  the model is trained with lower precision and the original FP32 parameters
                  would not fit in GPU memory.

    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        sequence_parallel: bool = False,
        fuse_wgrad_accumulation: bool = False,
        tp_group: Optional[dist_group_type] = None,
        tp_size: int = 1,
        get_rng_state_tracker: Optional[Callable] = None,
        rng_tracker_name: Optional[str] = None,
        init_method: Optional[Callable] = None,
        bias: bool = True,
        return_bias: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        parallel_mode: Optional[str] = None,
        parameters_split: Optional[Union[Tuple[str, ...], Dict[str, int]]] = None,
        device: Union[torch.device, str] = "cuda",
        ub_overlap_rs: bool = False,
        ub_overlap_ag: bool = False,
        ub_name: Optional[str] = None,
    ) -> None:
        super().__init__()

        params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
        self.in_features = in_features
        self.out_features = out_features
        self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
        self.use_bias = bias
        self.return_bias = return_bias
        self.apply_bias = bias and not return_bias
        self.primary_weights_in_fp8 = FP8GlobalStateManager.with_fp8_parameters()
        self.ub_overlap_rs = ub_overlap_rs
        self.ub_overlap_ag = ub_overlap_ag
        if ub_overlap_rs or ub_overlap_ag:
            assert ub_name is not None, "Userbuffer name [string] is not set."
            assert (
                tex.userbuf_comm_available()
            ), "Userbuffer communication backend not available."
        self.ub_name = ub_name
        self.get_rng_state_tracker = get_rng_state_tracker
        self.rng_tracker_name = rng_tracker_name

        if device == 'meta':
            assert parameters_split is None, ("Cannot split module parameters "
                                              "on 'meta' device.")
        if tp_group is None:
            self.tp_size = tp_size
            if tp_size == 1:
                self.set_tensor_parallel_group(tp_group)
        else:
            self.tp_size = get_distributed_world_size(tp_group)
            self.set_tensor_parallel_group(tp_group)
        self.set_nccl_overlap_warning_if_tp()

        self.parallel_mode = parallel_mode
        assert (
            self.parallel_mode in GemmParallelModes
        ), f"parallel_mode {parallel_mode} not supported"

        if self.parallel_mode == "column":
            self.out_features = divide(self.out_features, self.tp_size)
        elif self.parallel_mode == "row":
            self.in_features = divide(self.in_features, self.tp_size)

        self.sequence_parallel = (self.tp_size > 1) and sequence_parallel

        # Contiguous buffers for params
        weight_tensor = torch.empty(
            self.out_features,
            self.in_features,
            device=device,
            dtype=params_dtype,
        )
        bias_tensor = None
        if self.use_bias:
            bias_tensor = torch.empty(
                self.out_features,
                device=device,
                dtype=params_dtype,
            )

        # Configure parameter splits
        self.weight_names = []
        self.bias_names = []
        self.parameter_split_sizes = []
        if parameters_split is None:
            # Split into a single parameter by default
            self.weight_names = ["weight"]
            self.bias_names = ["bias"]
            self.parameter_split_sizes = [out_features]
        elif not parameters_split:
            raise ValueError("Cannot split weight buffer into 0 parameters")
        elif isinstance(parameters_split, dict):
            # Split parameters with provided sizes
            for name, split_size in parameters_split.items():
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        elif all(isinstance(name, str) for name in parameters_split):
            # Split parameters evenly
            split_size = out_features // len(parameters_split)
            for name in parameters_split:
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        else:
            raise TypeError("Invalid configuration for parameters split")

        # Make sure parameter splits are valid
        if sum(self.parameter_split_sizes) != out_features:
            raise ValueError(
                f"Trying to split weight buffer ({out_features=}) "
                f"with split sizes {self.parameter_split_sizes}"
            )

        # Adjust parameter splits for tensor-parallel distribution
        if self.parallel_mode == "column":
            for i, size in enumerate(self.parameter_split_sizes):
                if size % self.tp_size != 0:
                    raise RuntimeError(
                        f"Attempting to distribute a parameter with out_features={size} "
                        f"between {self.tp_size} tensor-parallel processes"
                    )
                self.parameter_split_sizes[i] = size // self.tp_size

        # Construct weight parameters
        # Note: Register weights together so that they are adjacent to
        # each other in Linear.parameters(). This makes it more likely
        # that they will stay contiguous if the weights are
        # manipulated externally, e.g. by FSDP.
        offset = 0
        for i, split_size in enumerate(self.parameter_split_sizes):
            split_start = offset
            offset += split_size
            split_end = offset

            # Check if parameters are subviews of buffers
            is_subview = (split_start, split_end) != (0, self.out_features)
            if is_subview and self.primary_weights_in_fp8:
                raise RuntimeError(
                    "Splitting Float8Tensor into multiple params "
                    "is not supported"
                )

            # Construct weight parameter
            self.register_parameter(
                self.weight_names[i],
                torch.nn.Parameter(weight_tensor[split_start:split_end]),
                init_fn=init_method,
                get_rng_state_tracker=get_rng_state_tracker,
                fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
            )

        # Construct bias parameters if needed
        if self.use_bias:
            offset = 0
            for i, split_size in enumerate(self.parameter_split_sizes):
                split_start = offset
                offset += split_size
                split_end = offset
                self.register_parameter(
                    self.bias_names[i],
                    torch.nn.Parameter(bias_tensor[split_start:split_end]),
                    init_fn=init_method_constant(0.0),
                )
        else:
            for name in self.bias_names:
                bias = torch.Tensor().to(dtype=params_dtype, device=device)
                setattr(self, name, bias)

        if self.primary_weights_in_fp8:
            self.init_fp8_metadata()

        self.reset_parameters(defer_init=(device == 'meta'))

        self.fp8_weight_shapes.append(torch.Size((self.out_features, self.in_features)))

        # For RPL, bias has to be added after TP collectives
        # So it cannot be fused with the GEMM
        if self.parallel_mode == "row" and self.apply_bias:
            self.gemm_bias_unfused_add = True
        else:
            self.gemm_bias_unfused_add = False

    def reset_parameters(self, defer_init=False):
        super().reset_parameters(defer_init=defer_init)

        if not defer_init:
            # Set parallelism attributes for linear weights
            for weight in self.weight_names:
                set_tensor_model_parallel_attributes(
                    tensor=getattr(self, weight),
                    is_parallel=True,
                    dim=1 if self.parallel_mode == "row" else 0,
                    stride=1,
                )

            # Set parallelism attributes for linear biases
            if self.use_bias:
                for bias in self.bias_names:
                    if self.parallel_mode == "row":
                        setattr(getattr(self, bias), "sequence_parallel", self.sequence_parallel)
                    elif self.parallel_mode == "column":
                        set_tensor_model_parallel_attributes(getattr(self, bias), True, 0, 1)

    def get_fp8_weights_scratchpad(
        self,
        is_first_microbatch: Union[bool, None],
    ) -> List[Float8Tensor]:
        """
        Fetch the fp8 weight tensor placeholders if they exist (when
        `is_first_microbatch` is not `None`) or return empty fp8 weight
        tensors (if `is_first_microbatch is None`)
        """
        if not self.fp8 or self.primary_weights_in_fp8:
            return [None, None]

        if is_first_microbatch is None:
            # Return empty weight placeholders for each fwd/bwd pass
            fp8_weight_tensors = self.get_fp8_weights_empty_tensors(
                is_first_microbatch
            )
        else:
            # These persistent weight placeholders should've been created in
            # `set_fp8_weights` method
            fp8_weight_tensors = [self.weight1_fp8, self.weight1_t_fp8]

        return fp8_weight_tensors

    @no_torch_dynamo()
    def forward(
        self,
        inp: torch.Tensor,
        is_first_microbatch: Optional[bool] = None,
        is_first_module_in_mha: Optional[bool] = False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        """
        Apply the linear transformation to the input.

        Parameters
        ----------
        inp : torch.Tensor
             Input tensor.
        is_first_microbatch : {True, False, None}, default = None
                             During training using either gradient accumulation or
                             pipeline parallelism a minibatch of data is further split
                             into microbatches. Between the microbatches of the same minibatch
                             the model weights are not updated. Setting this parameter indicates
                             whether the current microbatch is the first in a minibatch or not.
                             When set, this parameter enables additional optimizations:

                             * during FP8 training, it allows caching of the FP8 versions of
                               the weights
                             * it also allows skipping gradient accumulation during the
                               first microbatch (since it is the first gradient being
                               produced)
        """

        skip_fp8_weight_update = FP8GlobalStateManager.get_skip_fp8_weight_update_tensor()
        if skip_fp8_weight_update is not None:
            is_first_microbatch = False

        with self.prepare_forward(inp,
            is_first_microbatch,
            allow_non_contiguous=isinstance(inp, Float8Tensor)) as inp:
            assert self.fp8 or not self.primary_weights_in_fp8, \
                   "Need to run inside fp8_autocast region when weights are stored in FP8."

            is_first_module_in_mha = is_first_module_in_mha and self.fp8_meta["recipe"].fp8_mha

            # Get concatenated weight and bias tensors
            weight_tensor = _noop_cat(
                [getattr(self, name) for name in self.weight_names],
            )
            if self.use_bias:
                bias_tensor = _noop_cat(
                    [getattr(self, name) for name in self.bias_names],
                )
            else:
                bias_tensor = getattr(self, self.bias_names[0])  # Unused

            # Fetch the fp8 weights placeholders (for linear/gemm)
            weight1_fp8, weight1_t_fp8 = self.get_fp8_weights_scratchpad(
                is_first_microbatch
            )

            from ..cpu_offload import CPUOffloadEnabled

            if torch.is_grad_enabled():
                linear_fn = _Linear.apply
                args = []
            else:
                linear_fn = _Linear.forward
                args = [None]
            args += (
                weight_tensor,
                weight1_fp8,
                weight1_t_fp8,
                inp,
                bias_tensor,
                self.apply_bias and not self.gemm_bias_unfused_add,
                is_first_microbatch,
                skip_fp8_weight_update,
                self.fp8,
                self.fp8_calibration,
                self.fp8_meta,
                self.fuse_wgrad_accumulation,
                CPUOffloadEnabled,
                self.tp_group,
                self.tp_size,
                self.sequence_parallel,
                self.tp_size > 1,
                self.activation_dtype,
                self.parallel_mode,
                torch.is_grad_enabled(),
                self.primary_weights_in_fp8,
                self.ub_overlap_rs,
                self.ub_overlap_ag,
                self.ub_name,
                is_first_module_in_mha,
            )
            out = linear_fn(*args)

        if self.gemm_bias_unfused_add:
            out = out + cast_if_needed(bias_tensor, self.activation_dtype)

        if self.return_bias:
            return out, cast_if_needed(bias_tensor, self.activation_dtype)
        return out