# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Linear API"""
import os
from typing import Any, Callable, Dict, Optional, Tuple, Union

import torch

import transformer_engine_torch as tex

from .base import (
    get_workspace,
    get_ub,
    TransformerEngineBaseModule,
    _2X_ACC_FPROP,
    _2X_ACC_DGRAD,
    _2X_ACC_WGRAD,
)
from ._common import _noop_cat
from ..fp8 import get_fp8_te_dtype, FP8GlobalStateManager
from ..utils import (
    divide,
    cast_if_needed,
    assert_dim_for_fp8_exec,
    clear_tensor_data,
    init_method_constant,
    requires_grad,
)
from ..distributed import (
    set_tensor_model_parallel_attributes,
    get_distributed_world_size,
    allreduce,
    reduce_scatter_along_first_dim,
    gather_along_first_dim,
    is_fp8_activation_recompute_enabled,
    in_fp8_activation_recompute_phase,
)
from ..cpp_extensions import (
    fp8_gemm,
    gemm,
    fp8_cast_transpose_fused,
    cast_to_fp8,
)
from ..constants import GemmParallelModes, dist_group_type
from ..jit import no_torch_dynamo
from ..graph import is_graph_capturing
from ..float8_tensor import Float8Tensor

_NVTE_DEBUG = int(os.getenv("NVTE_DEBUG", "0"))
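# NVTE_DEBUG=1 enables the debug prints below, which report whether the FP8 or
# the non-FP8 code path is taken in the forward and backward passes.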

__all__ = ["Linear"]


class _Linear(torch.autograd.Function):
    """Linear semi-top level module
    Calls custom CUDA extensions.
    """

    @staticmethod
    def forward(
        ctx,
        weight: Union[Float8Tensor, torch.Tensor],
        weight_fp8: Optional[Float8Tensor],
        inp: torch.Tensor,
        bias: torch.Tensor,
        use_bias: bool,
        is_first_microbatch: Union[bool, None],
        fp8: bool,
        fp8_calibration: bool,
        fp8_meta: Dict[str, Any],
        fuse_wgrad_accumulation: bool,
        cpu_offloading: bool,
        tp_group: Union[dist_group_type, None],
        tp_size: int,
        sequence_parallel: bool,
        tensor_parallel: bool,
        activation_dtype: torch.dtype,
        parallel_mode: Union[str, None],
        is_grad_enabled: bool,
        ub_overlap_rs: bool,
        ub_overlap_ag: bool,
        ub_name: str,
        is_first_module_in_mha: bool,
    ) -> torch.Tensor:
        is_input_fp8 = isinstance(inp, Float8Tensor)
        if is_input_fp8:
            fp8_meta["scaling_fwd"].scale_inv[tex.FP8FwdTensors.GEMM1_INPUT] = inp._scale_inv[0]

        # Make sure input dimensions are compatible
        in_features = weight.shape[-1]
        assert inp.shape[-1] == in_features, "GEMM not possible"
        inputmat = inp.view(-1, in_features)
        if fp8:
            assert_dim_for_fp8_exec(inputmat)
            assert_dim_for_fp8_exec(weight)

        tp_world_size = get_distributed_world_size(tp_group)
        ub_overlap_rs = False if tp_world_size == 1 else ub_overlap_rs

        # Cast input to expected dtype
        inputmat = cast_if_needed(inputmat, activation_dtype)
        inputmat_t = None
        inputmat_no_fp8 = inputmat

        if fp8:
            fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
            if isinstance(inputmat, Float8Tensor):
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat_t = inputmat.transpose_2d()
            else:
                if (
                    not fp8_meta["recipe"].override_linear_precision.wgrad
                    and is_grad_enabled
                    and weight.requires_grad
                    and not sequence_parallel
                ):
                    # FP8 input for forward, FP8 input transpose for backward wgrad
                    inputmat, inputmat_t = fp8_cast_transpose_fused(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                    )
                else:
                    # FP8 input for forward
                    inputmat = cast_to_fp8(
                        inputmat,
                        fp8_meta["scaling_fwd"],
                        tex.FP8FwdTensors.GEMM1_INPUT,
                        fp8_dtype_forward,
                    )

        # Column Parallel Linear
        if parallel_mode == "column" and sequence_parallel:
            inputmat_total, _ = gather_along_first_dim(inputmat, tp_group)
        else:
            inputmat_total = inputmat
        if fp8:
            if _NVTE_DEBUG:
                print('[Linear]: using FP8 forward')

            bias_dtype = (
                torch.bfloat16
                if activation_dtype == torch.float32
                else activation_dtype
            )
            bias = cast_if_needed(bias, bias_dtype) if use_bias else bias

            # Use FP8 weights
            if weight_fp8 is None:
                weight_fp8 = weight
            assert isinstance(weight_fp8, Float8Tensor)

            # When this is the first GEMM of an FP8 MHA block, produce the output
            # directly in FP8 (uint8 storage) so the attention kernels can consume it.
            if is_first_module_in_mha:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_meta["scaling_fwd"],
                    fp8_dtype_forward,
                    torch.uint8)
            else:
                proj_out_index, meta_tensor, proj_out_tetype, proj_out_pttype = (
                    None, None, None, activation_dtype)

            # Userbuffer overlap: the reduce-scatter of the GEMM output is overlapped
            # with the GEMM itself via the pre-registered communication buffer.
            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name + "_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // tp_world_size
                dim_size[1] = weight_fp8.size(0)
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_RS_P2P
                    else:
                        ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    if ub_obj_projout.is_atomic_gemm():
                        ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_RS
                    else:
                        ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS
                if ub_obj_projout.is_fp8_ubuf():
                    proj_out_index = tex.FP8FwdTensors.GEMM1_OUTPUT
                    meta_tensor = fp8_meta["scaling_fwd"]
                    proj_out_tetype = fp8_dtype_forward
                    proj_out_pttype = torch.uint8
                    ub_obj_projout.set_ubuf_scale_inv(meta_tensor.scale_inv[proj_out_index])
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = weight_fp8.size(0)
                out = torch.empty(dim_size, dtype=proj_out_pttype, device=inputmat_total.device)

            _ = fp8_gemm(
                weight_fp8._data,
                weight_fp8._scale_inv,
                0,
                weight_fp8._fp8_dtype,
                inputmat_total._data
                if isinstance(inputmat_total, Float8Tensor) else inputmat_total,
                fp8_meta["scaling_fwd"].scale_inv,
                tex.FP8FwdTensors.GEMM1_INPUT,
                fp8_dtype_forward,
                proj_out_pttype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                use_split_accumulator=_2X_ACC_FPROP,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
                out_index=proj_out_index,
                fp8_meta_tensor=meta_tensor,
                D_dtype=proj_out_tetype,
            )
            if is_first_module_in_mha:
                out = Float8Tensor(
                    data=out,
                    fp8_meta=fp8_meta,
                    fp8_meta_forward=True,
                    fp8_meta_index=tex.FP8FwdTensors.GEMM1_OUTPUT,
                    fp8_dtype=fp8_dtype_forward,
                    dtype=activation_dtype,
                )
        else:
            if _NVTE_DEBUG:
                print('[Linear]: using non-FP8 forward')

            # Cast for native AMP
            weight = cast_if_needed(weight, activation_dtype)
            bias = cast_if_needed(bias, activation_dtype) if use_bias else bias

            if fp8_calibration:
                # amax of input
                amin, amax = inputmat_total.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = \
                    torch.max(-amin, amax).float()
                # amax of weight
                amin, amax = weight.aminmax()
                fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = \
                    torch.max(-amin, amax).float()

            if ub_overlap_rs:
                ub_obj_projout = get_ub(ub_name + "_fprop")
                out = ub_obj_projout.get_ubuf_output(1)
                dim_size = list(inputmat_total.size())
                dim_size[0] = dim_size[0] // get_distributed_world_size(tp_group)
                dim_size[1] = weight.size(0)
                rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
                if ub_obj_projout.is_p2p_overlap():
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS_P2P
                else:
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS
            else:
                dim_size = list(inputmat_total.size())
                dim_size[1] = weight.size(0)
                out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)

            _ = gemm(
                weight,
                inputmat_total,
                activation_dtype,
                get_workspace(),
                bias=bias,
                use_bias=use_bias,
                out=out,
                ub_algo=ub_algo if ub_overlap_rs else None,
                ub=ub_obj_projout if ub_overlap_rs else None,
                extra_output_tensor=rs_out if ub_overlap_rs else None,
            )

        if is_grad_enabled:
            saved_inputmat = None
            saved_inputmat_t = None
            if weight.requires_grad:
                if fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad:
                    if inputmat_t is None:
                        saved_inputmat = inputmat
                    else:
                        saved_inputmat_t = inputmat_t
                        if cpu_offloading:
                            saved_inputmat_t.activation_offloading = True
                else:
                    saved_inputmat = inputmat_no_fp8

                if cpu_offloading:
                    if fuse_wgrad_accumulation:
                        weight.main_grad.weight_offloading = True
                    if fp8 and weight_fp8 is not None:
                        weight_fp8.weight_offloading = True
                    weight.weight_offloading = True

                    if saved_inputmat is not None:
                        saved_inputmat.activation_offloading = True

            ctx.save_for_backward(
                saved_inputmat,
                saved_inputmat_t,
                weight,
                weight_fp8,
                weight.main_grad if cpu_offloading and fuse_wgrad_accumulation else None,
                fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None,
            )
            ctx.activation_dtype = activation_dtype
            ctx.fp8 = fp8
            ctx.fp8_meta = fp8_meta
            ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
            ctx.cpu_offloading = cpu_offloading
            ctx.is_first_microbatch = is_first_microbatch
            ctx.use_bias = use_bias
            ctx.sequence_parallel = sequence_parallel
            ctx.tensor_parallel = tensor_parallel
            ctx.inp_shape = inp.shape
            ctx.parallel_mode = parallel_mode
            ctx.tp_group = tp_group
            ctx.ub_overlap_ag = ub_overlap_ag
            ctx.ub_name = ub_name
            ctx.tp_size = tp_size
            ctx.requires_dgrad = inp.requires_grad
            ctx.is_input_fp8 = is_input_fp8
            ctx.reduce_and_update_bwd_fp8_tensors = False
            if ctx.fp8 and requires_grad(inp, weight, bias):
                ctx.reduce_and_update_bwd_fp8_tensors = (
                    ctx.reduce_and_update_bwd_fp8_tensors or
                    FP8GlobalStateManager.is_first_fp8_module())

        # Row Parallel Linear
        if ub_overlap_rs:
            out = rs_out
        elif parallel_mode == "row" and sequence_parallel:
            out, _ = reduce_scatter_along_first_dim(out, tp_group)
        elif parallel_mode == "row" and tensor_parallel:
            out, _ = allreduce(out, tp_group)

        # [*, in_features] -> [*, out_features] except first dimension changes for SP
        return out.view(-1, *inp.shape[1:-1], out.shape[-1])


    @staticmethod
    def backward(
        ctx, grad_output: torch.Tensor
    ) -> Tuple[Union[torch.Tensor, None], ...]:
        if isinstance(grad_output, Float8Tensor):
            ctx.fp8_meta["scaling_bwd"].scale_inv[
                tex.FP8BwdTensors.GRAD_OUTPUT1] = grad_output._scale_inv

        with torch.cuda.nvtx.range("_Linear_backward"):
            (
                inputmat,
                inputmat_t,
                weight,
                weight_fp8,
                main_grad,
                fwd_scale_inverses,
            ) = ctx.saved_tensors

            if ctx.cpu_offloading and ctx.fuse_wgrad_accumulation:
                weight = torch.nn.Parameter(weight, False)
                weight.main_grad = main_grad

            tp_world_size = get_distributed_world_size(ctx.tp_group)
            ctx.ub_overlap_ag = False if tp_world_size == 1 else ctx.ub_overlap_ag
            if ctx.ub_overlap_ag:
                dim_size = list(grad_output.size())
                dim_size[0] = dim_size[0] * tp_world_size
                ctx.ub_obj_gradout = get_ub(ctx.ub_name + "_dgrad")
                if ctx.ub_obj_gradout.is_atomic_gemm():
                    ub_algo = tex.UbufOverlapAlgo.ATOMIC_GEMM_AG_P2P
                else:
                    ub_algo = tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG_P2P

            (
                grad_output,
                grad_output_c,
                grad_output_t,
                grad_bias,
            ) = TransformerEngineBaseModule.grad_output_preprocess(
                ctx, grad_output, ctx.parallel_mode == "row"
            )

            # Column Parallel Linear
            # Overlap input AG with dgrad
            inputmat_total = None
            inputmat_t_total = None
            handle = None
            if weight.requires_grad and ctx.parallel_mode == "column" and ctx.sequence_parallel:
                inputmat_total, handle = gather_along_first_dim(
                    inputmat, ctx.tp_group, async_op=ctx.requires_dgrad
                )
            else:
                inputmat_total = inputmat
                inputmat_t_total = inputmat_t

            if ctx.is_first_microbatch is not None:
                accumulate_wgrad_into_param_main_grad = (
                    ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
                )
            else:
                accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation

            if ctx.fp8:
                fp8_dtype_forward = get_fp8_te_dtype(
                    ctx.fp8_meta["recipe"], fprop_tensor=True
                )
                fp8_dtype_backward = get_fp8_te_dtype(
                    ctx.fp8_meta["recipe"], fprop_tensor=False
                )

            if ctx.requires_dgrad:
                if ctx.fp8:
                    if _NVTE_DEBUG:
                        print('[Linear]: using FP8 backward')

                    if ctx.is_input_fp8:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            tex.FP8BwdTensors.GRAD_INPUT1,
                            ctx.fp8_meta["scaling_bwd"],
                            fp8_dtype_backward,
                            torch.uint8)
                    else:
                        out_index, meta_tensor, output_te_dtype, output_dtype = (
                            None, None, None, ctx.activation_dtype)
                    dgrad, _ = fp8_gemm(
                        weight_fp8.transpose_2d(),
                        weight_fp8._scale_inv,
                        0,
                        weight_fp8._fp8_dtype,
                        grad_output_c,
                        ctx.fp8_meta["scaling_bwd"].scale_inv,
                        tex.FP8BwdTensors.GRAD_OUTPUT1,
                        fp8_dtype_backward,
                        output_dtype,
                        get_workspace(),
                        use_split_accumulator=_2X_ACC_DGRAD,
                        ub_algo=ub_algo if ctx.ub_overlap_ag else None,
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                        out_index=out_index,
                        fp8_meta_tensor=meta_tensor,
                        D_dtype=output_te_dtype,
                    )
                    if output_dtype == torch.uint8:
                        dgrad = Float8Tensor(
                            data=dgrad,
                            fp8_meta=ctx.fp8_meta,
                            fp8_meta_forward=False,
                            fp8_meta_index=tex.FP8BwdTensors.GRAD_INPUT1,
                            fp8_dtype=fp8_dtype_backward,
                            dtype=ctx.activation_dtype,
                        )
                else:
                    if _NVTE_DEBUG:
                        print('[Linear]: using non-FP8 backward')

                    dgrad, _, _ = gemm(
                        weight,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NN",
                        grad=True,
                        ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG_P2P \
                            if ctx.ub_overlap_ag else None,
                        ub=ctx.ub_obj_gradout if ctx.ub_overlap_ag else None,
                    )

                # Overlap dgrad-RS/AR with wgrad
                if ctx.parallel_mode == "column" and ctx.sequence_parallel:
                    if handle is not None:
                        handle.wait()
                    dgrad, handle = reduce_scatter_along_first_dim(
                        dgrad, ctx.tp_group, async_op=True
                    )
                elif ctx.parallel_mode == "column" and ctx.tensor_parallel:
                    dgrad, handle = allreduce(dgrad, ctx.tp_group, async_op=True)

            if weight.requires_grad:
                if ctx.fp8:
                    # WGRAD
                    if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
                        if ctx.ub_overlap_ag:
                            if isinstance(grad_output_c, Float8Tensor):
                                grad_output_t = grad_output_c.transpose_2d()
                            else:
                                grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
                        if inputmat_t_total is None:
                            if isinstance(inputmat_total, Float8Tensor):
                                inputmat_t_total = inputmat_total.transpose_2d()
                            else:
                                inputmat_t_total = tex.fp8_transpose(
                                    inputmat_total, fp8_dtype_backward)
                        wgrad, _ = fp8_gemm(
                            inputmat_t_total._data
                            if isinstance(inputmat_t_total, Float8Tensor) else inputmat_t_total,
                            fwd_scale_inverses,
                            tex.FP8FwdTensors.GEMM1_INPUT,
                            fp8_dtype_forward,
                            grad_output_t,
                            ctx.fp8_meta["scaling_bwd"].scale_inv,
                            tex.FP8BwdTensors.GRAD_OUTPUT1,
                            fp8_dtype_backward,
                            ctx.activation_dtype,
                            get_workspace(),
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                            use_split_accumulator=_2X_ACC_WGRAD,
                        )
                    else:
                        wgrad, _, _ = gemm(
                            inputmat_total,
                            grad_output,
                            ctx.activation_dtype,
                            get_workspace(),
                            layout="NT",
                            grad=True,
                            accumulate=accumulate_wgrad_into_param_main_grad,
                            out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                        )
                else:
                    # WGRAD
                    wgrad, grad_bias, _ = gemm(
                        inputmat_total,
                        grad_output,
                        ctx.activation_dtype,
                        get_workspace(),
                        layout="NT",
                        grad=True,
                        use_bias=ctx.use_bias,
                        accumulate=accumulate_wgrad_into_param_main_grad,
                        out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
                    )

                # Deallocate input tensor
                clear_tensor_data(inputmat_total)
                clear_tensor_data(inputmat_t_total)

            # Column Parallel Linear
            if ctx.parallel_mode == "column" and ctx.tensor_parallel and handle is not None:
                handle.wait()

            if not ctx.use_bias:
                grad_bias = None

        if weight.requires_grad:
            # Handle custom DDP from mcore.
            if ctx.fuse_wgrad_accumulation and hasattr(weight, 'grad_added_to_main_grad'):
                weight.grad_added_to_main_grad = True
                if getattr(weight, 'zero_out_wgrad', False):
                    wgrad = torch.zeros(weight.main_grad.shape,
                                        dtype=weight.dtype,
                                        device=torch.cuda.current_device(),
                                        requires_grad=False
                                       )
                else:
                    wgrad = torch.empty(weight.main_grad.shape,
                                        dtype=weight.dtype,
                                        device=torch.cuda.current_device(),
                                        requires_grad=False
                                       )
            elif ctx.fuse_wgrad_accumulation:
                wgrad = None
        else:
            wgrad = None

        if ctx.reduce_and_update_bwd_fp8_tensors and not is_graph_capturing():
            FP8GlobalStateManager.reduce_and_update_fp8_tensors(forward=False)

        return (
            wgrad,
            None,  # weight_fp8
            dgrad.view(ctx.inp_shape) if ctx.requires_dgrad else None,
            grad_bias,
            None,  # use_bias
            None,  # is_first_microbatch
            None,  # fp8
            None,  # fp8_calibration
            None,  # fp8_meta
            None,  # fuse_wgrad_accumulation
            None,  # cpu_offloading
            None,  # tp_group
            None,  # tp_size
            None,  # sequence_parallel
            None,  # tensor_parallel
            None,  # activation_dtype
            None,  # parallel_mode
            None,  # is_grad_enabled
            None,  # ub_overlap_rs
            None,  # ub_overlap_ag
            None,  # ub_name
            None,  # is_first_module_in_mha
        )


class Linear(TransformerEngineBaseModule):
    """Applies a linear transformation to the incoming data :math:`y = xA^T + b`

    On NVIDIA GPUs it is a drop-in replacement for `torch.nn.Linear`.

    Parameters
    ----------
    in_features : int
                 size of each input sample.
    out_features : int
                  size of each output sample.
    bias : bool, default = `True`
          if set to `False`, the layer will not learn an additive bias.
    init_method : Callable, default = `None`
                 used for initializing weights in the following way: `init_method(weight)`.
                 When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
    get_rng_state_tracker : Callable, default = `None`
                 used to get the random number generator state tracker for initializing weights.
    rng_tracker_name : str, default = `None`
                 the param passed to get_rng_state_tracker to get the specific rng tracker.
    parameters_split : Optional[Union[Tuple[str, ...], Dict[str, int]]], default = None
                      Configuration for splitting the weight and bias tensors along dim 0 into
                      multiple PyTorch parameters. If a list or tuple of strings is provided,
                      they are used to make the names of equally-sized parameters. If a dict
                      (preferably an OrderedDict) is provided, the keys are used as names and
                      values as split sizes along dim 0. The resulting parameters will have
                      names that end in `_weight` or `_bias`, so trailing underscores are
                      stripped from any provided names.
    device : Union[torch.device, str], default = "cuda"
          The device on which the parameters of the model will be allocated. It is the user's
          responsibility to ensure all parameters are moved to the GPU before running the
          forward pass.

    Parallelism parameters
    ----------------------
    sequence_parallel : bool, default = `False`
                       if set to `True`, uses sequence parallelism.
    tp_group : ProcessGroup, default = `None`
              tensor parallel process group.
    tp_size : int, default = 1
             used as TP (tensor parallel) world size when TP groups are not formed during
             initialization. In this case, users must call the
             `set_tensor_parallel_group(tp_group)` method on the initialized module before the
             forward pass to supply the tensor parallel group needed for tensor and sequence
             parallel collectives.
    parallel_mode : {None, 'column', 'row'}, default = `None`
                   used to decide whether this Linear layer is Column Parallel Linear or Row
                   Parallel Linear as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
                   When set to `None`, no communication is performed.

    Optimization parameters
    -----------------------
    fuse_wgrad_accumulation : bool, default = `False`
                             if set to `True`, enables fusing of creation and accumulation of
                             the weight gradient. When enabled, it is assumed that the weights
                             have an additional `main_grad` attribute (used instead of the
                             regular `grad`) which is a pre-allocated buffer of the correct
                             size to accumulate gradients in.
    return_bias : bool, default = `False`
                 when set to `True`, this module will not apply the additive bias itself, but
                 instead return the bias value during the forward pass together with the
                 output of the linear transformation :math:`y = xA^T`. This is useful when
                 the bias addition can be fused to subsequent operations.
    params_dtype : torch.dtype, default = `torch.get_default_dtype()`
                  it controls the type used to allocate the initial parameters. Useful when
                  the model is trained with lower precision and the original FP32 parameters
                  would not fit in GPU memory.
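
    Examples
    --------
    An illustrative usage sketch (assumes an FP8-capable GPU and that
    `transformer_engine.pytorch` is imported as `te`; shapes are arbitrary):

    .. code-block:: python

        import torch
        import transformer_engine.pytorch as te

        layer = te.Linear(1024, 1024, bias=True)
        inp = torch.randn(32, 1024, device="cuda")

        # Standard (non-FP8) forward pass
        out = layer(inp)

        # FP8 forward pass; tensor dimensions must satisfy FP8 GEMM constraints
        with te.fp8_autocast(enabled=True):
            out = layer(inp)

        # Split the weight and bias along dim 0 into equally-sized, named parameters
        qkv = te.Linear(1024, 3 * 1024, parameters_split=("query", "key", "value"))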

    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        sequence_parallel: bool = False,
        fuse_wgrad_accumulation: bool = False,
        tp_group: Optional[dist_group_type] = None,
        tp_size: int = 1,
        get_rng_state_tracker: Optional[Callable] = None,
        rng_tracker_name: Optional[str] = None,
        init_method: Optional[Callable] = None,
        bias: bool = True,
        return_bias: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        parallel_mode: Optional[str] = None,
        parameters_split: Optional[Union[Tuple[str, ...], Dict[str, int]]] = None,
        device: Union[torch.device, str] = "cuda",
        ub_overlap_rs: bool = False,
        ub_overlap_ag: bool = False,
        ub_name: Optional[str] = None,
    ) -> None:
        super().__init__()

        params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
        self.in_features = in_features
        self.out_features = out_features
        self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
        self.use_bias = bias
        self.return_bias = return_bias
        self.apply_bias = bias and not return_bias
        self.ub_overlap_rs = ub_overlap_rs
        self.ub_overlap_ag = ub_overlap_ag
        if ub_overlap_rs or ub_overlap_ag:
            assert ub_name is not None, "Userbuffer name [string] is not set."
            assert (
                tex.userbuf_comm_available()
            ), "Userbuffer communication backend not available."
        self.ub_name = ub_name
        self.get_rng_state_tracker = get_rng_state_tracker
        self.rng_tracker_name = rng_tracker_name

        if device == 'meta':
            assert parameters_split is None, ("Cannot split module parameters "
                                              "on 'meta' device.")
        if tp_group is None:
            self.tp_size = tp_size
            if tp_size == 1:
                self.set_tensor_parallel_group(tp_group)
        else:
            self.tp_size = get_distributed_world_size(tp_group)
            self.set_tensor_parallel_group(tp_group)
        self.set_nccl_overlap_warning_if_tp()

        self.parallel_mode = parallel_mode
        assert (
            self.parallel_mode in GemmParallelModes
        ), f"parallel_mode {parallel_mode} not supported"

        if self.parallel_mode == "column":
            self.out_features = divide(self.out_features, self.tp_size)
        elif self.parallel_mode == "row":
            self.in_features = divide(self.in_features, self.tp_size)

        self.sequence_parallel = (self.tp_size > 1) and sequence_parallel

        # Initialize params in FP8
        with_fp8_params = FP8GlobalStateManager.with_fp8_parameters()

        # Contiguous buffers for params
        weight_tensor = torch.empty(
            self.out_features,
            self.in_features,
            device=device,
            dtype=params_dtype,
        )
        bias_tensor = None
        if self.use_bias:
            bias_tensor = torch.empty(
                self.out_features,
                device=device,
                dtype=params_dtype,
            )

        # Configure parameter splits
        self.weight_names = []
        self.bias_names = []
        self.parameter_split_sizes = []
        if parameters_split is None:
            # Split into a single parameter by default
            self.weight_names = ["weight"]
            self.bias_names = ["bias"]
            self.parameter_split_sizes = [out_features]
        elif not parameters_split:
            raise ValueError("Cannot split weight buffer into 0 parameters")
        elif isinstance(parameters_split, dict):
            # Split parameters with provided sizes
            for name, split_size in parameters_split.items():
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        elif all(isinstance(name, str) for name in parameters_split):
            # Split parameters evenly
            split_size = out_features // len(parameters_split)
            for name in parameters_split:
                self.weight_names.append(f"{name.rstrip('_')}_weight")
                self.bias_names.append(f"{name.rstrip('_')}_bias")
                self.parameter_split_sizes.append(split_size)
        else:
            raise TypeError("Invalid configuration for parameters split")

        # Make sure parameter splits are valid
        if sum(self.parameter_split_sizes) != out_features:
            raise ValueError(
                f"Trying to split weight buffer ({out_features=}) "
                f"with split sizes {self.parameter_split_sizes}"
            )

        # Adjust parameter splits for tensor-parallel distribution
        if self.parallel_mode == "column":
            for i, size in enumerate(self.parameter_split_sizes):
                if size % self.tp_size != 0:
                    raise RuntimeError(
                        f"Attempting to distribute a parameter with out_features={size} "
                        f"between {self.tp_size} tensor-parallel processes"
                    )
                self.parameter_split_sizes[i] = size // self.tp_size

        # Construct weight parameters
        # Note: Register weights together so that they are adjacent to
        # each other in Linear.parameters(). This makes it more likely
        # that they will stay contiguous if the weights are
        # manipulated externally, e.g. by FSDP.
        offset = 0
        for i, split_size in enumerate(self.parameter_split_sizes):
            split_start = offset
            offset += split_size
            split_end = offset

            # Check if parameters are subviews of buffers
            is_subview = (split_start, split_end) != (0, self.out_features)
            if is_subview and with_fp8_params:
                raise RuntimeError(
                    "Splitting Float8Tensor into multiple params "
                    "is not supported"
                )

            # Construct weight parameter
            self.register_parameter(
                self.weight_names[i],
                torch.nn.Parameter(weight_tensor[split_start:split_end]),
                init_fn=init_method,
                get_rng_state_tracker=get_rng_state_tracker,
                fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
            )

        # Construct bias parameters if needed
        if self.use_bias:
            offset = 0
            for i, split_size in enumerate(self.parameter_split_sizes):
                split_start = offset
                offset += split_size
                split_end = offset
                self.register_parameter(
                    self.bias_names[i],
                    torch.nn.Parameter(bias_tensor[split_start:split_end]),
                    init_fn=init_method_constant(0.0),
                )
        else:
            for name in self.bias_names:
                bias = torch.Tensor().to(dtype=params_dtype, device=device)
                setattr(self, name, bias)

        if with_fp8_params:
            self.init_fp8_metadata()

        self.reset_parameters(defer_init=(device == 'meta'))

        # For RPL, bias has to be added after TP collectives
        # So it cannot be fused with the GEMM
        if self.parallel_mode == "row" and self.apply_bias:
            self.gemm_bias_unfused_add = True
        else:
            self.gemm_bias_unfused_add = False

    def reset_parameters(self, defer_init=False):
        super().reset_parameters(defer_init=defer_init)

        if not defer_init:
            # Set parallelism attributes for linear weights
            for weight in self.weight_names:
                set_tensor_model_parallel_attributes(
                    tensor=getattr(self, weight),
                    is_parallel=True,
                    dim=1 if self.parallel_mode == "row" else 0,
                    stride=1,
                )

            # Set parallelism attributes for linear biases
            if self.use_bias:
                for bias in self.bias_names:
                    if self.parallel_mode == "row":
                        setattr(getattr(self, bias), "sequence_parallel", self.sequence_parallel)
                    elif self.parallel_mode == "column":
                        set_tensor_model_parallel_attributes(getattr(self, bias), True, 0, 1)

    @no_torch_dynamo()
    def forward(
        self,
        inp: torch.Tensor,
        is_first_microbatch: Optional[bool] = None,
        is_first_module_in_mha: Optional[bool] = False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        """
        Apply the linear transformation to the input.

        Parameters
        ----------
        inp : torch.Tensor
             Input tensor.
        is_first_microbatch : {True, False, None}, default = None
                             During training using either gradient accumulation or
                             pipeline parallelism, a minibatch of data is further split
                             into microbatches. Between the microbatches of the same minibatch
                             the model weights are not updated. Setting this parameter indicates
                             whether the current microbatch is the first in a minibatch or not.
                             When set, this parameter enables additional optimizations:

                             * during FP8 training, it allows caching of the FP8 versions of
                               the weights
                             * it also allows skipping gradient accumulation during the
                               first microbatch (since it is the first gradient being
                               produced)
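
        A gradient-accumulation sketch (illustrative; `layer` and `microbatches`
        are hypothetical stand-ins):

        .. code-block:: python

            for i, microbatch in enumerate(microbatches):
                out = layer(microbatch, is_first_microbatch=(i == 0))
                out.sum().backward()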
        """

        skip_fp8_weight_update = FP8GlobalStateManager.get_skip_fp8_weight_update_tensor()
        if skip_fp8_weight_update is not None:
            is_first_microbatch = False

        with self.prepare_forward(
            inp,
            is_first_microbatch,
            allow_non_contiguous=isinstance(inp, Float8Tensor),
        ) as inp:

            is_first_module_in_mha = is_first_module_in_mha and self.fp8_meta["recipe"].fp8_mha

            # Get concatenated weight and bias tensors
            unfused_weights = [getattr(self, name) for name in self.weight_names]
            if any(isinstance(w, Float8Tensor) for w in unfused_weights):
                if self.fp8:
                    if len(unfused_weights) != 1:
                        raise RuntimeError(
                            "Splitting Float8Tensor into multiple params "
                            "is not supported"
                        )
                else:
                    unfused_weights = [w.from_float8() for w in unfused_weights]
            weight_tensor = _noop_cat(unfused_weights)
            if self.use_bias:
                bias_tensor = _noop_cat(
                    [getattr(self, name) for name in self.bias_names],
                )
            else:
                bias_tensor = getattr(self, self.bias_names[0])  # Unused

            # Initialize FP8 weights if needed
            weight_fp8 = None
            if self.fp8:
                with_transpose = torch.is_grad_enabled()
                if (
                    not with_transpose
                    and is_fp8_activation_recompute_enabled()
                    and not in_fp8_activation_recompute_phase()
                ):
                    with_transpose = True
                if isinstance(weight_tensor, Float8Tensor):
                    # Fill transpose cache in FP8 tensor if needed
                    update_transpose_cache = with_transpose
                    if update_transpose_cache:
                        update_transpose_cache = (
                            is_first_microbatch
                            or skip_fp8_weight_update is not None
                        )
                    if update_transpose_cache:
                        weight_tensor.transpose_2d(
                            fill_cache=True,
                            noop_flag=skip_fp8_weight_update,
                        )
                else:
                    # FP8 cast to workspace buffer
                    update_workspace = (
                        is_first_microbatch is None
                        or is_first_microbatch
                    )
                    weight_fp8 = self.get_fp8_workspace(
                        tensor=weight_tensor,
                        fp8_meta_forward=True,
                        fp8_meta_index=tex.FP8FwdTensors.GEMM1_WEIGHT,
                        cache_name=(None if is_first_microbatch is None else "weight"),
                        update_workspace=update_workspace,
                        skip_update_flag=skip_fp8_weight_update,
                        with_transpose=with_transpose,
                    )

            from ..cpu_offload import CPUOffloadEnabled

            if torch.is_grad_enabled():
                linear_fn = _Linear.apply
                args = []
            else:
                linear_fn = _Linear.forward
                args = [None]
            args += (
                weight_tensor,
                weight_fp8,
                inp,
                bias_tensor,
                self.apply_bias and not self.gemm_bias_unfused_add,
                is_first_microbatch,
                self.fp8,
                self.fp8_calibration,
                self.fp8_meta,
                self.fuse_wgrad_accumulation,
                CPUOffloadEnabled,
                self.tp_group,
                self.tp_size,
                self.sequence_parallel,
                self.tp_size > 1,
                self.activation_dtype,
                self.parallel_mode,
                torch.is_grad_enabled(),
                self.ub_overlap_rs,
                self.ub_overlap_ag,
                self.ub_name,
                is_first_module_in_mha,
            )
            out = linear_fn(*args)

        if self.gemm_bias_unfused_add:
            out = out + cast_if_needed(bias_tensor, self.activation_dtype)

        if self.return_bias:
            return out, cast_if_needed(bias_tensor, self.activation_dtype)
        return out