# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Base modules and utilities for TransformerEngine PyTorch API"""

import io
import os
import pickle
import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Union
from contextlib import contextmanager

import torch
import torch.nn.functional as F

import transformer_engine_torch as tex
from transformer_engine.common.recipe import Recipe

from ._common import _ParameterInitMeta
from ..fp8 import (
    MXFP8BlockScalingRecipeState,
    DelayedScalingRecipeState,
    Float8CurrentScalingRecipeState,
    FP8GlobalStateManager,
    RecipeState,
)
from ..distributed import (
    gather_along_first_dim,
    is_fp8_activation_recompute_enabled,
    in_fp8_activation_recompute_phase,
    _fsdp_gather_tensors,
)
from ..constants import dist_group_type
from ..tensor import QuantizedTensor, Quantizer
from ..tensor._internal.float8_tensor_base import Float8TensorBase
from ..tensor._internal.mxfp8_tensor_base import MXFP8TensorBase
from torch.utils.cpp_extension import IS_HIP_EXTENSION

__all__ = ["initialize_ub", "destroy_ub"]

_2X_ACC_FPROP = False
_2X_ACC_DGRAD = True
_2X_ACC_WGRAD = True
_multi_stream_cublas_workspace = []
_multi_stream_cublas_batchgemm_workspace = []
_cublas_workspace = None
_ub_communicators = None
_NUM_MAX_UB_STREAMS = 3
_MIN_STREAM_PRIORITY, _MAX_STREAM_PRIORITY = None, None
layers_atomic_ring_exchange = []


def get_cublas_workspace_size_bytes() -> int:
    """Return the BLAS workspace size in bytes.

    32 MiB on Hopper and newer architectures, 4 MiB otherwise. On ROCm, a larger
    hipBLASLt workspace is used (512 MiB or 1 GiB depending on NVTE_BLASLT_NOPAD).
    """
    # Env variable to control the workspace padding for hipBLASLt
    if IS_HIP_EXTENSION:
        nvte_blaslt_nopad = int(os.environ.get("NVTE_BLASLT_NOPAD", "0"))
        if nvte_blaslt_nopad:
            return 536_870_912
        return 1_073_741_824

    if torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 9:
        return 33_554_432
    return 4_194_304


def get_workspace() -> torch.Tensor:
    """Returns workspace for cublas."""
    global _cublas_workspace
    if _cublas_workspace is None:
        _cublas_workspace = torch.empty(
            get_cublas_workspace_size_bytes(), dtype=torch.uint8, device="cuda"
        )
    return _cublas_workspace


def get_multi_stream_cublas_workspace() -> List[torch.Tensor]:
    """Returns workspace for multi-stream cublas."""
    global _multi_stream_cublas_workspace
    if not _multi_stream_cublas_workspace:
        for _ in range(tex._num_cublas_streams):
            _multi_stream_cublas_workspace.append(
                torch.empty(get_cublas_workspace_size_bytes(), dtype=torch.uint8, device="cuda")
            )
    return _multi_stream_cublas_workspace

def get_multi_stream_cublas_batchgemm_workspace() -> List[torch.Tensor]:
    """Returns workspace for multi-stream cublas."""
    global _multi_stream_cublas_batchgemm_workspace
    if not _multi_stream_cublas_batchgemm_workspace:
        for _ in range(tex._num_cublas_batchgemm_streams):
            _multi_stream_cublas_batchgemm_workspace.append(
                torch.empty(get_cublas_workspace_size_bytes(), dtype=torch.uint8, device="cuda")
            )
    return _multi_stream_cublas_batchgemm_workspace


def initialize_ub(
    shape: list,
    tp_size: int,
    use_fp8: bool = False,
    dtype: torch.dtype = torch.bfloat16,
    ub_cfgs: Optional[dict] = None,
    bootstrap_backend: Union[str, torch.distributed.Backend] = None,
) -> None:
    r"""
    Initialize the Userbuffers communicator for overlapping tensor-parallel communications with
    GEMM compute in te.Linear, te.LayerNormLinear and te.LayerNormMLP modules.

    Parameters
    ----------
    shape : list
            shape of the communication buffer, typically set to be the same as the global shape of
            the input tensor to a te.TransformerLayer forward pass, with the sequence and batch
            dimensions collapsed together -- i.e.: `(sequence_length * batch_size, hidden_size)`
    tp_size : int
              number of GPUs in the tensor-parallel process group
    use_fp8 : bool = False
              allocate the communication buffer for FP8 GEMM inputs/outputs
    dtype : torch.dtype = torch.bfloat16
            non-FP8 data type of the communication buffer when `use_fp8 = False`
    ub_cfgs: dict = None
             Configuration dictionary with the structure
             ```
             {
                <gemm_name> : {
                    "method": <"ring_exchange" or "pipeline">,
                    "is_reduce_scatter": bool,
                    "num_sm": int,
                    "cga_size": int,
                    "set_sm_margin": bool,
                    "num_splits": int,
                    "aggregate": bool,
                    "atomic_gemm": bool,
                    "use_ce": bool,
                    "fp8_buf": bool,
                }
             }
             ```
             for `te.TransformerLayer` GEMM layers in `["qkv_fprop", "qkv_dgrad", "qkv_wgrad",
             "proj_fprop", "proj_dgrad", "fc1_fprop", "fc1_dgrad", "fc1_wgrad", "fc2_fprop",
             "fc2_dgrad"]`.
    bootstrap_backend : str = None
                        `torch.distributed` communication backend for the all-gather, broadcast and
                        barrier collectives during Userbuffers initialization. Not all backends are
                        valid for every cluster configuration and distributed launch method even if
                        they are available in PyTorch. When left unset, the initialization prefers
                        to use the MPI backend, falling back first on Gloo and then NCCL if MPI is
                        not available. Setting `NVTE_UB_WITH_MPI=1` when building TE overrides this
                        option and always initializes Userbuffers with direct MPI calls in C++,
                        which also requires `MPI_HOME=/path/to/mpi/root` to be set at compile time.
    """
    if not tex.device_supports_multicast():
        assert bool(int(os.getenv("UB_SKIPMC", "0"))), (
            "CUDA device, driver and/or toolkit version does not support comm+GEMM overlap with "
            + "CUDA Multicast. Launch app with UB_SKIPMC=1 to try CUDA IPC instead."
        )

    global _ub_communicators
    assert _ub_communicators is None, "UB communicators are already initialized."
    _ub_communicators = {}

    if tex.ubuf_built_with_mpi():
        # We're bootstrapping with direct calls to MPI in Userbuffers code so we need to force
        # an MPI_Init() here by creating a new MPI process group...
        assert torch.distributed.is_mpi_available()
        _ = torch.distributed.new_group(backend="mpi")
        helper = tex.CommOverlapHelper()
    else:
        # Bootstrapping with torch.distributed API, so check backend and construct
        # intra/inter-node process groups...
        assert (
            torch.distributed.is_initialized()
        ), "torch.distributed must be initialized before Userbuffers"
        if bootstrap_backend is None:
            bootstrap_backend = "nccl"
            if torch.distributed.is_mpi_available():
                bootstrap_backend = "mpi"
            elif torch.distributed.is_gloo_available():
                bootstrap_backend = "gloo"
        else:
            assert bootstrap_backend in [
                "gloo",
                "mpi",
                "nccl",
            ], "Invalid torch.distributed backend for bootstrapping Userbuffers!"
            assert torch.distributed.is_backend_available(bootstrap_backend), (
                f"PyTorch must be compiled with '{bootstrap_backend}' support in order to "
                f"bootstrap Userbuffers with '{bootstrap_backend}' collectives."
            )

        world_group = torch.distributed.new_group(backend=bootstrap_backend)
        world_rank = torch.distributed.get_rank(world_group)
        world_size = torch.distributed.get_world_size(world_group)

        num_domains = world_size // tp_size
        mydomain_idx = world_rank // tp_size
        if num_domains > 1:
            ranks_per_domain_list = [
                [i * tp_size + t for t in range(tp_size)] for i in range(num_domains)
            ]
            tp_domain_group, _ = torch.distributed.new_subgroups_by_enumeration(
                ranks_per_domain_list, backend=bootstrap_backend
            )
            local_rank = torch.distributed.get_rank(tp_domain_group)
            tp_domain_ranks = torch.distributed.get_process_group_ranks(tp_domain_group)

            helper = tex.CommOverlapHelper(world_group, tp_domain_group)
        else:
            # TP model on single NVLink domain, no replication, no data-parallelism
            mydomain_idx = 0
            local_rank = world_rank
            tp_domain_ranks = list(range(world_size))

            helper = tex.CommOverlapHelper(world_group)

        if world_rank == 0:
            print(f"!!! [UB] Number of TP domains: {num_domains}\n", end="", flush=True)
        if local_rank == 0:
            print(
                f"!!! [UB] Global ranks on TP domain {mydomain_idx}: {tp_domain_ranks}\n",
                end="",
                flush=True,
            )

    # Increase the workspace by the number of maximum concurrent streams
    global _cublas_workspace
    _cublas_workspace = get_workspace().repeat(_NUM_MAX_UB_STREAMS)

    # Default buffer precision: AllGather buffers use fp8 when using fp8 recipe
    layers_all_gather_overlap = [
        "qkv_fprop",
        "qkv_dgrad",
        "proj_dgrad",
        "fc1_fprop",
        "fc1_dgrad",
        "fc2_dgrad",
    ]
    layers_reduce_scatter_overlap = ["proj_fprop", "fc2_fprop", "qkv_wgrad", "fc1_wgrad"]
    dgrad_reduce_scatter_overlap = ["qkv_dgrad", "fc1_dgrad"]

    # Default overlap methods for layers
    methods = {
        "ring_exchange": ["qkv_fprop", "fc1_fprop", "proj_dgrad", "fc2_dgrad"],
        "pipeline": ["proj_fprop", "fc2_fprop"],
        "bulk": ["qkv_dgrad", "qkv_wgrad", "fc1_dgrad", "fc1_wgrad"],
    }

    # AG-RS overlap pairs of layers forming a tensor-parallel block
    ag_rs_pairs = {"qkv_fprop": "proj_fprop", "fc1_fprop": "fc2_fprop"}
    rs_ag_pairs = {v: k for k, v in ag_rs_pairs.items()}
    global layers_atomic_ring_exchange
    layers_atomic_ring_exchange = []

    def get_method(name):
        for method, names in methods.items():
            if name in names:
                return method
        raise KeyError(f"Given layer name {name} does not exist.")

    def get_default_config(name):
        global _MIN_STREAM_PRIORITY, _MAX_STREAM_PRIORITY
        method = get_method(name)
        is_reduce_scatter = name in layers_reduce_scatter_overlap
        if _MIN_STREAM_PRIORITY is None or _MAX_STREAM_PRIORITY is None:
            _MIN_STREAM_PRIORITY, _MAX_STREAM_PRIORITY = tex.get_stream_priority_range()
        default_cfg = {
            "method": method,
            "is_reduce_scatter": is_reduce_scatter,
            "num_sm": 1 if method == "ring_exchange" else 16,
            "cga_size": 1 if method == "ring_exchange" else 2,
            "set_sm_margin": not method == "ring_exchange",
            "num_splits": tp_size if method == "ring_exchange" else 4,
            "aggregate": False,
            "atomic_gemm": False,
            "use_ce": True,
            "fp8_buf": name in layers_all_gather_overlap,
            "comm_priority": _MAX_STREAM_PRIORITY,
            "gemm_priority": _MIN_STREAM_PRIORITY,
            "pipeline_rs_overlap_first_gemm": False,
        }
        return default_cfg

    def add_ub(
        name: str,
        method: str,
        is_reduce_scatter: bool,
        num_sm: int = 16,
        cga_size: int = 2,
        set_sm_margin: bool = False,
        num_splits: int = 0,
        aggregate: bool = False,
        atomic_gemm: bool = False,
        use_ce: bool = True,
        fp8_buf: bool = False,
        comm_priority: int = 0,
        gemm_priority: int = 0,
        pipeline_rs_overlap_first_gemm: bool = False,
    ) -> None:
        if atomic_gemm:
            warnings.warn(
                "Atomic GEMM uses a beta API from cublas and is not tested for all use cases."
            )
            assert use_fp8, "Atomic GEMM overlap supported only for FP8 GEMM."
            if method == "bulk":
                warnings.warn(
                    f"At {name}, atomic GEMM is not supported for a bulk overlap. "
                    "Defaulting to `atomic_gemm=False`."
                )
                atomic_gemm = 0
        if not is_reduce_scatter and method == "pipeline":
            raise ValueError(
                f"At {name}, `pipeline` overlap method is not supported for AllGather."
            )
        # Check if both AG and RS overlaps use `atomic GEMM` + `p2p ring-exchange`.
        # Using atomic GEMM + p2p ring-exchange in only one of the pair breaks functionality.
        global layers_atomic_ring_exchange
        if atomic_gemm and method == "ring_exchange" and name in ag_rs_pairs:
            layers_atomic_ring_exchange += [name, ag_rs_pairs[name]]
        if name in rs_ag_pairs:
            assert_message = (
                f"At {name}, atomic AG-GEMM overlap with `ring_exchange` shuffles GEMM chunk "
                "outputs, and RS-GEMM overlap un-shuffles them. When one of the GEMM-AG and "
                "GEMM-RS overlaps forming a TP block (e.g., qkv_fprop and proj_fprop) uses "
                "`atomic gemm` and `ring_exchange`, its pair must use the same overlap config "
                "for functionality."
            )
            if name in layers_atomic_ring_exchange:
                assert atomic_gemm and method == "ring_exchange", assert_message
            else:
                if atomic_gemm and method == "ring_exchange":
                    assert rs_ag_pairs[name] in layers_atomic_ring_exchange, assert_message

        buffer_dtype = torch.uint8 if (use_fp8 and fp8_buf) else dtype
        if method == "ring_exchange":
            ub_obj = tex.CommOverlapP2P(
                shape,  # Communication buffer shape
                buffer_dtype,  # Communication buffer data type
                helper,  # Helper for torch.distributed callbacks during bootstrapping
                tp_size,  # Tensor-parallel group size (may be different than local_size)
                tex.CommOverlapType.RS if is_reduce_scatter else tex.CommOverlapType.AG,
                num_max_streams=_NUM_MAX_UB_STREAMS,
                comm_cga_size=cga_size,
                num_comm_sm=num_sm,
                set_sm_margin=set_sm_margin,
                atomic_gemm=atomic_gemm,
                use_ce=use_ce,
                aggregate=aggregate,
                gemm_priority=gemm_priority,
                comm_priority=comm_priority,
            )
        else:
            ub_obj = tex.CommOverlap(
                shape,  # Communication buffer shape
                buffer_dtype,  # Communication buffer data type
                helper,  # Helper for torch.distributed callbacks during bootstrapping
                tp_size,  # Tensor-parallel group size (may be different than local_size)
                num_splits=num_splits,
                num_max_streams=_NUM_MAX_UB_STREAMS,
                comm_cga_size=cga_size,
                num_comm_sm=num_sm,
                set_sm_margin=set_sm_margin,
                atomic_gemm=atomic_gemm,
                gemm_priority=gemm_priority,
                comm_priority=comm_priority,
                rs_overlap_first_gemm=pipeline_rs_overlap_first_gemm,
            )
        _ub_communicators[name] = ub_obj

    if ub_cfgs is not None:
        for name in dgrad_reduce_scatter_overlap:
            if name in ub_cfgs and "method" in ub_cfgs[name] and ub_cfgs[name]["method"] != "bulk":
                wgrad_name = name.replace("dgrad", "wgrad")
                assert wgrad_name not in ub_cfgs
                layers_reduce_scatter_overlap.remove(wgrad_name)
                layers_all_gather_overlap.remove(name)
                layers_reduce_scatter_overlap.append(name)
                methods["bulk"].remove(name)
                new_method = ub_cfgs[name]["method"]
                methods[new_method].append(name)

    for name in methods["ring_exchange"] + methods["pipeline"] + methods["bulk"]:
        ub_cfg = get_default_config(name)
        if ub_cfgs is not None and name in ub_cfgs:
            fp8_buf = (name in layers_all_gather_overlap) or (
                ub_cfgs[name].get("fp8_buf", False) and name in methods["pipeline"]
            )
            ub_cfg.update(ub_cfgs[name])
            ub_cfg["fp8_buf"] = fp8_buf
        add_ub(name, **ub_cfg)


def get_ub(name: str):
    """Get userbuffer communicator corresponding to give key."""
    assert _ub_communicators is not None, "UB manager is not initialized."
    assert name in _ub_communicators, f"UB for {name} is not registered."
    return _ub_communicators[name]


def destroy_ub():
    """Destroy all allocated userbuffer communicators."""
    global _ub_communicators
    _ub_communicators = None
    global layers_atomic_ring_exchange
    layers_atomic_ring_exchange = []


class TransformerEngineBaseModule(torch.nn.Module, ABC):
    """Base TE module."""

    def __init__(self) -> None:
        super().__init__()
        assert torch.cuda.is_available(), "TransformerEngine needs CUDA."
        self.fp8_initialized = False
        self.fp8 = False
        self.fp8_calibration = False
        self.fp8_meta = {}
        self.fp8_meta["fp8_checkpoint"] = False
        self.fp8_meta["fp8_group"] = None
        self.fp8_meta_tensors_initialized = False
        self.quantizers = {"scaling_fwd": {}, "scaling_bwd": {}}
        self.tp_group = None
        self.tp_size = 1
        self.sequence_parallel = False
        self.param_init_meta = {}
        self.primary_weights_in_fp8 = FP8GlobalStateManager.with_fp8_parameters()
        self.fsdp_wrapped = False
        self.fsdp_group = None
        self._fp8_workspaces: Dict[str, QuantizedTensor] = {}
        self.activation_dtype: Optional[torch.dtype] = None

    # Names of attributes that can be set quickly (see __setattr__
    # method)
    _fast_setattr_names: Set[str] = {
        "activation_dtype",
        "fp8",
        "fp8_initialized",
        "fp8_calibration",
        "fp8_parameters",
    }

    def __setattr__(self, name: str, value: Any) -> None:
        if name in TransformerEngineBaseModule._fast_setattr_names:
            # torch.nn.Module has a custom __setattr__ that handles
            # modules, parameters, and buffers. This is unnecessary
            # overhead when setting plain attrs.
            self.__dict__[name] = value
        else:
            # Default case
            super().__setattr__(name, value)

    def adjust_amax_history_length(self, length: int, fwd: Optional[bool] = None) -> None:
        """
        Delayed scaling only.

        Increase or decrease size of amax history based on given `length`.

        .. warning::
            This changes the underlying amax memory location.
        """
        if fwd is None:
            fp8_meta_tensor_keys = ("scaling_fwd", "scaling_bwd")
        else:
            fp8_meta_tensor_keys = ("scaling_fwd" if fwd else "scaling_bwd",)

        for meta_key in fp8_meta_tensor_keys:
            if meta_key not in self.fp8_meta:
                # Handles non-parameter FP8 modules, e.g. DPA.
                continue
            curr_len = self.fp8_meta[meta_key].amax_history.shape[0]
            if length == curr_len:
                continue
            if length < curr_len:
                self.fp8_meta[meta_key].amax_history = (
                    self.fp8_meta[meta_key].amax_history[:length].clone()
                )
            elif length > curr_len:
                extra_rows = length - curr_len
                self.fp8_meta[meta_key].amax_history = F.pad(
                    self.fp8_meta[meta_key].amax_history, pad=(0, 0, 0, extra_rows)
                )

            # Update quantizers with new amax pointers.
            self.quantizers[meta_key] = self.fp8_meta[meta_key].make_quantizers()

            # Update the global buffers with new amax and history pointers.
            if FP8GlobalStateManager.get_buffer_info() in self.fp8_meta:
                fwd_pos, fwd_key, bwd_pos, bwd_key = self.fp8_meta[
                    FP8GlobalStateManager.get_buffer_info()
                ]
                for pos, buffer_key in zip((fwd_pos, bwd_pos), (fwd_key, bwd_key)):
                    if buffer_key in FP8GlobalStateManager.global_amax_buffer:
                        assert (
                            buffer_key in FP8GlobalStateManager.global_amax_history_buffer
                        ), "TE internal error during amax history change."
                        FP8GlobalStateManager.global_amax_buffer[buffer_key][pos] = self.fp8_meta[
                            meta_key
                        ].amax_history[0]
                        FP8GlobalStateManager.global_amax_history_buffer[buffer_key][pos] = (
                            self.fp8_meta[meta_key].amax_history
                        )

    def set_meta_tensor(self, fwd: bool, recipe: Recipe) -> None:
        """Init scales and amaxes for fwd | bwd."""
        fp8_meta_tensor_key = "scaling_fwd" if fwd else "scaling_bwd"

        # Return early if recipe state matches recipe
        if self.fp8_meta_tensors_initialized:
            recipe_state = self.fp8_meta[fp8_meta_tensor_key]
            if recipe.delayed() and isinstance(recipe_state, DelayedScalingRecipeState):
                self.adjust_amax_history_length(recipe.amax_history_len, fwd=fwd)
                return
            if recipe.mxfp8() and isinstance(recipe_state, MXFP8BlockScalingRecipeState):
                return
            if recipe.float8_current_scaling() and isinstance(
                recipe_state, Float8CurrentScalingRecipeState
            ):
                return

        # Max. number of fp8 tensors per GEMM = 3 (input, weight, output) for fwd and
        # 2 (grad_output and grad_input) for bwd
        num_fp8_tensors = self.fp8_meta["num_gemms"] * 3 if fwd else self.fp8_meta["num_gemms"] * 2

        # Initialize recipe state and quantizers
        recipe_state = RecipeState.create(
            recipe,
            mode=("forward" if fwd else "backward"),
            num_quantizers=num_fp8_tensors,
        )

        self.fp8_meta[fp8_meta_tensor_key] = recipe_state
        self.quantizers[fp8_meta_tensor_key] = recipe_state.make_quantizers()

    def init_fp8_meta_tensors(self, recipe: Recipe) -> None:
        """Init scales and amaxes."""
        self.set_meta_tensor(True, recipe)
        self.set_meta_tensor(False, recipe)

        self.fp8_meta_tensors_initialized = True

    def get_fp8_meta_tensors(self) -> Optional[Dict[str, List[torch.Tensor]]]:
        """Get scales and amaxes."""
        fwd_key, bwd_key = "scaling_fwd", "scaling_bwd"
        if fwd_key not in self.fp8_meta or bwd_key not in self.fp8_meta:
            return None

        fp8_meta_tensors = {fwd_key: [], bwd_key: []}
        with torch.no_grad():
            for key in (fwd_key, bwd_key):
                fp8_meta_tensors[key].append(self.fp8_meta[key].scale.clone())
                fp8_meta_tensors[key].append(self.fp8_meta[key].amax_history.clone())
        return fp8_meta_tensors

    def reset_fp8_meta_tensors(self, fp8_meta_tensors=None) -> None:
        """Reset scales and amaxes."""

        def reset(key):
            if key in self.fp8_meta:
                if fp8_meta_tensors is None:
                    self.fp8_meta[key].scale.copy_(torch.ones_like(self.fp8_meta[key].scale))
                    self.fp8_meta[key].amax_history.copy_(
                        torch.zeros_like(self.fp8_meta[key].amax_history)
                    )
                else:
                    assert key in fp8_meta_tensors, "Cannot reset fp8 tensors."
                    self.fp8_meta[key].scale.copy_(fp8_meta_tensors[key][0])
                    self.fp8_meta[key].amax_history.copy_(fp8_meta_tensors[key][1])

        with torch.no_grad():
            reset("scaling_fwd")
            reset("scaling_bwd")

    def get_extra_state(self) -> torch.Tensor:
        """Save before checkpointing."""

        # This implementation is working around a few issues:
        #
        # (1) PyTorch's "extra state" infrastructure might be able to
        #     support any picklable type, but they make no guarantees.
        #     We have experienced problems (e.g. in ONNX export) with
        #     non-tensor extra state.
        # (2) PyTorch's checkpointing infrastructure does not remap
        #     devices for "extra state" like it does for "state dict".
        #     Thus, we want to avoid putting extra state on the GPU
        #     since it may be loaded on the wrong device.
        # (3) The extra state consists of many small tensors. If we
        #     want to copy them all to CPU, then we need to avoid the
        #     overhead of many GPU-CPU memory transfers.
        #
        # See: https://github.com/NVIDIA/TransformerEngine/pull/351
        # See: https://github.com/NVIDIA/TransformerEngine/pull/363

        def to_cpu(src: torch.Tensor) -> torch.Tensor:
            """Helper function to make CPU copy of tensor

            Memory transfer is asynchronous w.r.t. host, so GPU should
            be synchronized before using result.

            """
            dst = torch.empty_like(src, device="cpu")
            dst.copy_(src, non_blocking=True)
            return dst

        # Store FP8 state if needed
        state = None
        fp8_checkpoint = self.fp8_meta["fp8_checkpoint"] or self.fp8 or self.fp8_calibration
        if fp8_checkpoint:

            # Copy tensors to CPU and store
            state = {}
            state["recipe"] = self.fp8_meta["recipe"]
            if state["recipe"].delayed():
                state["scale_fwd"] = to_cpu(self.fp8_meta["scaling_fwd"].scale)
                state["amax_history_fwd"] = to_cpu(self.fp8_meta["scaling_fwd"].amax_history)
                state["scale_bwd"] = to_cpu(self.fp8_meta["scaling_bwd"].scale)
                state["amax_history_bwd"] = to_cpu(self.fp8_meta["scaling_bwd"].amax_history)

            # Store other picklable values
            extra = {}
            for k, v in self.fp8_meta.items():
                if k != "buffer_index_and_autocast_key" and isinstance(
                    v, (bool, int, float, str, tuple, list)
                ):
                    extra[k] = v
            state["extra_fp8_variables"] = extra

        # Serialize state into byte tensor
        torch.cuda.synchronize()
        state_serialized = bytearray(pickle.dumps(state))
        state_serialized = torch.frombuffer(state_serialized, dtype=torch.uint8)
        return state_serialized

    def set_extra_state(self, state: torch.Tensor) -> None:
        """Load previous state."""
        if state is None:
            return

        # Load state
        if isinstance(state, torch.Tensor):
            # Default format: byte tensor with pickled data
            state = pickle.loads(state.detach().cpu().numpy().tobytes())
        elif isinstance(state, io.BytesIO):
            # Deprecated format with io.BytesIO
            state.seek(0)
            state = torch.load(state, map_location="cuda")
        else:
            raise RuntimeError("Unsupported checkpoint format.")

        if state is None:
            return

        # Load extra items
        self.fp8_meta.update(state["extra_fp8_variables"])
        self.fp8_meta["recipe"] = state["recipe"]
        if "global_fp8_buffer_pos_fwd_recompute" in self.fp8_meta:
            del self.fp8_meta["global_fp8_buffer_pos_fwd_recompute"]

        # Initialize before loading
        self.init_fp8_meta_tensors(self.fp8_meta["recipe"])

        def copy_tensor(src: torch.Tensor, dst: torch.Tensor) -> None:
            """Helper function to copy tensor from CPU

            Memory transfer is asynchronous w.r.t. host, so GPU should
            be synchronized before using result.

            """
            dst.copy_(src, non_blocking=True)

        # Load tensors
        if self.fp8_meta["recipe"].delayed():
            copy_tensor(state["scale_fwd"], self.fp8_meta["scaling_fwd"].scale)
            copy_tensor(state["amax_history_fwd"], self.fp8_meta["scaling_fwd"].amax_history)
            copy_tensor(state["scale_bwd"], self.fp8_meta["scaling_bwd"].scale)
            copy_tensor(state["amax_history_bwd"], self.fp8_meta["scaling_bwd"].amax_history)
        torch.cuda.synchronize()

    def set_activation_dtype(self, inp: torch.Tensor) -> None:
        """Get activation data type for AMP."""
        # Native AMP (`torch.autocast`) gets highest priority
        if torch.is_autocast_enabled():
            self.activation_dtype = torch.get_autocast_gpu_dtype()
            return

        # All checks after this have already been performed once, thus skip
        if self.activation_dtype == inp.dtype:
            return

        dtype = inp.dtype
        for name, param in self.named_parameters():
            if param is not None:
                assert dtype == param.dtype, (
                    "Data types for parameters must match when outside of autocasted region. "
                    f" Found input dtype: {dtype} and {name!r} dtype: {param.dtype}"
                )
        self.activation_dtype = dtype

    def set_tensor_parallel_group(self, tp_group: Union[dist_group_type, None]) -> None:
        """
        Set the tensor parallel group for the given
        module before executing the forward pass.

        Parameters
        ----------
        tp_group : ProcessGroup, default = `None`
                  tensor parallel process group.
        """
        self.tp_group = tp_group
        self.tp_group_initialized = True

    def _get_fp8_params(self) -> Union[List[torch.Tensor], None]:
        """returns the FP8 weights."""
        fp8_params = []
        for param in self.parameters(recurse=False):
            if isinstance(param, QuantizedTensor) and param.requires_grad:
                fp8_params.append(param)
        if len(fp8_params) == 0:
            return None
        return fp8_params

    # This routine is shared across FP8 and FP8_calibration paths so should not actually
    # assume FP8 execution.
    def init_fp8_metadata(self, num_gemms: int = 1) -> None:
        """Initialize fp8 related metadata and tensors during fprop."""
        self.fp8_parameters = FP8GlobalStateManager.with_fp8_parameters()
        self.fp8 = FP8GlobalStateManager.is_fp8_enabled()
        self.fp8_calibration = FP8GlobalStateManager.is_fp8_calibration()
        fp8_enabled = self.fp8 or self.fp8_calibration
        self.fp8_meta["fp8_checkpoint"] = self.fp8 or self.fp8_calibration

        if self.fp8_parameters or fp8_enabled:
            if (
                self.fp8_initialized
                and FP8GlobalStateManager.get_fp8_recipe() == self.fp8_meta["recipe"]
            ):
                # FP8 init has already been run and recipe is the same, don't do anything.
                return
            self.fp8_meta["recipe"] = FP8GlobalStateManager.get_fp8_recipe()
        else:
            # If fp8 isn't enabled, turn off and return.
            self.fp8_initialized = False
            return

        if self.fp8_parameters and not self.fp8_initialized:
            self.fp8_meta["num_gemms"] = num_gemms
            self.init_fp8_meta_tensors(self.fp8_meta["recipe"])

        if fp8_enabled:
            # Set FP8 and other FP8 metadata
            self.fp8_meta["num_gemms"] = num_gemms
            self.fp8_meta["fp8_group"] = FP8GlobalStateManager.get_fp8_group()

            # Set FP8_MAX per tensor according to recipe
            self.fp8_meta["fp8_max_fwd"] = self.fp8_meta["recipe"].fp8_format.value.max_fwd
            self.fp8_meta["fp8_max_bwd"] = self.fp8_meta["recipe"].fp8_format.value.max_bwd

            # Allocate scales and amaxes
            self.init_fp8_meta_tensors(self.fp8_meta["recipe"])
            self.fp8_initialized = True

            self.fp8_meta["recipe"] = FP8GlobalStateManager.get_fp8_recipe()

    @contextmanager
    def prepare_forward(
        self,
        inp: torch.Tensor,
        num_gemms: int = 1,
        allow_non_contiguous: bool = False,
    ) -> Generator[torch.Tensor, None, None]:
        """Checks and prep for FWD.
        The context manager is needed because there isn't a way for a module to know
        if it's the last FP8 module in the forward autocast. It is useful
        to setup the forward aggregated amax reduction for every module
        just in case. The autocast exit will pick up the most recent one.
        """
        # Activation recomputation is used and this is the second forward phase.
        if self.fp8 and in_fp8_activation_recompute_phase():
            FP8GlobalStateManager.get_old_fp8_meta_tensors_for_recompute(self.fp8_meta)
        else:
            assert inp.is_cuda, "TransformerEngine needs CUDA."

            if self.tp_size > 1:
                assert self.tp_group_initialized, "TP group not initialized."

            self.set_activation_dtype(inp)
            self.init_fp8_metadata(num_gemms=num_gemms)

            if self.fp8 and self.sequence_parallel and self.fp8_meta["recipe"].delayed():
                assert self.fp8_meta["recipe"].reduce_amax, (
                    "Amax reduction across tensor parallel group is "
                    "necessary when using sequence parallelism with FP8."
                )

            if self.fp8 and not FP8GlobalStateManager.fp8_graph_capturing():
                FP8GlobalStateManager.add_fp8_tensors_to_global_buffer(self.fp8_meta)

            # Activation recomputation is used and this is the first forward phase.
            if self.fp8 and self.training and is_fp8_activation_recompute_enabled():
                FP8GlobalStateManager.copy_forward_fp8_meta_tensors_for_recompute(self.fp8_meta)

        with torch.cuda.nvtx.range(self.__class__.__name__ + " forward"):
            if not allow_non_contiguous and not inp.is_contiguous():
                inp = inp.contiguous()
            yield inp

        if self.fp8 and in_fp8_activation_recompute_phase():
            FP8GlobalStateManager.restore_fp8_meta_tensors(self.fp8_meta)

    def set_nccl_overlap_warning_if_tp(self) -> None:
        """When using TP, the NCCL communication needs to be scheduled
        before the GEMM for there to be a guaranteed overlap. From the
        host side in TE, the comm calls are always launched first, but
        to ensure that the GEMM isn't scheduled first, the environment
        variable `CUDA_DEVICE_MAX_CONNECTIONS` needs to be set to 1 to
        force a single channel.
        """
        if self.tp_size == 1:
            return
        num_cuda_work_queues = int(os.getenv("CUDA_DEVICE_MAX_CONNECTIONS", "0"))
        if num_cuda_work_queues != 1:
            warnings.warn(
                "To guarantee overlapping TP and SP collectives with the backward"
                "GEMMs, set environment variable CUDA_DEVICE_MAX_CONNECTIONS = 1"
            )

    @staticmethod
    def grad_output_preprocess(
        ctx,
        grad_output: torch.Tensor,
        row_parallel_mode: bool,
        quantizer: Optional[Quantizer],
    ) -> Tuple[Union[torch.Tensor, None], ...]:
        """Utility function for backward.
        Returns tuple in order (all optional/None based on training precion/recipe):
835
836
            R1: gathered `grad_output`.
            R2: bias gradient on R1.
837
838

        """
        grad_output = grad_output.reshape((-1, grad_output.shape[-1]))
        grad_output = grad_output.contiguous()
        gather_grad_output = row_parallel_mode and ctx.sequence_parallel

        # Non-FP8 case: bgrad is fused with wgrad for this case.
        if not ctx.fp8:
            if gather_grad_output:
                if not ctx.ub_overlap_ag:
                    grad_output, _ = gather_along_first_dim(grad_output, ctx.tp_group)
                else:
                    ctx.ub_obj_gradout.copy_into_buffer(grad_output, quantizer, local_chunk=True)
                    grad_output = ctx.ub_obj_gradout.get_buffer(quantizer)
            return grad_output, None

        # FP8 with all-gather: unfused bgrad, fused cast + transpose
        if gather_grad_output:
            grad_bias = None
            if ctx.use_bias:
                grad_bias = grad_output.view(-1, grad_output.shape[-1]).sum(dim=0)
            if ctx.ub_overlap_ag:
                # Quantize the gradient if needed
                if not isinstance(
                    grad_output, (QuantizedTensor, Float8TensorBase, MXFP8TensorBase)
                ):
                    grad_output = quantizer(grad_output)

                # Copy into communication buffer, and replace original gradient with it
                ctx.ub_obj_gradout.copy_into_buffer(grad_output, quantizer, local_chunk=True)
                grad_output = ctx.ub_obj_gradout.get_buffer(quantizer)
            else:
                grad_output, _ = gather_along_first_dim(
                    grad_output,
                    ctx.tp_group,
                    quantizer=quantizer,
                )
            return grad_output, grad_bias

        # FP8 without all-gather: fused bgrad + cast + transpose
        grad_bias = None
        if ctx.use_bias:
            if isinstance(grad_output, (QuantizedTensor, Float8TensorBase, MXFP8TensorBase)):
                grad_bias = grad_output.dequantize().view(-1, grad_output.shape[-1]).sum(dim=0)
            else:
                grad_bias, grad_output = tex.bgrad_quantize(grad_output, quantizer)
        if not isinstance(grad_output, (QuantizedTensor, Float8TensorBase, MXFP8TensorBase)):
            grad_output = quantizer(grad_output)
        return grad_output, grad_bias

    def register_parameter(self, name, param, **kwargs):
        """
        Thin wrapper around PyTorch parameter registration to stash additional parameter
        metadata used in deferred initialization.
        """
        super().register_parameter(name, param)
        self.param_init_meta[name] = _ParameterInitMeta(**kwargs)

    def reset_parameters(self, defer_init: Optional[bool] = False) -> None:
        """
        Reset all module parameters to initial values. Unless deferred initialization
        is specified, all parameters on a 'meta' device are also materialized on a real cuda
        device before the values are reset to initial.
        """
        if defer_init:
            return

        for name, param in self.named_parameters(recurse=False):
            # Ensure parameter is on a real device
            if param.device == torch.device("meta"):
                param = torch.empty_like(param, device="cuda")

            # Initialize the parameter values on device
            init_fn = self.param_init_meta[name].init_fn
            get_rng_state_tracker = self.param_init_meta[name].get_rng_state_tracker
            if get_rng_state_tracker is None:
                init_fn(param)
            else:
                if hasattr(self, "rng_tracker_name") and self.rng_tracker_name:
                    with get_rng_state_tracker().fork(self.rng_tracker_name):
                        init_fn(param)
                else:
                    with get_rng_state_tracker().fork():
                        init_fn(param)

            # If primary weights are in fp8, wrap the parameter as FP8Tensor
            fp8_meta_index = self.param_init_meta[name].fp8_meta_index
            if self.primary_weights_in_fp8 and fp8_meta_index is not None:
                quantizer = self.quantizers["scaling_fwd"][fp8_meta_index]
                assert (
                    quantizer is not None
                )  # to use primary fp8 weight one needs to use FP8 autocast with specific recipe.
                quantizer.internal = False
                param = quantizer(param)

            # Redo parameter wrap in case we broke it above
            # NOTE: Currently this can only be broken when primary weights are in Fp8 but
            #       re-applying the nn.Parameter() wrap is a no-op when the input is already
            #       a parameter so we always re-apply it just for extra safety.
            setattr(self, name, torch.nn.Parameter(param))

    @abstractmethod
    def forward(self):
        """Needs override."""

    def get_weight_workspace(
        self,
        *,
        tensor: Optional[torch.Tensor] = None,
        quantizer: Optional[Quantizer] = None,
        cache_name: Optional[str] = None,
        update_workspace: bool = True,
        skip_update_flag: Optional[torch.Tensor] = None,
        fsdp_group: Optional[dist_group_type] = None,
    ) -> QuantizedTensor:
        """Get FP8 workspace buffer and maybe update its values

        The workspace buffer may be cached for future function calls.

        Parameters
        ----------
        tensor : torch.Tensor, optional
            Values to copy into workspace. Required if the workspace
            is being constructed or updated.
        quantizer: Quantizer, optional
            Quantizer used to cast the weights. Required if the
            workspace is being constructed or updated.
        cache_name: str, optional
            Key for caching.
        update_workspace: bool, default = `True`
            Update workspace with values from `tensor`.
        skip_update_flag: torch.Tensor, optional
            GPU flag to skip updating the workspace. Takes precedence
            over `update_workspace` if provided.
        fsdp_group: ProcessGroup, default = None
            FSDP process group that the weights are distributed over.
        """

        # Try getting workspace from cache
        out = None
        if cache_name is not None:
            out = self._fp8_workspaces.get(cache_name, None)

        # Gather cached Fp8 workspace if it's distributed
        # NOTE: FSDP sharding is supported only for Fp8 buffers and will not work
        #       for models initialized with Fp8 primary weights.
        if (
            out is not None
            and tensor is not None
            and fsdp_group is not None
            and out.data.shape != tensor.data.shape
        ):
            _fsdp_gather_tensors(fsdp_group, [tensor.data.shape], out)

        # Construct workspace if needed
        if out is None:
            if tensor is None or quantizer is None:
                raise ValueError(
                    "tensor and quantizer kwargs must be provided to construct FP8 workspace"
                )
            out = quantizer(tensor)

            # Update cache
            if cache_name is not None:
                self._fp8_workspaces[cache_name] = out
            return out

        # Update workspace if needed
        if skip_update_flag is not None:
            update_workspace = True
        if update_workspace:
            if tensor is None:
                raise ValueError("tensor kwarg must be provided to update FP8 workspace")
            if hasattr(out, "quantize_"):
                out.quantize_(tensor, noop_flag=skip_update_flag)
            else:
                tex.quantize(tensor, quantizer, out, skip_update_flag)

        return out

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        """
        This function loads tensors and extra state including fp8 metadata.
        This metadata is essential for copying fp8 tensors, as the copy_ function
        uses the scale_inv parameter from fp8_meta to set the correct scaling factor
        for the new tensor.
        Hence, this extra state must be loaded before the tensor copying process,
        not after, as is typically done in _load_from_state_dict.
        Tensors are copied into fp8 tensors only when self.primary_weights_in_fp8=True,
        otherwise, this behavior is not required.
        """
        if self.primary_weights_in_fp8:
            extra_state_key = prefix + torch.nn.modules.module._EXTRA_STATE_KEY_SUFFIX
            if extra_state_key in state_dict:
                self.set_extra_state(state_dict[extra_state_key])
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )