# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""FP8 utilities for TransformerEngine"""
import os
from contextlib import contextmanager
from collections import deque
from typing import Callable, List, Optional, Dict, Any, Tuple, Union

import torch
import transformer_engine_extensions as tex
from transformer_engine.common.recipe import DelayedScaling, Format

from .constants import dist_group_type
from .utils import get_device_compute_capability
from .jit import jit_fuser


__all__ = ["fp8_autocast", "fp8_model_init"]


def check_fp8_support() -> Tuple[bool, str]:
    """Return whether FP8 support is available."""
    if get_device_compute_capability() >= (9, 0): # hopper and above
        return True, ""
    if get_device_compute_capability() < (8, 9): # pre-ada
        return False, "Device compute capability 8.9 or higher required for FP8 execution."
    if tex.get_cublasLt_version() < 120103:
        return False, "CublasLt version 12.1.3.x or higher required for FP8 execution on Ada."
    if tuple(int(v) for v in torch.version.cuda.split(".")[:2]) < (12, 1):
        return False, "CUDA version 12.1 or higher required for FP8 execution on Ada."
    return True, ""


def get_default_fp8_recipe() -> DelayedScaling:
    """FP8 recipe if not provided by user
    Margin = 0, interval = 1, E4M3
39
    """
40
    return DelayedScaling()


def get_fp8_te_dtype(
    fp8_recipe: DelayedScaling, fprop_tensor: bool = True
) -> tex.DType:
    """Get fp8 data type according to recipe and tensor"""
    if fp8_recipe.fp8_format == Format.E4M3 or (
        fp8_recipe.fp8_format == Format.HYBRID and fprop_tensor
    ):
        return tex.DType.kFloat8E4M3
    return tex.DType.kFloat8E5M2
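
# Illustrative mapping (a sketch, not executed here): with `Format.HYBRID`,
# forward tensors use E4M3 while backward (gradient) tensors use E5M2.
#
#     recipe = DelayedScaling(fp8_format=Format.HYBRID)
#     get_fp8_te_dtype(recipe, fprop_tensor=True)   # tex.DType.kFloat8E4M3
#     get_fp8_te_dtype(recipe, fprop_tensor=False)  # tex.DType.kFloat8E5M2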


def get_fp8_max(
    fp8_recipe: DelayedScaling, fprop_tensor: bool = True
) -> float:
    """Get max representable FP8 value."""
    if fp8_recipe.fp8_format == Format.E4M3 or (
        fp8_recipe.fp8_format == Format.HYBRID and fprop_tensor
    ):
        return Format.E4M3.value.max_fwd
    return Format.E5M2.value.max_fwd
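
# Worked example (values are the standard FP8 maxima; treat them as an
# assumption if the recipe formats ever change): E4M3 has max_fwd = 448.0 and
# E5M2 has max_fwd = 57344.0, so a HYBRID recipe yields:
#
#     recipe = DelayedScaling(fp8_format=Format.HYBRID)
#     get_fp8_max(recipe, fprop_tensor=True)   # 448.0 (E4M3)
#     get_fp8_max(recipe, fprop_tensor=False)  # 57344.0 (E5M2)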


class FP8GlobalStateManager:
    """Class to keep track of and manipulate the global
    FP8 state at different stages of execution.
    """
    FP8_ENABLED = False
    FP8_CALIBRATION = False
    FP8_RECIPE = None
    FP8_DISTRIBUTED_GROUP = None
    FP8_PARAMETERS = False
    IS_FIRST_FP8_MODULE = False
    FP8_GRAPH_CAPTURING = False
    FP8_AUTOCAST_DEPTH = 0
    global_amax_buffer = {}
    global_amax_history_buffer = {}
    global_scale_buffer = {}
    global_scale_inv_buffer = {}
    fp8_tensors_recompute_buffer = []
    fp8_available = None
    reason_for_no_fp8 = ""
    autocast_arguments = {}
    autocast_to_fp8_params = {}
    fp8_param_to_autocast = {}
    skip_fp8_weight_update_tensor = None

    @classmethod
    def reset(cls) -> None:
        """Reset the global state"""
        cls.FP8_ENABLED = False
        cls.FP8_CALIBRATION = False
        cls.FP8_RECIPE = None
        cls.FP8_DISTRIBUTED_GROUP = None
        cls.FP8_PARAMETERS = False
        cls.IS_FIRST_FP8_MODULE = False
        cls.FP8_GRAPH_CAPTURING = False
        cls.FP8_AUTOCAST_DEPTH = 0
        cls.global_amax_buffer = {}
        cls.global_amax_history_buffer = {}
        cls.global_scale_buffer = {}
        cls.global_scale_inv_buffer = {}
        cls.fp8_tensors_recompute_buffer = []
        cls.fp8_available = None
        cls.reason_for_no_fp8 = ""
        cls.autocast_arguments = {}
        cls.autocast_to_fp8_params = {}
        cls.fp8_param_to_autocast = {}
        cls.skip_fp8_weight_update_tensor = None

    @classmethod
    def set_skip_fp8_weight_update_tensor(cls, skip: bool) -> None:
        """`skip_fp8_weight_update_tensor` inplace setter."""
        if cls.skip_fp8_weight_update_tensor is None:
            cls.skip_fp8_weight_update_tensor = torch.empty(1, dtype=torch.float32, device="cuda")
        cls.skip_fp8_weight_update_tensor.fill_(skip)

    @classmethod
    def get_skip_fp8_weight_update_tensor(cls) -> Optional[torch.Tensor]:
        """`skip_fp8_weight_update_tensor` getter."""
        return cls.skip_fp8_weight_update_tensor

    @classmethod
    def is_fp8_available(cls) -> Tuple[bool, str]:
        """Return if fp8 support is available"""
        if cls.fp8_available is None:
            cls.fp8_available, cls.reason_for_no_fp8 = check_fp8_support()
        return cls.fp8_available, cls.reason_for_no_fp8

    @staticmethod
    def get_meta_tensor_key(forward: bool = True) -> str:
        """Returns scaling key in `fp8_meta`."""
        if forward:
            return "scaling_fwd"
        return "scaling_bwd"

    @staticmethod
    def get_fwd_bwd_key(forward: bool = True) -> str:
        """Convert bool `forward` to string."""
        return "forward" if forward else "backward"

    @classmethod
    def get_buffer_info(cls) -> str:
        """
        Returns a key for `fp8_meta` that stores the module's index
        in the global buffers along with autocast information.
        """
        return "buffer_index_and_autocast_key"

    @classmethod
    def get_key_in_buffer(
        cls,
        forward: bool,
        fp8_weights: bool,
        fp8_recipe: DelayedScaling,
        fp8_group: dist_group_type,
    ) -> str:
        """Returns a key into the global FP8 buffers."""
        autocast_key = cls.get_unique_autocast_key(fp8_recipe, fp8_group)
        fwd_bwd_key = cls.get_fwd_bwd_key(forward)
        return f"{fwd_bwd_key}_{fp8_weights}_{autocast_key}"

    @classmethod
    def split_key_in_buffer(cls, key: str) -> Tuple[bool, bool, str]:
        """Splits buffer key into relevant parts."""
        forward, fp8_weights, autocast_key = key.split("_", 2)
        forward = forward == "forward"
        fp8_weights = fp8_weights == "True"
        return forward, fp8_weights, autocast_key

    @classmethod
    def add_fp8_tensors_to_global_buffer(
        cls,
        fp8_meta: Dict[str, Any],
        fp8_weights: Optional[List[torch.Tensor]] = None,
    ) -> None:
        """
        The amax reduction process happens completely outside the FP8 modules.
        To participate in the reduction, the only role played by a module is
        to call this function in order to append its FP8 tensors to the global
        buffers. Four global buffers are maintained, one each for amax, amax
        history, scale, and scale-inverse. Each buffer maps keys to lists of
        FP8 tensors. Keys carry a `forward` or `backward` prefix to indicate
        the type of FP8 tensor, since the forward and backward reductions
        happen separately.

        Note: For CUDA graph capture, this method is called from the graphed
        wrapper. For the non-graphed case, it is called from within the module.
        """

        # Every module must call this function exactly once since
        # the amax tensors are static. Ensures that compatibility
        # with non-graphed modules is maintained.
        index_in_buffer = cls.get_buffer_info()  # Same index for fwd/bwd fp8 tensors.
        if index_in_buffer in fp8_meta:
            return

        fp8_meta[index_in_buffer] = []
        for forward in (True, False):
            # This algorithm creates a two-way map with `autocast_to_fp8_params` and
            # `fp8_param_to_autocast`. This is used to keep track of FP8 weights in an
            # autocasted region and cross-reference them in `float8_tensor.py` to
            # perform the forward amax reduction.
            fp8_meta_tensor_key = cls.get_meta_tensor_key(forward=forward)
            if fp8_meta_tensor_key not in fp8_meta:
                # Handles non-parameter FP8 modules, e.g. DPA.
                continue

            if forward and fp8_weights is not None:
                autocast_key = cls.get_unique_autocast_key(
                                    fp8_meta["recipe"], fp8_meta["fp8_group"])
                fp8_weight_set = {id(w._data) for w in fp8_weights}
                if autocast_key not in cls.autocast_to_fp8_params:
                    cls.autocast_to_fp8_params[autocast_key] = fp8_weight_set
                else:
                    cls.autocast_to_fp8_params[autocast_key] = (
                        cls.autocast_to_fp8_params[autocast_key].union(fp8_weight_set))
                # Identify correct autocast key for a given param.
                for w in fp8_weight_set:
                    cls.fp8_param_to_autocast[w] = autocast_key

            key = cls.get_key_in_buffer(
                forward, fp8_weights is not None, fp8_meta["recipe"], fp8_meta["fp8_group"])

            if key not in cls.global_amax_buffer:
                cls.global_amax_buffer[key] = [fp8_meta[fp8_meta_tensor_key].amax_history[0]]
                cls.global_amax_history_buffer[key] = [fp8_meta[fp8_meta_tensor_key].amax_history]
                cls.global_scale_buffer[key] = [fp8_meta[fp8_meta_tensor_key].scale]
                cls.global_scale_inv_buffer[key] = [fp8_meta[fp8_meta_tensor_key].scale_inv]
            else:
                cls.global_amax_buffer[key].append(fp8_meta[fp8_meta_tensor_key].amax_history[0])
                cls.global_amax_history_buffer[key].append(
                    fp8_meta[fp8_meta_tensor_key].amax_history)
                cls.global_scale_buffer[key].append(fp8_meta[fp8_meta_tensor_key].scale)
                cls.global_scale_inv_buffer[key].append(fp8_meta[fp8_meta_tensor_key].scale_inv)
            fp8_meta[index_in_buffer].append(len(cls.global_amax_buffer[key]) - 1)
            fp8_meta[index_in_buffer].append(key)
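
    # After this call, `fp8_meta["buffer_index_and_autocast_key"]` holds, for each
    # direction that has meta tensors, the module's position in the buffer lists
    # followed by the buffer key, i.e. schematically:
    #
    #     [fwd_pos, fwd_key, bwd_pos, bwd_key]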

    @classmethod
    def is_fp8_enabled(cls) -> bool:
        """Is FP8 enabled"""
        return cls.FP8_ENABLED

    @classmethod
    def is_fp8_calibration(cls) -> bool:
        """Is FP8 calibration"""
        return cls.FP8_CALIBRATION

    @classmethod
    def with_fp8_parameters(cls) -> bool:
        """Should the parameters be stored as FP8"""
        return cls.FP8_PARAMETERS

    @classmethod
    def fp8_graph_capturing(cls) -> bool:
        """Is CUDA graph capture under way?"""
        return cls.FP8_GRAPH_CAPTURING or torch.cuda.is_current_stream_capturing()

    @classmethod
    def is_first_fp8_module(cls):
        """Returns `True` only the first time when called multiple
        times from within the same `fp8_autocast` context.
        """
        tmp = cls.IS_FIRST_FP8_MODULE
        cls.IS_FIRST_FP8_MODULE = False
        return tmp

    @classmethod
    def get_fp8_recipe(cls) -> DelayedScaling:
        """Return the fp8 recipe"""
        return cls.FP8_RECIPE

    @classmethod
    def get_fp8_group(cls) -> Union[dist_group_type, None]:
        """Return the fp8 group for scale/amax comm"""
        return cls.FP8_DISTRIBUTED_GROUP

    @classmethod
    def get_fp8_autocast_state(cls) -> Tuple[bool, bool, DelayedScaling, dist_group_type, bool]:
        """FP8 autocast state getter"""
        return (
            cls.FP8_ENABLED,
            cls.FP8_CALIBRATION,
            cls.FP8_RECIPE,
            cls.FP8_DISTRIBUTED_GROUP,
            cls.IS_FIRST_FP8_MODULE,
            cls.FP8_GRAPH_CAPTURING)

    @classmethod
    def set_fp8_autocast_state(
        cls,
        fp8_state: Tuple[bool, bool, DelayedScaling, dist_group_type, bool]
    ) -> None:
        """FP8 autocast state setter"""
        (cls.FP8_ENABLED,
         cls.FP8_CALIBRATION,
         cls.FP8_RECIPE,
         cls.FP8_DISTRIBUTED_GROUP,
         cls.IS_FIRST_FP8_MODULE,
         cls.FP8_GRAPH_CAPTURING) = fp8_state

    @staticmethod
    def reduce_tensor_across_group_op_max(
        tensor: torch.Tensor, group: dist_group_type
    ) -> None:
        """Reduce tensor across given group."""
        if torch.distributed.is_initialized():
            torch.distributed.all_reduce(
                tensor,
                op=torch.distributed.ReduceOp.MAX,
                group=group,
                async_op=False,
            )

    @classmethod
    def reduce_and_update_fp8_tensors(
        cls,
        forward: bool = True,
        fp8_weights: bool = False,
    ) -> None:
        """Concatenate, reduce, and split amaxes in the global buffer."""
        for buffer_key, amax_buffer in cls.global_amax_buffer.items():
            # Check for forward or backward reduction.
            fwd_update, fp8_weights_update, autocast_key = cls.split_key_in_buffer(buffer_key)
            if fwd_update != forward:
                continue
            # Only skip a forward update when `fp8_weights` is explicitly set to `True`
            # (inside optimizer) and the current key is not an `fp8_weight_update` key.
            # For other cases, we need to reduce because of activation tensors.
            # TODO(ksivaman) consider separate weight and activation fp8_tensors.
            if fwd_update and fp8_weights and not fp8_weights_update:
                continue
            if len(amax_buffer) == 0:
                continue

            # Retrieve autocast specific args and concat amaxes.
            recipe, group = cls.autocast_arguments[autocast_key]
            contiguous_amax = torch.cat(amax_buffer)

            # Reduction.
            if (recipe.reduce_amax
                and torch.distributed.is_initialized()
                and torch.distributed.get_world_size(group=group) > 1):
                cls.reduce_tensor_across_group_op_max(contiguous_amax, group)

            # Amax and scale update.
            unfused_update = (bool(int(os.getenv("NVTE_UNFUSED_FP8_UPDATE", "0")))
                              or callable(recipe.amax_compute_algo)
                              or callable(recipe.scaling_factor_compute_algo))

            if not unfused_update:
                tex.fused_amax_and_scale_update_after_reduction(
                    contiguous_amax,
                    cls.global_amax_history_buffer[buffer_key],
                    cls.global_scale_buffer[buffer_key],
                    cls.global_scale_inv_buffer[buffer_key],
                    recipe.amax_compute_algo,
                    get_fp8_te_dtype(recipe, forward),
                    recipe.margin,
                )
            else:
                split_and_copy(contiguous_amax, amax_buffer, [x.numel() for x in amax_buffer])

                for amax_history, scale, scale_inv in zip(
                    cls.global_amax_history_buffer[buffer_key],
                    cls.global_scale_buffer[buffer_key],
                    cls.global_scale_inv_buffer[buffer_key],
                ):
                    _amax_and_scale_update(
                        amax_history, scale, scale_inv, get_fp8_max(recipe, forward), recipe)

    @classmethod
    def get_unique_autocast_key(
        cls,
        recipe: Optional[DelayedScaling] = None,
        group: Optional[dist_group_type] = None,
    ):
        """
        For FP8, each autocast can be uniquely identified by the recipe and fp8 group.
        Using `hash` here is safe because the key never crosses checkpoint boundaries.
        """
        return f"{str(recipe)}:{hash(group)}"

    @classmethod
    def fp8_autocast_enter(
        cls,
        enabled: bool = False,
        calibrating: bool = False,
        fp8_recipe: Optional[DelayedScaling] = None,
        fp8_group: Optional[dist_group_type] = None,
        _graph: bool = False,
    ) -> None:
        """Set state and tracking variables for entry into FP8 region."""

        fp8_recipe = get_default_fp8_recipe() if fp8_recipe is None else fp8_recipe
        autocast_key = cls.get_unique_autocast_key(fp8_recipe, fp8_group)
        cls.autocast_arguments[autocast_key] = (fp8_recipe, fp8_group)

        cls.FP8_ENABLED = enabled
        cls.FP8_CALIBRATION = calibrating
        cls.FP8_RECIPE = fp8_recipe
        cls.FP8_DISTRIBUTED_GROUP = fp8_group
        cls.FP8_GRAPH_CAPTURING = _graph

        if cls.FP8_AUTOCAST_DEPTH == 0:
            cls.IS_FIRST_FP8_MODULE = True
        cls.FP8_AUTOCAST_DEPTH += 1

        if enabled:
            fp8_available, reason_for_no_fp8 = cls.is_fp8_available()
            assert fp8_available, reason_for_no_fp8

    @classmethod
    def fp8_autocast_exit(cls, enabled: bool, _graph: bool) -> None:
        """Set state and tracking variables for exit from FP8 region."""
        cls.FP8_AUTOCAST_DEPTH -= 1
        # Reduce only the non-FP8 weight modules here.
        # FP8 weight modules are reduced at the end of the optimizer
        # step after the weight amax is populated.
        if enabled and cls.FP8_AUTOCAST_DEPTH == 0 and not _graph and torch.is_grad_enabled():
            cls.reduce_and_update_fp8_tensors(forward=True, fp8_weights=False)

    @classmethod
    def copy_forward_fp8_meta_tensors_for_recompute(cls, fp8_meta: Dict[str, Any]) -> None:
        """Copy the scaling factors and amaxes for recompute forward phase
        to ensure both forward steps are numerically same.
        """
        buffer_position_key = "global_fp8_buffer_pos_fwd_recompute"

        to_copy = [
            fp8_meta["scaling_fwd"].amax_history.clone(),
            fp8_meta["scaling_fwd"].scale.clone(),
            fp8_meta["scaling_fwd"].scale_inv.clone(),
        ]

        if buffer_position_key in fp8_meta:
            cls.fp8_tensors_recompute_buffer[fp8_meta[buffer_position_key]].append(to_copy)
        else:
            if len(cls.fp8_tensors_recompute_buffer) == 0:
                cls.fp8_tensors_recompute_buffer = [deque()]
            else:
                cls.fp8_tensors_recompute_buffer.append(deque())
            cls.fp8_tensors_recompute_buffer[-1].append(to_copy)
            fp8_meta[buffer_position_key] = len(cls.fp8_tensors_recompute_buffer) - 1

    @classmethod
    def get_old_fp8_meta_tensors_for_recompute(cls, fp8_meta: Dict[str, Any]) -> None:
        """Switch to the copied scaling factors and amaxes from phase
        1 forward for indentical numerical outputs.
        """

        # Store updated amaxes and scales from phase 1 post forward.
        fp8_meta["updated_amax_history_fwd"] = fp8_meta["scaling_fwd"].amax_history
        fp8_meta["updated_scale_fwd"] = fp8_meta["scaling_fwd"].scale
        fp8_meta["updated_scale_inv_fwd"] = fp8_meta["scaling_fwd"].scale_inv

        # Retrieve stashed amaxes and scales from phase 1 pre forward.
        buffer_position_key = "global_fp8_buffer_pos_fwd_recompute"
        stashed_fp8_meta = cls.fp8_tensors_recompute_buffer[
            fp8_meta[buffer_position_key]
        ].popleft()

        # Replace amaxes and scales with stashed values for phase 2 forward
        fp8_meta["scaling_fwd"].amax_history = stashed_fp8_meta[0]
        fp8_meta["scaling_fwd"].scale = stashed_fp8_meta[1]
        fp8_meta["scaling_fwd"].scale_inv = stashed_fp8_meta[2]

    @staticmethod
    def restore_fp8_meta_tensors(fp8_meta: Dict[str, Any]) -> None:
        """Restore latest scaling factors and amaxes after recompute forward run."""
        fp8_meta["scaling_fwd"].amax_history = fp8_meta["updated_amax_history_fwd"]
        fp8_meta["scaling_fwd"].scale = fp8_meta["updated_scale_fwd"]
        fp8_meta["scaling_fwd"].scale_inv = fp8_meta["updated_scale_inv_fwd"]


@contextmanager
def fp8_model_init(enabled: bool = True) -> None:
    """
    Context manager for FP8 initialization of parameters.

    Example usage:

    .. code-block:: python

        with fp8_model_init(enabled=True):
            model = transformer_engine.pytorch.Linear(768, 768)

    Parameters
    ----------
    enabled: bool, default = `True`
             when enabled, Transformer Engine modules created inside this `fp8_model_init`
             region will hold only FP8 copies of their parameters, as opposed to the default
             behavior where both higher precision and FP8 copies are present. Setting this
             option to `True` may result in lower memory consumption and is especially
             useful for scenarios like:

             * full model training using optimizer with master weights, where the high
               precision copies of weights are already present in the optimizer.
             * inference, where only the FP8 copies of the parameters are used.
             * LoRA-like fine-tuning, where the main parameters of the model do not change.

             This functionality is *EXPERIMENTAL*.
    """
    _fp8_parameters = FP8GlobalStateManager.FP8_PARAMETERS
    FP8GlobalStateManager.FP8_PARAMETERS = enabled
    try:
        yield
    finally:
        FP8GlobalStateManager.FP8_PARAMETERS = _fp8_parameters
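
# A minimal sketch (illustrative module and tensor names) combining
# `fp8_model_init` with `fp8_autocast` for low-memory inference:
#
#     with fp8_model_init(enabled=True):
#         model = transformer_engine.pytorch.Linear(768, 768)  # FP8-only params
#     with fp8_autocast(enabled=True):
#         out = model(inp)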


@contextmanager
def fp8_autocast(
    enabled: bool = True,
    calibrating: bool = False,
    fp8_recipe: Optional[DelayedScaling] = None,
    fp8_group: Optional[dist_group_type] = None,
    _graph: bool = False,
) -> None:
    """
    Context manager for FP8 usage.

    .. code-block:: python

        with fp8_autocast(enabled=True):
            out = model(inp)

    .. note::

        Support for FP8 in the Linear layer of Transformer Engine is currently limited to tensors
        with shapes where both dimensions are divisible by 16. In terms of the input to the full
        Transformer network, this typically requires padding the sequence length to a multiple of 16.

    .. note::

        When :attr:`fp8_recipe.reduce_amax==True`, any module must not be invoked more than once
        inside a single `fp8_autocast` region. This is unsupported behavior because the amax
        reduction is handled during the exit of the `fp8_autocast` context. Calling the same
        module more than once inside an `fp8_autocast` region overwrites the amax tensors
        before reduction can occur.

    Parameters
    ----------
    enabled: bool, default = `True`
             whether or not to enable fp8
    calibrating: bool, default = `False`
                 calibration mode allows collecting statistics such as amax and scale
                 data of fp8 tensors even when executing without fp8 enabled. This is
                 useful for saving an inference ready fp8 checkpoint while training
                 using a higher precision.
    fp8_recipe: recipe.DelayedScaling, default = `None`
                recipe used for FP8 training.
    fp8_group: torch._C._distributed_c10d.ProcessGroup, default = `None`
               distributed group over which amaxes for the fp8 tensors
               are reduced at the end of each training step.
    """
    fp8_state = FP8GlobalStateManager.get_fp8_autocast_state()
    FP8GlobalStateManager.fp8_autocast_enter(enabled=enabled,
                                             calibrating=calibrating,
                                             fp8_recipe=fp8_recipe,
                                             fp8_group=fp8_group,
                                             _graph=_graph)
    try:
        yield
    finally:
        FP8GlobalStateManager.set_fp8_autocast_state(fp8_state)
        FP8GlobalStateManager.fp8_autocast_exit(enabled, _graph=_graph)
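
# A minimal calibration sketch (illustrative names): run in higher precision
# while still collecting amax/scale statistics, so an FP8-ready checkpoint can
# be saved after training.
#
#     with fp8_autocast(enabled=False, calibrating=True):
#         out = model(inp)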


def _update_amax_history(amax_history: torch.Tensor) -> torch.Tensor:
    """Update amax history and set next amax to zero."""
    if amax_history.shape[0] > 1:
        new_amax_history = torch.roll(amax_history, -1, 0)
        amax_history.copy_(new_amax_history)
    amax_history[0].fill_(0.0)
    return amax_history
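
# Worked example: for a history of length 3 holding [a0, a1, a2] (index 0 is the
# slot recording the current iteration's amax), the roll and fill above yield
# [0, a2, a0]: index 0 is cleared for the next iteration and the just-recorded
# amax wraps around to the end of the ring.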


@torch.jit.script
def _default_get_amax_and_update_history(
    amax_history: torch.Tensor,
    amax_compute_algo: str,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Default function to obtain amax from history."""
    if amax_compute_algo == "max":
        amax = torch.max(amax_history, dim=0).values
    else:  # amax_compute_algo == "most_recent"
        amax = amax_history[0].clone()

    amax_history = _update_amax_history(amax_history)
    return amax_history, amax


@jit_fuser
def _default_sf_compute(
    amax: torch.Tensor,
    scale: torch.Tensor,
    fp8_max: float,
    margin: int,
    _fp32_max: float = torch.finfo(torch.float32).max,  # finfo not available in jitter
) -> torch.Tensor:
    """Default function to convert amax to scaling factor.
    Computing the scaling factor requires consideration of the following scenarios:
    1. amax == 0:
       No action is possible, set scale to the previous scale (or 1).
    2. 0 < amax < tiny_amax:
       The amax is so small that the scale overflows to infinity in FP32.
       Set scale = FP32_max.
    3. tiny_amax <= amax < FP32_max:
       Set scale = FP8_max (or scaled_max) / amax
    4. When amax == inf or amax == nan:
       No action is possible, set scale to the previous scale (or 1).
    """
    sf = (fp8_max / amax) / (2 ** margin)
    sf = torch.where(amax > 0.0, sf, scale)
    sf = torch.where(torch.isfinite(amax), sf, scale)
    sf = torch.where(torch.isinf(sf), torch.full_like(sf, _fp32_max), sf)
    scale.copy_(sf)
    return scale
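
# Worked example (margin = 0): with the E4M3 forward maximum fp8_max = 448.0 and
# an observed amax of 0.5, sf = (448.0 / 0.5) / 2**0 = 896.0. If amax is zero or
# non-finite, the previous scale is kept; if sf itself overflows to infinity, it
# is clamped to FP32 max.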


def _compute_amax_and_update_history(
    amax_history: torch.Tensor,
    amax_compute_algo: Union[Callable, str],
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Obtain the amax from the history."""

    if callable(amax_compute_algo):
        amax = amax_compute_algo(amax_history)
        amax_history = _update_amax_history(amax_history)
        return amax_history, amax
    return _default_get_amax_and_update_history(
        amax_history,
        amax_compute_algo,
    )


def _compute_scaling_factor(
    amax: torch.Tensor,
    scale: torch.Tensor,
    fp8_max: float,
    recipe: DelayedScaling,
) -> torch.Tensor:
    """Convert amax to scaling factor."""

    if recipe.scaling_factor_compute_algo is None:
        return _default_sf_compute(
            amax,
            scale,
            fp8_max,
            recipe.margin,
        )
    return recipe.scaling_factor_compute_algo(amax, scale, fp8_max, recipe)


def _amax_and_scale_update(
    amax_history: torch.Tensor,
    scale: torch.Tensor,
    scale_inv: torch.Tensor,
    fp8_max: float,
    recipe: DelayedScaling,
) -> None:
    """Updates FP8 meta tensors."""
    new_amax_history, amax = _compute_amax_and_update_history(
        amax_history,
        recipe.amax_compute_algo,
    )
    new_scale = _compute_scaling_factor(amax, scale, fp8_max, recipe)
    scale.copy_(new_scale)
    scale_inv.copy_(1.0 / new_scale)
    amax_history.copy_(new_amax_history)


def split_and_copy(
    buffer: torch.Tensor,
    outputs: List[torch.Tensor],
    chunk_sizes: List[int],
) -> None:
    """Split `buffer` by `chunk_sizes` and copy into `outputs`."""
    splits = buffer.split(chunk_sizes)
    torch._foreach_copy_(outputs, splits)
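
# A minimal usage sketch for `split_and_copy` (illustrative tensors):
#
#     buffer = torch.arange(6, dtype=torch.float32)
#     outputs = [torch.empty(2), torch.empty(4)]
#     split_and_copy(buffer, outputs, [2, 4])
#     # outputs[0] -> [0., 1.]; outputs[1] -> [2., 3., 4., 5.]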