# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FP8 utilities for TransformerEngine"""

from contextlib import contextmanager
from typing import Tuple, Optional, Dict, Any, Union

import numpy as np

import paddle
from transformer_engine import transformer_engine_paddle as tex
from transformer_engine.common.recipe import DelayedScaling, Format

from .constants import dist_group_type
from .fp8_buffer import FP8MetaFwdBuffer, FP8MetaBwdBuffer, FP8RecomputeBuffer

__all__ = ["fp8_autocast"]

# FP8 support
_is_fp8_available = None
_reason_for_no_fp8 = ""


def _check_fp8_support() -> Tuple[bool, str]:
    """Return if fp8 support is available"""

    # Check GPU arch
    arch = paddle.device.cuda.get_device_capability()
    if arch >= (9, 0):  # hopper and above
        return True, ""
    if arch < (8, 9):  # pre-ada
        return False, "Device compute capability 8.9 or higher required for FP8 execution."

    # Special handling for Ada
    if tex.get_cublasLt_version() < 120103:
        return False, "CublasLt version 12.1.3.x or higher required for FP8 execution on Ada."
    if not paddle.version.cuda():
        return False, "Could not determine Cuda version. Cuda version 12.1 or higher required for FP8 execution on Ada."
    if tuple(int(v) for v in paddle.version.cuda().split(".")) < (12, 1):
        return False, "Cuda version 12.1 or higher required for FP8 execution on Ada."
    return True, ""


def is_fp8_available() -> Tuple[bool, str]:
    """Return if fp8 support is available"""
    global _is_fp8_available, _reason_for_no_fp8
    if _is_fp8_available is None:
        _is_fp8_available, _reason_for_no_fp8 = _check_fp8_support()
    return _is_fp8_available, _reason_for_no_fp8
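

# An illustrative sketch of how the availability check is typically consumed
# (it mirrors the assertion performed inside `fp8_autocast` below):
#
#     fp8_ok, reason = is_fp8_available()
#     if not fp8_ok:
#         raise RuntimeError(reason)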


class FP8State:
    """Stores FP8 state"""

    def __init__(self):
        self._fp8_enabled = False
        self._fp8_calibration = False
        self._fp8_recipe = None
        self._fp8_distributed_group = None
        self._is_first_fp8_module = False
        self._fp8_autocast_counter = 0
        self._fp8_autocast_depth = 0
        self._fp8_recompute_enabled = False
        self._use_cudagraph = False
        self._fp8_fwd_buffer = FP8MetaFwdBuffer()
        self._fp8_bwd_buffer = FP8MetaBwdBuffer()
        self._fp8_recompute_buffer = FP8RecomputeBuffer()

    def is_fp8_enabled(self) -> bool:
        """Is FP8 enabled"""
        return self._fp8_enabled

    def is_fp8_calibration(self) -> bool:
        """Is FP8 calibration"""
        return self._fp8_calibration

    def get_fp8_recipe(self) -> DelayedScaling:
        """Return the fp8 recipe"""
        return self._fp8_recipe

    @staticmethod
    def get_default_fp8_recipe() -> DelayedScaling:
        """FP8 recipe with default args."""
        return DelayedScaling()

    def get_autocast_id(self) -> int:
        """Returns the number of times of entering the `fp8_autocast` context.
        as a unique ID for different training steps."""
        return self._fp8_autocast_counter

    def is_first_fp8_module(self):
        """Returns `True` only the first time when called multiple
        times from within the same `fp8_autocast` context.
        """
        tmp = self._is_first_fp8_module
        self._is_first_fp8_module = False
        return tmp

    def get_fp8_group(self) -> Union[dist_group_type, None]:
        """Return the fp8 group for scale/amax comm"""
        return self._fp8_distributed_group

    def get_fp8_fwd_buffer(self) -> FP8MetaFwdBuffer:
        """Returns global fp8 forward buffer."""
        return self._fp8_fwd_buffer

    def get_fp8_bwd_buffer(self) -> FP8MetaBwdBuffer:
        """Returns global fp8 backward buffer."""
        return self._fp8_bwd_buffer

    def is_fp8_recompute_enabled(self) -> bool:
        """Is FP8 recompute enabled"""
        return self._fp8_recompute_enabled

    def get_fp8_recompute_buffer(self) -> FP8RecomputeBuffer:
        """Returns global fp8 recompute buffer."""
        return self._fp8_recompute_buffer

    def is_cudagraph_enabled(self) -> bool:
        """Is CUDAGraph enabled"""
        return self._use_cudagraph

    def enable_cudagraph(self):
        """Enable CUDA Graphs. In the current implementation, once CUDA Graphs are
        enabled, they cannot be disabled within the same execution context."""
        if self._fp8_recompute_enabled:
            raise RuntimeError("Recompute is currently not supported together with CUDA Graphs.")
        self._use_cudagraph = True
        self._fp8_fwd_buffer.enable_cudagraph()
        self._fp8_bwd_buffer.enable_cudagraph()

    def enter(
        self,
        enabled: bool,
        calibrating: bool,
        fp8_recipe: Optional[DelayedScaling],
        fp8_group: Optional[dist_group_type],
    ) -> None:
        """Called when entering 'fp8_autocast'"""
        self.saved_states = (
            self._fp8_enabled,
            self._fp8_calibration,
            self._fp8_recipe,
            self._fp8_distributed_group,
            self._is_first_fp8_module,
        )

        self._fp8_enabled = enabled
        self._fp8_calibration = calibrating
        self._fp8_recipe = self.get_default_fp8_recipe() if fp8_recipe is None else fp8_recipe
        self._fp8_distributed_group = fp8_group

        if self._fp8_autocast_depth == 0:
            self._is_first_fp8_module = True
            self._fp8_autocast_counter += 1
        self._fp8_autocast_depth += 1

    def exit(self):
        """Called when exiting 'fp8_autocast'"""
        # Restore saved states
        (
            self._fp8_enabled,
            self._fp8_calibration,
            self._fp8_recipe,
            self._fp8_distributed_group,
            self._is_first_fp8_module,
        ) = self.saved_states

        self._fp8_autocast_depth -= 1

        if self._fp8_autocast_depth == 0:
            self._fp8_fwd_buffer.finalize()


_global_fp8_state = FP8State()


def get_global_fp8_state() -> FP8State:
    """Get global fp8 state"""
    return _global_fp8_state
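

# An illustrative sketch (not itself part of the public API) of how modules consult
# the global state to decide whether to run their FP8 path for the current call:
#
#     state = get_global_fp8_state()
#     if state.is_fp8_enabled():
#         recipe = state.get_fp8_recipe()
#         fp8_group = state.get_fp8_group()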


@contextmanager
def fp8_autocast(
    enabled: bool = False,
    calibrating: bool = False,
    fp8_recipe: Optional[DelayedScaling] = None,
    fp8_group: Optional[dist_group_type] = None,
) -> None:
    """
    Context manager for FP8 usage.

    .. code-block:: python

        with fp8_autocast(enabled=True):
            out = model(inp)
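
    A recipe and a distributed group can also be supplied explicitly. The following is
    a minimal sketch; any `DelayedScaling` arguments not shown keep their defaults:

    .. code-block:: python

        recipe = DelayedScaling(fp8_format=Format.HYBRID, amax_history_len=16)
        with fp8_autocast(enabled=True, fp8_recipe=recipe):
            out = model(inp)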

    .. note::

        Support for FP8 in the Linear layer of Transformer Engine is currently limited to tensors
        with shapes where both dimensions are divisible by 16. In terms of the input to the full
        Transformer network, this typically requires padding sequence length to be multiple of 16.

    .. note::

        When :attr:`fp8_recipe.reduce_amax==True`, any module must not be invoked more than once
        inside a single `fp8_autocast` region. This is unsupported behavior because the amax
        reduction is handled during the exit of the `fp8_autocast` context. Calling the same
        module more than once inside an `fp8_autocast` region overrides the amax tensors
        before reduction can occur.

    Parameters
    ----------
    enabled: bool, default = `False`
             whether or not to enable fp8
    calibrating: bool, default = `False`
                 calibration mode allows collecting statistics such as amax and scale
                 data of fp8 tensors even when executing without fp8 enabled. This is
                 useful for saving an inference ready fp8 checkpoint while training
                 using a higher precision.
    fp8_recipe: recipe.DelayedScaling, default = `None`
                recipe used for FP8 training.
    fp8_group: paddle.distributed.collective.Group, default = `None`
               distributed group over which amaxes for the fp8 tensors
               are reduced at the end of each training step.
    """
    try:
        _global_fp8_state.enter(enabled, calibrating, fp8_recipe, fp8_group)

        if enabled:
            fp8_available, reason_for_no_fp8 = is_fp8_available()
            assert fp8_available, reason_for_no_fp8
        yield
    finally:
        _global_fp8_state.exit()


def get_fp8_te_dtype(fp8_recipe: DelayedScaling, fprop_tensor: bool = True) -> tex.DType:
    """Get fp8 data type according to recipe and tensor"""
    if fp8_recipe.fp8_format == Format.E4M3 or (
        fp8_recipe.fp8_format == Format.HYBRID and fprop_tensor
    ):
        return tex.DType.kFloat8E4M3
    return tex.DType.kFloat8E5M2
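
# Illustrative mapping, derived from the branches above: with `Format.HYBRID`,
# forward tensors use E4M3 while backward (gradient) tensors use E5M2:
#
#     get_fp8_te_dtype(DelayedScaling(fp8_format=Format.HYBRID), fprop_tensor=True)
#     # -> tex.DType.kFloat8E4M3
#     get_fp8_te_dtype(DelayedScaling(fp8_format=Format.HYBRID), fprop_tensor=False)
#     # -> tex.DType.kFloat8E5M2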


def amax_and_scale_update(
    fp8_meta: Dict[str, Any],
    fwd_update: bool,
    update_weight_scale_inv: bool = True,
    current_step_id_tensor: Optional[paddle.Tensor] = None,
    use_cudagraph: bool = False,
) -> None:
    """Updates fp8 amaxes/scales for fwd | bwd."""
    amax_compute = fp8_meta["recipe"].amax_compute_algo
    sf_compute = fp8_meta["recipe"].scaling_factor_compute_algo
    fp8_meta_tensor_key = "scaling_fwd" if fwd_update else "scaling_bwd"
    fp8_max_key = "fp8_max_fwd" if fwd_update else "fp8_max_bwd"

    if not callable(amax_compute) and sf_compute is None:
        non_weight_mask = fp8_meta[fp8_meta_tensor_key].non_weight_mask

        if use_cudagraph:
            tex.amax_and_scale_update_inplace_legacy(
                _amax_history=fp8_meta[fp8_meta_tensor_key].amax_history,
                _scale=fp8_meta[fp8_meta_tensor_key].scale,
                _scale_inv=fp8_meta[fp8_meta_tensor_key].scale_inv,
                non_weight_mask=non_weight_mask,
                current_step_id_tensor=current_step_id_tensor,
                update_weight_scale_inv=update_weight_scale_inv,
                fwd_update=fwd_update,
                fp8_max=fp8_meta[fp8_max_key],
                margin=float(fp8_meta["recipe"].margin),
                amax_compute=amax_compute,
            )
        else:
            if update_weight_scale_inv:
                # Pass an empty tensor (treated as a null mask by the kernel) so that
                # weight scale_inv values are updated as well.
                non_weight_mask = paddle.empty([0])
            tex.amax_and_scale_update_inplace(
                _amax_history=fp8_meta[fp8_meta_tensor_key].amax_history,
                _scale=fp8_meta[fp8_meta_tensor_key].scale,
                _scale_inv=fp8_meta[fp8_meta_tensor_key].scale_inv,
                non_weight_mask=non_weight_mask,
                fp8_dtype=int(get_fp8_te_dtype(fp8_meta["recipe"], fwd_update)),
                margin=float(fp8_meta["recipe"].margin),
                amax_compute=amax_compute,
            )

    else:
        raise ValueError(
            "We only support the fp8 recipe with 'max' or 'most_recent' "
            "amax_compute_algo and default scaling_factor_compute_algo at this "
            "moment."
        )
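
# For reference, the default delayed-scaling update performed by the fused kernels
# above is roughly the following (a sketch, not the exact kernel code):
#
#     amax      = max(amax_history)      # or the most recent entry for "most_recent"
#     exp       = floor(log2(fp8_max / amax)) - margin
#     scale     = 2 ** exp               # left unchanged where amax is zero or non-finite
#     scale_inv = 1 / scale              # weight entries skipped unless update_weight_scale_inv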


class FP8TensorMeta:
    """Holds FP8 scaling and amax history for FP8 layers"""

    def __init__(self, is_forward: bool):
        self.scale = paddle.Tensor()
        self.scale_inv = paddle.Tensor()
        self.amax_history = paddle.Tensor()
        self.non_weight_mask = paddle.Tensor()
        self.is_initialized = False
        self.is_forward = is_forward

    def get_non_weight_mask(self, num_gemms: int):
        """Needed for calculation of scale inverses to
        preserve scale_inv when caching FP8 weights"""
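        # Illustrative example: with num_gemms=2 the forward mask is
        # [True, False, True, True, False, True]; the False entries mark weight
        # tensors whose scale_inv is kept when cached FP8 weights are reused.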
        if self.is_forward:
            # [True, False, True]: -> [input, weight, output]
            return paddle.to_tensor([True, False, True] * num_gemms)
        # [True, True]: -> [grad_output, grad_input]
        return paddle.to_tensor([True, True] * num_gemms)

    def prepare(self, num_gemms: int, amax_history_len: int) -> None:
        """Prepare scales and amax tensors. It is called during fprop in each iteration.
        If the meta tensors are not initialized yet, initialization is performed. If already
        initialized, resize the meta tensors if amax_history_len has changed."""

        if self.is_initialized:
            # Handle changed amax history size.
            curr_len = self.amax_history.shape[0]
            num_fp8_tensors = self.amax_history.shape[1]
            if amax_history_len < curr_len:
                self.amax_history = self.amax_history[:amax_history_len]
            elif amax_history_len > curr_len:
                extra_rows = amax_history_len - curr_len
                self.amax_history = paddle.concat(
                    [
                        self.amax_history,
                        paddle.zeros((extra_rows, num_fp8_tensors), dtype="float32"),
                    ],
                    axis=0,
                )
            return

        # Max. number of fp8 tensors per GEMM = 3 (input, weight, output) for fwd and
        # 2 (grad_output and grad_input) for bwd
        num_fp8_tensors = num_gemms * 3 if self.is_forward else num_gemms * 2

        self.scale = paddle.ones(num_fp8_tensors, dtype="float32")
        self.scale_inv = paddle.ones(num_fp8_tensors, dtype="float32")
        self.amax_history = paddle.zeros([amax_history_len, num_fp8_tensors], dtype="float32")
        self.non_weight_mask = self.get_non_weight_mask(num_gemms=num_gemms)

        self.is_initialized = True

    def to_numpy(self):
        """Convert FP8 meta tensors to numpy."""
        assert self.is_initialized, "FP8TensorMeta is not initialized yet."
        return {
            "scale": self.scale.numpy(),
            "scale_inv": self.scale_inv.numpy(),
            "amax_history": self.amax_history.numpy(),
        }

    def from_numpy(self, data: Dict[str, np.ndarray]):
        """Set FP8 meta tensors from numpy"""
        self.scale = paddle.to_tensor(data["scale"])
        self.scale_inv = paddle.to_tensor(data["scale_inv"])
        self.amax_history = paddle.to_tensor(data["amax_history"])

        num_fp8_tensors = self.scale.shape[0]
        num_gemms = num_fp8_tensors // 3 if self.is_forward else num_fp8_tensors // 2
        self.non_weight_mask = self.get_non_weight_mask(num_gemms=num_gemms)

        self.is_initialized = True
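

# A minimal usage sketch for checkpointing the meta tensors with the helpers above
# (the names `meta` and `state` are illustrative only):
#
#     meta = FP8TensorMeta(is_forward=True)
#     meta.prepare(num_gemms=1, amax_history_len=16)
#     state = meta.to_numpy()        # {"scale", "scale_inv", "amax_history"}
#
#     restored = FP8TensorMeta(is_forward=True)
#     restored.from_numpy(state)     # also rebuilds non_weight_mask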