# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FP8 utilities for TransformerEngine"""

import copy
from contextlib import contextmanager
from typing import Tuple, Optional, Dict, Any

import numpy as np

import paddle
import transformer_engine_paddle as tex
from transformer_engine.common.recipe import DelayedScaling, Format

# FP8 support
_is_fp8_available = None
_reason_for_no_fp8 = ""


def _check_fp8_support() -> Tuple[bool, str]:
    """Return if fp8 support is available"""

    # Check GPU arch
    arch = paddle.device.cuda.get_device_capability()
    if arch >= (9, 0):    # Hopper and above
        return True, ""
    if arch < (8, 9):    # pre-Ada
        return False, "Device compute capability 8.9 or higher required for FP8 execution."

    # Special handling for Ada
    if tex.get_cublasLt_version() < 120103:
        return False, "cuBLASLt version 12.1.3 or higher required for FP8 execution on Ada."
    if not paddle.version.cuda():
        return False, "CUDA version 12.1 or higher required for FP8 execution on Ada."
    if tuple(int(v) for v in paddle.version.cuda().split(".")) < (12, 1):
        return False, "CUDA version 12.1 or higher required for FP8 execution on Ada."
    return True, ""


def is_fp8_available() -> Tuple[bool, str]:
    """Return if fp8 support is available"""
    global _is_fp8_available, _reason_for_no_fp8
    if _is_fp8_available is None:
        _is_fp8_available, _reason_for_no_fp8 = _check_fp8_support()
    return _is_fp8_available, _reason_for_no_fp8
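

# A minimal sketch of how callers might guard FP8 code paths (variable names
# are illustrative):
#
#     fp8_available, reason = is_fp8_available()
#     if not fp8_available:
#         print(f"Running without FP8: {reason}")

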
class FP8State:
    """Stores FP8 state"""

    def __init__(self):
        self.fp8_enabled = False
        self.fp8_calibration = False
        self.fp8_recipe = None

    def is_fp8_enabled(self) -> bool:
        """Is FP8 enabled"""
        return self.fp8_enabled

    def is_fp8_calibration(self) -> bool:
        """Is FP8 calibration"""
        return self.fp8_calibration

    def get_fp8_recipe(self) -> DelayedScaling:
        """Return the fp8 recipe"""
        return self.fp8_recipe

    @staticmethod
    def get_default_fp8_recipe() -> DelayedScaling:
        """FP8 recipe if not provided by user
        Margin = 0, interval = 1, E4M3
        """
        return DelayedScaling()


_global_fp8_state = FP8State()


def get_global_fp8_state() -> FP8State:
    """Get global fp8 state"""
    return _global_fp8_state
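

# Sketch: TransformerEngine paddle layers consult the global state at run time
# to decide how to execute, e.g.
#
#     state = get_global_fp8_state()
#     if state.is_fp8_enabled():
#         recipe = state.get_fp8_recipe()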


@contextmanager
def fp8_autocast(
    enabled: bool = False,
    calibrating: bool = False,
    fp8_recipe: Optional[DelayedScaling] = None,
) -> None:
    """
    Context manager for FP8 usage.
    """

    global _global_fp8_state
    saved_fp8_state = copy.deepcopy(_global_fp8_state)
    try:
        _global_fp8_state.fp8_enabled = enabled
        _global_fp8_state.fp8_calibration = calibrating
        _global_fp8_state.fp8_recipe = (FP8State.get_default_fp8_recipe()
                                        if fp8_recipe is None else fp8_recipe)

        if enabled:
            fp8_available, reason_for_no_fp8 = is_fp8_available()
            assert fp8_available, reason_for_no_fp8
        yield
    finally:
        _global_fp8_state = saved_fp8_state
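

# Usage sketch (`layer` stands in for any FP8-capable TransformerEngine paddle
# layer and is an assumption here; DelayedScaling and Format are imported above):
#
#     recipe = DelayedScaling(margin=0, fp8_format=Format.HYBRID,
#                             amax_history_len=16, amax_compute_algo="max")
#     with fp8_autocast(enabled=True, fp8_recipe=recipe):
#         out = layer(inp)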


def get_fp8_te_dtype(fp8_recipe: DelayedScaling, fprop_tensor: bool = True) -> tex.DType:
    """Get fp8 data type according to recipe and tensor"""
    if fp8_recipe.fp8_format == Format.E4M3 or (fp8_recipe.fp8_format == Format.HYBRID
                                                and fprop_tensor):
        return tex.DType.kFloat8E4M3
    return tex.DType.kFloat8E5M2
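

# Sketch: with a HYBRID-format recipe, forward tensors use E4M3 and gradient
# tensors use E5M2:
#
#     recipe = DelayedScaling(fp8_format=Format.HYBRID)
#     get_fp8_te_dtype(recipe, fprop_tensor=True)     # -> tex.DType.kFloat8E4M3
#     get_fp8_te_dtype(recipe, fprop_tensor=False)    # -> tex.DType.kFloat8E5M2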


def amax_and_scale_update(
    fp8_meta: Dict[str, Any],
    fwd_update: bool,
) -> None:
    """Updates fp8 amaxes/scales for fwd | bwd."""
    amax_compute = fp8_meta["recipe"].amax_compute_algo
    sf_compute = fp8_meta["recipe"].scaling_factor_compute_algo
    fp8_meta_tensor_key = "scaling_fwd" if fwd_update else "scaling_bwd"
    fp8_max_key = "fp8_max_fwd" if fwd_update else "fp8_max_bwd"

    if not callable(amax_compute) and sf_compute is None:
        # Obtain amax from history
        amax_history = fp8_meta[fp8_meta_tensor_key].amax_history
        if amax_compute == "max":
            amax = paddle.max(amax_history, axis=0)
        else:    # amax_compute_algo == "most_recent"
            amax = amax_history[0]

        # Update amax history and set next amax to zero
        if amax_history.shape[0] > 1:
            amax_history = paddle.roll(amax_history, -1, 0)
        amax_history[0] = 0.0
        fp8_meta[fp8_meta_tensor_key].amax_history = amax_history

        # Update scaling factor
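        # tex.update_scale is assumed here to implement the standard
        # delayed-scaling formula (a sketch of the per-tensor math, not the
        # actual kernel):
        #     exp   = floor(log2(fp8_max / amax)) - margin
        #     scale = 2 ** exp    # previous scale kept if amax is 0 or non-finite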
        fp8_meta[fp8_meta_tensor_key].scale = tex.update_scale(
            amax=amax,
            scale=fp8_meta[fp8_meta_tensor_key].scale,
            fp8_max=fp8_meta[fp8_max_key],
            margin=float(fp8_meta["recipe"].margin))

        # Update scale_inv
        fp8_meta[fp8_meta_tensor_key].scale_inv = 1.0 / fp8_meta[fp8_meta_tensor_key].scale

    else:
        raise ValueError("Only FP8 recipes with 'max' or 'most_recent' amax_compute_algo "
                         "and the default scaling_factor_compute_algo are currently "
                         "supported.")


class FP8TensorMeta:
    """Holds FP8 scaling and amax history for FP8 layers"""

    def __init__(self, is_forward: bool):
        self.scale = paddle.Tensor()
        self.scale_inv = paddle.Tensor()
        self.amax_history = paddle.Tensor()
        self.is_initialized = False
        self.is_forward = is_forward

    def prepare(self, num_gemms: int, amax_history_len: int) -> None:
        """Prepare scales and amax tensors. It is called during fprop in each iteration.
        If the meta tensors are not initialized yet, initialization is performed. If already
        initialized, resize the meta tensors if amax_history_len has changed."""

        if self.is_initialized:
            # Handle changed amax history size.
            curr_len = self.amax_history.shape[0]
            num_fp8_tensors = self.amax_history.shape[1]
            if amax_history_len < curr_len:
                self.amax_history = self.amax_history[:amax_history_len]
            elif amax_history_len > curr_len:
                extra_rows = amax_history_len - curr_len
                self.amax_history = paddle.concat(
                    [
                        self.amax_history,
                        paddle.zeros([extra_rows, num_fp8_tensors], dtype='float32'),
                    ],
                    axis=0,
                )
            return

        # Max. number of fp8 tensors per GEMM = 3 (input, weight, output) for fwd and
        # 2 (grad_output and grad_input) for bwd
        num_fp8_tensors = (num_gemms * 3 if self.is_forward else num_gemms * 2)

        self.scale = paddle.ones([num_fp8_tensors], dtype='float32')
        self.scale_inv = paddle.ones([num_fp8_tensors], dtype='float32')
        self.amax_history = paddle.zeros([amax_history_len, num_fp8_tensors], dtype='float32')
        self.is_initialized = True

    def to_numpy(self):
        """Convert FP8 meta tensors to numpy."""
        assert self.is_initialized, "FP8TensorMeta is not initialized yet."
        return {
            'scale': self.scale.numpy(),
            'scale_inv': self.scale_inv.numpy(),
            'amax_history': self.amax_history.numpy(),
        }

    def from_numpy(self, data: Dict[str, np.ndarray]):
        """Set FP8 meta tensors from numpy"""
        self.scale = paddle.to_tensor(data['scale'])
        self.scale_inv = paddle.to_tensor(data['scale_inv'])
        self.amax_history = paddle.to_tensor(data['amax_history'])
        self.is_initialized = True
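

# Round-trip sketch for checkpointing FP8TensorMeta state (names are
# illustrative):
#
#     meta = FP8TensorMeta(is_forward=True)
#     meta.prepare(num_gemms=1, amax_history_len=16)    # 3 FP8 tensors for fwd
#     saved = meta.to_numpy()
#     restored = FP8TensorMeta(is_forward=True)
#     restored.from_numpy(saved)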