# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Methods needed for recompute."""

import os
import inspect

from paddle.distributed import fleet

from .constants import RecomputeFunctionNames
from .fp8 import get_global_fp8_state


__all__ = ['recompute']


_DISABLE_RECOMPUTE = int(os.getenv("NVTE_DISABLE_RECOMPUTE", "0"))


def is_in_recompute_phase():
    """Return True if the current call originates from a recompute backward pass.

    Paddle has two recompute mechanisms, and this walks the Python call stack
    looking for either of their entry points (listed in
    ``RecomputeFunctionNames``):
    (1) ``RecomputeFunction`` — the recomputed function is invoked from
        ``RecomputeFunction.backward``;
    (2) ``paddle.autograd.saved_tensors_hooks`` — the recomputed function is
        invoked from ``unpack``.
    Always returns False when NVTE_DISABLE_RECOMPUTE is set."""
    if _DISABLE_RECOMPUTE:
        return False
    # Start from our caller's frame and walk outward until a known
    # recompute entry point is found or the stack is exhausted.
    frame = inspect.currentframe().f_back
    found = False
    while frame is not None and not found:
        found = frame.f_code.co_name in RecomputeFunctionNames
        frame = frame.f_back
    return found


def recompute(function, *args, **kwargs):
    """
    This is a wrapper of paddle.distributed.fleet.utils.recompute. It provides necessary
    state information for fp8 layers.

    Parameters
    ----------
    function: Callable
            paddle module used to run the forward and backward passes using
            the specified :attr:`args` and :attr:`kwargs`.
    args : tuple
            tuple of paddle tensors for inputs to :attr:`function`.
    kwargs : dict
            dictionary of string keys for keyword arguments to :attr:`function`.

    Raises
    ------
    AssertionError
            if recompute is disabled via the NVTE_DISABLE_RECOMPUTE env var.
    """
    assert not _DISABLE_RECOMPUTE, "Recompute is disabled. " \
        f"Got NVTE_DISABLE_RECOMPUTE={_DISABLE_RECOMPUTE}."

    global_fp8_state = get_global_fp8_state()

    # Flag the global FP8 state so fp8 layers know their forward pass is being
    # re-run for recompute. Save and restore the previous value (instead of
    # hard-resetting to False) so nested recompute calls and exceptions leave
    # the state consistent. For the common non-nested case the restored value
    # is False, matching the previous behavior.
    previous_flag = getattr(global_fp8_state, "_fp8_recompute_enabled", False)
    try:
        global_fp8_state._fp8_recompute_enabled = True
        outputs = fleet.utils.recompute(function, *args, **kwargs)
    finally:
        global_fp8_state._fp8_recompute_enabled = previous_flag

    return outputs