# Copyright 2025 HuggingFace Inc., Daniel Han-Chen & the Unsloth team and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers and PEFT library,
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/modeling_utils.py
# https://github.com/huggingface/peft/blob/v0.10.0/src/peft/utils/other.py
# and the Unsloth library.
# https://github.com/unslothai/unsloth/blob/July-2024/unsloth/models/_utils.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
from collections.abc import Callable
from functools import WRAPPER_ASSIGNMENTS, partial, wraps
from types import MethodType
from typing import TYPE_CHECKING, Any, Optional, Union

import torch

from ...extras import logging
from ...extras.constants import LAYERNORM_NAMES


if TYPE_CHECKING:
    from transformers import PreTrainedModel

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def get_unsloth_gradient_checkpointing_func() -> Callable:
    class UnslothGradientCheckpointing(torch.autograd.Function):
        r"""Saves VRAM by smartly offloading to RAM."""

        @staticmethod
        @torch.cuda.amp.custom_fwd
        def forward(
            ctx: "torch.autograd.Function",
            forward_function: "torch.nn.Module",
            hidden_states: "torch.Tensor",
            *args: Union["torch.Tensor", Any],
        ) -> "torch.Tensor":
            saved_hidden_states = hidden_states.to("cpu", non_blocking=True)  # offload the input to CPU RAM
            with torch.no_grad():
                outputs = forward_function(hidden_states, *args)

            ctx.save_for_backward(saved_hidden_states)
            ctx.forward_function = forward_function
            ctx.args = args
            return outputs

        @staticmethod
        @torch.cuda.amp.custom_bwd
        def backward(ctx: "torch.autograd.Function", grad_output: "torch.Tensor") -> tuple:
            (hidden_states,) = ctx.saved_tensors
            hidden_states = hidden_states.to("cuda", non_blocking=True).detach()  # reload the offloaded input to GPU
            hidden_states.requires_grad_(True)
            with torch.enable_grad():
                outputs = ctx.forward_function(hidden_states, *ctx.args)
                output = outputs[0] if isinstance(outputs, tuple) else outputs

            torch.autograd.backward(output, grad_output)
            return (None, hidden_states.grad) + (None,) * len(ctx.args)

    return UnslothGradientCheckpointing.apply
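
# Illustrative usage (a sketch, not part of this module's API; `decoder_layer` is hypothetical):
# the returned callable takes the same positional arguments as `torch.utils.checkpoint.checkpoint`,
# i.e. a layer forward callable followed by the hidden states and any extra inputs:
#
#     checkpoint_fn = get_unsloth_gradient_checkpointing_func()
#     hidden_states = checkpoint_fn(decoder_layer.__call__, hidden_states, attention_mask)
#
# The forward pass runs under `torch.no_grad()` with the input offloaded to CPU; the backward
# pass reloads the input onto the GPU and recomputes the layer to obtain gradients.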


def get_custom_gradient_checkpointing_func(gradient_checkpointing_func: Callable) -> Callable:
    r"""Only applies gradient checkpointing to trainable layers."""

    @wraps(gradient_checkpointing_func, assigned=WRAPPER_ASSIGNMENTS + ("__self__",))
    def custom_gradient_checkpointing_func(func: Callable, *args: Union["torch.Tensor", Any], **kwargs):
        if isinstance(func, partial):
            module: torch.nn.Module = func.func.__self__
        else:
            module: torch.nn.Module = func.__self__

        has_grad = False
        if any(param.requires_grad for param in module.parameters()):
            has_grad = True
            for arg in args:
                if torch.is_tensor(arg) and torch.is_floating_point(arg):
                    arg.requires_grad_(True)
                    break  # assume the first tensor is always the hidden states

        if has_grad:
            return gradient_checkpointing_func(func, *args, **kwargs)
        else:
            return func(*args, **kwargs)

    return custom_gradient_checkpointing_func
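
# Illustrative usage (a sketch; `gc_func` and `decoder_layer` are hypothetical names): wrapping
# the stock PyTorch checkpoint function so that fully frozen layers run normally instead of
# being recomputed in backward:
#
#     from torch.utils.checkpoint import checkpoint
#     gc_func = get_custom_gradient_checkpointing_func(partial(checkpoint, use_reentrant=True))
#     hidden_states = gc_func(decoder_layer.__call__, hidden_states, attention_mask)
#
# The wrapper also marks the first floating-point tensor argument as requiring grad, which keeps
# checkpointing usable when the embedding layer is frozen (e.g. LoRA training).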


def _gradient_checkpointing_enable(
    self: "PreTrainedModel",
    gradient_checkpointing_kwargs: Optional[dict[str, Any]] = None,
    use_unsloth_gc: bool = False,
) -> None:
    r"""Activates gradient checkpointing for the current model.

    Modified from the original method to enable gradient checkpointing for the block-wise optimizer.
    """
    from torch.utils.checkpoint import checkpoint

    if not self.supports_gradient_checkpointing:
        raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")

    if gradient_checkpointing_kwargs is None:
        gradient_checkpointing_kwargs = {"use_reentrant": True}

    if use_unsloth_gc:
        gradient_checkpointing_func = get_unsloth_gradient_checkpointing_func()
    else:
        gradient_checkpointing_func = partial(checkpoint, **gradient_checkpointing_kwargs)

    gradient_checkpointing_func = get_custom_gradient_checkpointing_func(gradient_checkpointing_func)
    if "value" in inspect.signature(self._set_gradient_checkpointing).parameters:  # old GC format
        self.apply(partial(self._set_gradient_checkpointing, value=True))
        self.enable_input_require_grads()
        logger.warning_rank0_once("You are using the old GC format, some features (e.g. BAdam) will be unavailable.")
    else:  # input require grads have already been enabled elsewhere
        self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)
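
# This function is not meant to be called directly; `prepare_model_for_training` below binds it
# to the model so that `model.gradient_checkpointing_enable(...)` dispatches here, roughly:
#
#     model.gradient_checkpointing_enable = MethodType(
#         partial(_gradient_checkpointing_enable, use_unsloth_gc=model_args.use_unsloth_gc), model
#     )
#     model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": True})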


def _fp32_forward_post_hook(
    module: "torch.nn.Module", args: tuple["torch.Tensor"], output: "torch.Tensor"
) -> "torch.Tensor":
    return output.to(torch.float32)


def prepare_model_for_training(model: "PreTrainedModel", model_args: "ModelArguments") -> None:
    r"""Prepare the model before training.

    Include:
    (1) cast the layernorm in fp32
    (2) make output embedding layer require grads
    (3) add the upcasting of the lm_head in fp32.
    """
    if model_args.upcast_layernorm:
        logger.info_rank0("Upcasting layernorm weights in float32.")
        for name, param in model.named_parameters():
            if param.ndim == 1 and any(ln_name in name for ln_name in LAYERNORM_NAMES):
                param.data = param.data.to(torch.float32)

    if (
        os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
        and int(os.environ.get("FSDP_VERSION", "1")) == 2
    ):
        model_args.use_reentrant_gc = False
        logger.warning_rank0("You are using fsdp2, `use_reentrant_gc` has been set to False.")

    if not model_args.disable_gradient_checkpointing:
        if not getattr(model, "supports_gradient_checkpointing", False):
            logger.warning_rank0("Current model does not support gradient checkpointing.")
        else:
            # use_reentrant=False might increase VRAM usage (has not been empirically verified yet)
            # According to: https://github.com/huggingface/transformers/issues/28339
            gradient_checkpointing_enable = partial(
                _gradient_checkpointing_enable, use_unsloth_gc=model_args.use_unsloth_gc
            )
            model.gradient_checkpointing_enable = MethodType(gradient_checkpointing_enable, model)
            model.gradient_checkpointing_enable(
                gradient_checkpointing_kwargs={"use_reentrant": model_args.use_reentrant_gc}
            )
            setattr(model.config, "use_cache", False)  # turn off when gradient checkpointing is enabled
            logger.info_rank0("Gradient checkpointing enabled.")

    if model_args.upcast_lmhead_output:
        output_layer = model.get_output_embeddings()
        if isinstance(output_layer, torch.nn.Linear) and output_layer.weight.dtype != torch.float32:
            logger.info_rank0("Upcasting lm_head outputs in float32.")
            output_layer.register_forward_hook(_fp32_forward_post_hook)
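

# End-to-end usage (illustrative sketch; in LlamaFactory the model loader wires this up, and
# `ModelArguments` carries the flags referenced above):
#
#     model = AutoModelForCausalLM.from_pretrained("path/to/model")
#     prepare_model_for_training(model, model_args)
#
# Afterwards gradient checkpointing is enabled (unless `disable_gradient_checkpointing` is set),
# `model.config.use_cache` is False, layernorm weights are optionally upcast to fp32, and the
# lm_head output is optionally upcast to fp32 via a forward hook.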