# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from typing import TYPE_CHECKING

import torch
from peft import LoraConfig, LoraModel, PeftModel, TaskType, get_peft_model
from transformers.integrations import is_deepspeed_zero3_enabled

from ..extras import logging
from .model_utils.misc import find_all_linear_modules, find_expanded_modules
from .model_utils.quantization import QuantizationMethod
from .model_utils.unsloth import get_unsloth_peft_model, load_unsloth_peft_model
from .model_utils.visual import COMPOSITE_MODELS, get_forbidden_modules, patch_target_modules


if TYPE_CHECKING:
    from transformers import PretrainedConfig, PreTrainedModel

    from ..hparams import FinetuningArguments, ModelArguments


# Module-level logger (rank-zero aware) shared by all helpers in this file.
logger = logging.get_logger(__name__)


def _setup_full_tuning(
    model: "PreTrainedModel",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
    cast_trainable_params_to_fp32: bool,
) -> None:
    r"""Prepare the model for full-parameter fine-tuning.

    Freezes every parameter whose name matches a forbidden module (as reported
    by ``get_forbidden_modules``) and optionally upcasts the remaining
    trainable parameters to float32. No-op when ``is_trainable`` is False.
    """
    if not is_trainable:
        return

    logger.info_rank0("Fine-tuning method: Full")
    forbidden_modules = get_forbidden_modules(model.config, finetuning_args)
    for name, param in model.named_parameters():
        is_forbidden = any(module_keyword in name for module_keyword in forbidden_modules)
        if is_forbidden:
            param.requires_grad_(False)
        elif cast_trainable_params_to_fp32:
            param.data = param.data.to(torch.float32)


def _setup_freeze_tuning(
    model: "PreTrainedModel",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
    cast_trainable_params_to_fp32: bool,
) -> None:
    r"""Prepare the model for freeze (partial-parameter) fine-tuning.

    Selects a subset of decoder layers (plus optional extra modules) to keep
    trainable and freezes everything else. Raises ``ValueError`` when the model
    does not expose a layer count or when a requested module name is unknown.
    No-op when ``is_trainable`` is False.
    """
    if not is_trainable:
        return

    logger.info_rank0("Fine-tuning method: Freeze")
    # Composite (multimodal) models keep the text backbone config under `text_config`.
    config = getattr(model.config, "text_config", model.config)

    num_layers = (
        getattr(config, "num_hidden_layers", None)
        or getattr(config, "num_layers", None)
        or getattr(config, "n_layer", None)
    )
    if not num_layers:
        raise ValueError("Current model does not support freeze tuning.")

    if finetuning_args.use_llama_pro:
        if num_layers % finetuning_args.freeze_trainable_layers != 0:
            raise ValueError(
                f"`num_layers` {num_layers} should be "
                f"divisible by `num_layer_trainable` {finetuning_args.freeze_trainable_layers}."
            )

        # LLaMA-Pro: spread the trainable layers evenly across the stack.
        step = num_layers // finetuning_args.freeze_trainable_layers
        active_layer_ids = range(step - 1, num_layers + step - 1, step)
    elif finetuning_args.freeze_trainable_layers > 0:
        # Positive count: fine-tune the last n layers.
        active_layer_ids = range(max(0, num_layers - finetuning_args.freeze_trainable_layers), num_layers)
    else:
        # Non-positive count: fine-tune the first |n| layers.
        active_layer_ids = range(min(-finetuning_args.freeze_trainable_layers, num_layers))

    hidden_modules = set()
    non_hidden_modules = set()
    for name, _ in model.named_parameters():
        if ".0." in name:
            hidden_modules.add(name.split(".0.")[-1].split(".")[0])
        elif ".1." in name:  # MoD models start their layers from index 1
            hidden_modules.add(name.split(".1.")[-1].split(".")[0])

        if re.search(r"\.\d+\.", name) is None:
            non_hidden_modules.add(name.split(".")[-2])  # strip the weight/bias suffix

    trainable_layers = []
    for module_name in finetuning_args.freeze_trainable_modules:
        if module_name != "all" and module_name not in hidden_modules:
            raise ValueError(
                "Module {} is not found, please choose from {}".format(module_name, ", ".join(hidden_modules))
            )

        suffix = module_name if module_name != "all" else ""
        trainable_layers.extend(f".{idx:d}.{suffix}" for idx in active_layer_ids)

    if finetuning_args.freeze_extra_modules:
        for module_name in finetuning_args.freeze_extra_modules:
            if module_name not in non_hidden_modules:
                raise ValueError(
                    "Module {} is not found, please choose from {}".format(module_name, ", ".join(non_hidden_modules))
                )

            trainable_layers.append(module_name)

    # Keep the multimodal projector trainable unless it was explicitly frozen.
    model_type = getattr(model.config, "model_type", None)
    if not finetuning_args.freeze_multi_modal_projector and model_type in COMPOSITE_MODELS:
        trainable_layers.append(COMPOSITE_MODELS[model_type].projector_key)

    forbidden_modules = get_forbidden_modules(model.config, finetuning_args)
    for name, param in model.named_parameters():
        keep_trainable = any(pattern in name for pattern in trainable_layers) and not any(
            forbidden in name for forbidden in forbidden_modules
        )
        if not keep_trainable:
            param.requires_grad_(False)
        elif cast_trainable_params_to_fp32:
            param.data = param.data.to(torch.float32)

    logger.info_rank0("Set trainable layers: {}".format(",".join(trainable_layers)))


def _setup_lora_tuning(
    config: "PretrainedConfig",
    model: "PreTrainedModel",
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
    cast_trainable_params_to_fp32: bool,
) -> "PeftModel":
    r"""Attach LoRA/DoRA adapters to the model.

    Optionally merges previously trained adapters into the base weights,
    resumes training from the last given adapter, or creates a brand-new
    adapter on the resolved target modules. Returns the (possibly wrapped)
    peft model.
    """
    if is_trainable:
        method_name = "DoRA" if finetuning_args.use_dora else "LoRA"
        logger.info_rank0("Fine-tuning method: {}".format(method_name))

    adapter_to_resume = None

    if model_args.adapter_name_or_path is not None:
        # Merging adapters is unstable or unsupported in some setups; in those
        # cases only a single, resumable adapter is accepted.
        is_mergeable = True
        if getattr(model, "quantization_method", None):  # merge lora in quantized model is unstable
            assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter."
            is_mergeable = False

        if is_deepspeed_zero3_enabled():
            assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3."
            is_mergeable = False

        if model_args.use_unsloth:
            assert len(model_args.adapter_name_or_path) == 1, "Unsloth model only accepts a single adapter."
            is_mergeable = False

        if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable):
            # Merge every adapter except the last one, which is resumed.
            *adapter_to_merge, adapter_to_resume = model_args.adapter_name_or_path
        else:
            adapter_to_merge = model_args.adapter_name_or_path

        init_kwargs = {
            "subfolder": model_args.adapter_folder,
            "offload_folder": model_args.offload_folder,
            "cache_dir": model_args.cache_dir,
            "revision": model_args.model_revision,
            "token": model_args.hf_hub_token,
        }

        for adapter in adapter_to_merge:
            model: LoraModel = PeftModel.from_pretrained(model, adapter, **init_kwargs)
            model = model.merge_and_unload()

        if len(adapter_to_merge) > 0:
            logger.info_rank0(f"Merged {len(adapter_to_merge)} adapter(s).")

        if adapter_to_resume is not None:  # resume lora training
            if model_args.use_unsloth:
                model = load_unsloth_peft_model(config, model_args, is_trainable=is_trainable)
            else:
                model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable, **init_kwargs)

        logger.info_rank0("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path)))

    if is_trainable and adapter_to_resume is None:  # create new lora weights while training
        if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
            target_modules = find_all_linear_modules(model, finetuning_args.freeze_vision_tower)
        else:
            target_modules = finetuning_args.lora_target

        if finetuning_args.use_llama_pro:
            target_modules = find_expanded_modules(model, target_modules, finetuning_args.freeze_trainable_layers)

        target_modules = patch_target_modules(model, finetuning_args, target_modules)

        quant_method = getattr(model, "quantization_method", None)
        if finetuning_args.use_dora and quant_method is not None and quant_method != QuantizationMethod.BNB:
            raise ValueError("DoRA is not compatible with PTQ-quantized models.")

        if model_args.resize_vocab and finetuning_args.additional_target is None:
            # Resized embedding layers must stay fully trainable, so register
            # them as modules_to_save rather than LoRA targets.
            input_embeddings = model.get_input_embeddings()
            output_embeddings = model.get_output_embeddings()
            module_names = set()
            for name, module in model.named_modules():
                if module in [input_embeddings, output_embeddings]:
                    module_names.add(name.split(".")[-1])

            finetuning_args.additional_target = module_names
            logger.warning_rank0("Vocab has been resized, add {} to trainable params.".format(",".join(module_names)))

        peft_kwargs = {
            "r": finetuning_args.lora_rank,
            "target_modules": target_modules,
            "lora_alpha": finetuning_args.lora_alpha,
            "lora_dropout": finetuning_args.lora_dropout,
            "use_rslora": finetuning_args.use_rslora,
            "use_dora": finetuning_args.use_dora,
            "modules_to_save": finetuning_args.additional_target,
        }

        if model_args.use_unsloth:
            model = get_unsloth_peft_model(model, model_args, peft_kwargs)
        else:
            if finetuning_args.pissa_init:
                if finetuning_args.pissa_iter == -1:
                    logger.info_rank0("Using PiSSA initialization.")
                    peft_kwargs["init_lora_weights"] = "pissa"
                else:
                    logger.info_rank0(f"Using PiSSA initialization with FSVD steps {finetuning_args.pissa_iter}.")
                    peft_kwargs["init_lora_weights"] = f"pissa_niter_{finetuning_args.pissa_iter}"

            lora_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                inference_mode=False,
                **peft_kwargs,
            )
            model = get_peft_model(model, lora_config)

    if is_trainable and cast_trainable_params_to_fp32:
        for param in model.parameters():
            if param.requires_grad:
                param.data = param.data.to(torch.float32)

    return model


def init_adapter(
    config: "PretrainedConfig",
    model: "PreTrainedModel",
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
) -> "PreTrainedModel":
    r"""Initialize the adapters.

    Support full-parameter, freeze and LoRA training.

    Note that the trainable parameters must be cast to float32.
    """
    if is_trainable and getattr(model, "quantization_method", None) is not None:
        if finetuning_args.finetuning_type != "lora":
            raise ValueError("Quantized models can only be used for the LoRA tuning.")

        if finetuning_args.pissa_init:
            raise ValueError("Cannot initialize PiSSA adapter on quantized models.")

    # Decide whether trainable params must be upcast to float32:
    # - pure bf16 / BAdam deliberately keep half precision;
    # - DeepSpeed ZeRO-3 (without quantization) already holds params in fp32;
    # - everything else (including qlora) gets upcast.
    cast_trainable_params_to_fp32 = False
    if is_trainable:
        if finetuning_args.pure_bf16 or finetuning_args.use_badam:
            logger.info_rank0("Pure bf16 / BAdam detected, remaining trainable params in half precision.")
        elif model_args.quantization_bit is None and is_deepspeed_zero3_enabled():
            logger.info_rank0("DeepSpeed ZeRO3 detected, remaining trainable params in float32.")
        else:
            logger.info_rank0("Upcasting trainable params to float32.")
            cast_trainable_params_to_fp32 = True

    finetuning_type = finetuning_args.finetuning_type
    if finetuning_type == "full":
        _setup_full_tuning(model, finetuning_args, is_trainable, cast_trainable_params_to_fp32)
    elif finetuning_type == "freeze":
        _setup_freeze_tuning(model, finetuning_args, is_trainable, cast_trainable_params_to_fp32)
    elif finetuning_type == "lora":
        model = _setup_lora_tuning(
            config, model, model_args, finetuning_args, is_trainable, cast_trainable_params_to_fp32
        )
    else:
        raise NotImplementedError(f"Unknown finetuning type: {finetuning_type}.")

    return model