# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from typing import TYPE_CHECKING

import torch
from peft import LoraConfig, LoraModel, OFTConfig, PeftModel, TaskType, get_peft_model
from transformers.integrations import is_deepspeed_zero3_enabled

from ..extras import logging
from ..extras.constants import EngineName
from .model_utils.ktransformers import get_kt_peft_model, load_kt_peft_model
from .model_utils.misc import find_all_linear_modules, find_expanded_modules
from .model_utils.quantization import QuantizationMethod
from .model_utils.unsloth import get_unsloth_peft_model, load_unsloth_peft_model
from .model_utils.visual import COMPOSITE_MODELS, get_forbidden_modules, patch_target_modules


if TYPE_CHECKING:
    from transformers import PretrainedConfig, PreTrainedModel

    from ..hparams import FinetuningArguments, ModelArguments


logger = logging.get_logger(__name__)


def _setup_full_tuning(
    model: "PreTrainedModel",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
    cast_trainable_params_to_fp32: bool,
) -> None:
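    r"""Set up full-parameter fine-tuning.

    Freezes the forbidden modules (e.g. the vision tower of a composite model, depending on
    `finetuning_args`) and optionally upcasts the remaining trainable parameters to float32.
    """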
    if not is_trainable:
        return

    logger.info_rank0("Fine-tuning method: Full")
    forbidden_modules = get_forbidden_modules(model.config, finetuning_args)
    for name, param in model.named_parameters():
        if not any(forbidden_module in name for forbidden_module in forbidden_modules):
            if cast_trainable_params_to_fp32:
                param.data = param.data.to(torch.float32)
        else:
            param.requires_grad_(False)


def _setup_freeze_tuning(
    model: "PreTrainedModel",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
    cast_trainable_params_to_fp32: bool,
) -> None:
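    r"""Set up freeze (partial-parameter) fine-tuning.

    Selects the trainable hidden layers (the last / first n layers, or evenly spaced layers when
    LLaMA Pro is enabled) plus any extra modules, and freezes all remaining parameters.
    """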
    if not is_trainable:
        return

    logger.info_rank0("Fine-tuning method: Freeze")
    if hasattr(model.config, "text_config"):  # composite models
        config = getattr(model.config, "text_config")
    else:
        config = model.config

    num_layers = (
        getattr(config, "num_hidden_layers", None)
        or getattr(config, "num_layers", None)
        or getattr(config, "n_layer", None)
    )
    if not num_layers:
        raise ValueError("Current model does not support freeze tuning.")

    if finetuning_args.use_llama_pro:
        if num_layers % finetuning_args.freeze_trainable_layers != 0:
            raise ValueError(
                f"`num_layers` {num_layers} should be "
                f"divisible by `freeze_trainable_layers` {finetuning_args.freeze_trainable_layers}."
            )

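        # LLaMA Pro: evenly distribute the trainable layers by picking the last layer
        # of every block of `stride` consecutive layers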
        stride = num_layers // finetuning_args.freeze_trainable_layers
        trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
    elif finetuning_args.freeze_trainable_layers > 0:  # fine-tune the last n layers if freeze_trainable_layers > 0
        trainable_layer_ids = range(max(0, num_layers - finetuning_args.freeze_trainable_layers), num_layers)
    else:  # fine-tune the first n layers if freeze_trainable_layers < 0
        trainable_layer_ids = range(min(-finetuning_args.freeze_trainable_layers, num_layers))

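    # collect module names inside the hidden layers (probed from layer 0, or layer 1 for MoD models)
    # and outside them, used to validate the user-specified module names below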
    hidden_modules = set()
    non_hidden_modules = set()
    for name, _ in model.named_parameters():
        if ".0." in name:
            hidden_modules.add(name.split(".0.")[-1].split(".")[0])
        elif ".1." in name:  # MoD starts from layer 1
            hidden_modules.add(name.split(".1.")[-1].split(".")[0])

        if re.search(r"\.\d+\.", name) is None:
            non_hidden_modules.add(name.split(".")[-2])  # remove weight/bias

    trainable_layers = []
    for module_name in finetuning_args.freeze_trainable_modules:
        if module_name != "all" and module_name not in hidden_modules:
            raise ValueError(
                "Module {} is not found, please choose from {}".format(module_name, ", ".join(hidden_modules))
            )

        for idx in trainable_layer_ids:
            trainable_layers.append(".{:d}.{}".format(idx, module_name if module_name != "all" else ""))

    if finetuning_args.freeze_extra_modules:
        for module_name in finetuning_args.freeze_extra_modules:
            if module_name not in non_hidden_modules:
                raise ValueError(
                    "Module {} is not found, please choose from {}".format(module_name, ", ".join(non_hidden_modules))
                )

            trainable_layers.append(module_name)

    model_type = getattr(model.config, "model_type", None)
    if not finetuning_args.freeze_multi_modal_projector and model_type in COMPOSITE_MODELS:
        trainable_layers.append(COMPOSITE_MODELS[model_type].projector_key)

    forbidden_modules = get_forbidden_modules(model.config, finetuning_args)
    for name, param in model.named_parameters():
        if any(trainable_layer in name for trainable_layer in trainable_layers) and not any(
            forbidden_module in name for forbidden_module in forbidden_modules
        ):
            if cast_trainable_params_to_fp32:
                param.data = param.data.to(torch.float32)
        else:
            param.requires_grad_(False)

    logger.info_rank0("Set trainable layers: {}".format(",".join(trainable_layers)))


def _setup_lora_tuning(
    config: "PretrainedConfig",
    model: "PreTrainedModel",
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
    cast_trainable_params_to_fp32: bool,
) -> "PeftModel":
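    r"""Set up LoRA, DoRA or OFT fine-tuning.

    Merges and/or resumes the adapters in `adapter_name_or_path` if provided, otherwise creates a
    new PEFT adapter on the resolved target modules when training, and returns the wrapped model.
    """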
    if is_trainable:
        if finetuning_args.finetuning_type == "oft":
            logger.info_rank0("Fine-tuning method: OFT")
        else:
            logger.info_rank0("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA"))

    adapter_to_resume = None

    if model_args.adapter_name_or_path is not None:
        is_mergeable = True
        if getattr(model, "quantization_method", None):  # merging lora into a quantized model is unstable
            assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter."
            is_mergeable = False

        if is_deepspeed_zero3_enabled():
            assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3."
            is_mergeable = False

        if model_args.use_kt:
            assert len(model_args.adapter_name_or_path) == 1, "KTransformers model only accepts a single adapter."
            is_mergeable = False

        if model_args.use_unsloth:
            assert len(model_args.adapter_name_or_path) == 1, "Unsloth model only accepts a single adapter."
            is_mergeable = False

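        # keep the last adapter for resumed training and merge the remaining adapters into the
        # base model, unless a new adapter is requested or merging is not possible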
        if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable):
            adapter_to_merge = model_args.adapter_name_or_path[:-1]
            adapter_to_resume = model_args.adapter_name_or_path[-1]
        else:
            adapter_to_merge = model_args.adapter_name_or_path

        init_kwargs = {
            "subfolder": model_args.adapter_folder,
            "offload_folder": model_args.offload_folder,
            "cache_dir": model_args.cache_dir,
            "revision": model_args.model_revision,
            "token": model_args.hf_hub_token,
        }

        if model_args.use_kt:
            if model_args.infer_backend != EngineName.KT:
                raise ValueError(
                    "The KTransformers backend is required to infer an adapter fine-tuned by KTransformers."
                )

        for adapter in adapter_to_merge:
            model: LoraModel = PeftModel.from_pretrained(model, adapter, **init_kwargs)
            model = model.merge_and_unload()

        if len(adapter_to_merge) > 0:
            logger.info_rank0(f"Merged {len(adapter_to_merge)} adapter(s).")

        if adapter_to_resume is not None:  # resume lora training
            if model_args.use_kt:
                model = load_kt_peft_model(model_args, model)
            elif model_args.use_unsloth:
                model = load_unsloth_peft_model(config, model_args, finetuning_args, is_trainable=is_trainable)
            else:
                model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable, **init_kwargs)

        logger.info_rank0("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path)))

    if is_trainable and adapter_to_resume is None:  # create new lora weights while training
        if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
            target_modules = find_all_linear_modules(model, finetuning_args.freeze_vision_tower)
        else:
            target_modules = finetuning_args.lora_target

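        # KTransformers: target the dense `mlp.*` and `shared_experts.*` projections instead of the
        # bare names and skip its wrapper modules (generate_linear, prefill_linear, orig_module)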
        if model_args.use_kt:
            new_list = []
            for m in target_modules:
                if m in ("down_proj", "up_proj", "gate_proj"):
                    new_list.extend([f"mlp.{m}", f"shared_experts.{m}"])
                elif m not in ("generate_linear", "orig_module", "prefill_linear"):
                    new_list.append(m)

            target_modules[:] = new_list

        if finetuning_args.use_llama_pro:
            target_modules = find_expanded_modules(model, target_modules, finetuning_args.freeze_trainable_layers)

        target_modules = patch_target_modules(model, finetuning_args, target_modules)

        if (
            finetuning_args.use_dora
            and getattr(model, "quantization_method", None) is not None
            and getattr(model, "quantization_method", None) != QuantizationMethod.BNB
        ):
            raise ValueError("DoRA is not compatible with PTQ-quantized models.")

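        # if the vocab was resized without `additional_target`, also train and save the input and
        # output embedding layers so the resized embeddings are kept with the adapter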
        if model_args.resize_vocab and finetuning_args.additional_target is None:
            input_embeddings = model.get_input_embeddings()
            output_embeddings = model.get_output_embeddings()
            module_names = set()
            for name, module in model.named_modules():
                if module in [input_embeddings, output_embeddings]:
                    module_names.add(name.split(".")[-1])

            finetuning_args.additional_target = module_names
            logger.warning_rank0("Vocab has been resized, adding {} to trainable params.".format(",".join(module_names)))

        if finetuning_args.finetuning_type == "lora":
            peft_kwargs = {
                "r": finetuning_args.lora_rank,
                "target_modules": target_modules,
                "lora_alpha": finetuning_args.lora_alpha,
                "lora_dropout": finetuning_args.lora_dropout,
                "use_rslora": finetuning_args.use_rslora,
                "use_dora": finetuning_args.use_dora,
                "modules_to_save": finetuning_args.additional_target,
            }
        elif finetuning_args.finetuning_type == "oft":
            peft_kwargs = {
                "r": finetuning_args.oft_rank,
                "oft_block_size": finetuning_args.oft_block_size,
                "target_modules": target_modules,
                "module_dropout": finetuning_args.module_dropout,
                "modules_to_save": finetuning_args.additional_target,
            }

        if model_args.use_kt:
            if finetuning_args.finetuning_type == "oft":
                raise ValueError("KTransformers is currently not supported for OFT.")
            if finetuning_args.finetuning_type == "lora":
                peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    **peft_kwargs,
                )
            else:
                raise ValueError("KTransformers is currently only supported for LoRA.")

            model = get_kt_peft_model(model, peft_config)
            logger.info_rank0(f"KTransformers PEFT model: {model}")
        elif model_args.use_unsloth:
            if finetuning_args.finetuning_type == "oft":
                raise ValueError("Unsloth is currently not supported for OFT.")

            model = get_unsloth_peft_model(model, model_args, peft_kwargs)
        else:
            if finetuning_args.pissa_init:
                if finetuning_args.pissa_iter == -1:
                    logger.info_rank0("Using PiSSA initialization.")
                    peft_kwargs["init_lora_weights"] = "pissa"
                else:
                    logger.info_rank0(f"Using PiSSA initialization with FSVD steps {finetuning_args.pissa_iter}.")
                    peft_kwargs["init_lora_weights"] = f"pissa_niter_{finetuning_args.pissa_iter}"

            if finetuning_args.finetuning_type == "lora":
                peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    **peft_kwargs,
                )
            elif finetuning_args.finetuning_type == "oft":
                peft_config = OFTConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    **peft_kwargs,
                )
            model = get_peft_model(model, peft_config)

    if is_trainable and cast_trainable_params_to_fp32:
        for param in filter(lambda p: p.requires_grad, model.parameters()):
            param.data = param.data.to(torch.float32)

    return model


def init_adapter(
    config: "PretrainedConfig",
    model: "PreTrainedModel",
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool,
) -> "PreTrainedModel":
    r"""Initialize the adapters.

    Support full-parameter, freeze, LoRA and OFT training.

    Note that the trainable parameters must be cast to float32.
    """
    if is_trainable and getattr(model, "quantization_method", None) is not None:
        if finetuning_args.finetuning_type not in ["lora", "oft"]:
            raise ValueError("Quantized models only support LoRA or OFT tuning.")

        if finetuning_args.pissa_init:
            raise ValueError("Cannot initialize PiSSA adapter on quantized models.")

    # cast trainable parameters to float32 if:
    # 1. is_trainable and not pure_bf16 and not badam and quantization_bit is not None (qlora)
    # 2. is_trainable and not pure_bf16 and not badam and not zero3 (zero3 already in fp32)
    cast_trainable_params_to_fp32 = False
    if not is_trainable:
        pass
    elif finetuning_args.pure_bf16 or finetuning_args.use_badam:
        logger.info_rank0("Pure bf16 / BAdam detected, keeping trainable params in half precision.")
    elif model_args.quantization_bit is None and is_deepspeed_zero3_enabled():
        logger.info_rank0("DeepSpeed ZeRO3 detected, keeping trainable params in float32.")
    else:
        logger.info_rank0("Upcasting trainable params to float32.")
        cast_trainable_params_to_fp32 = True

    if finetuning_args.finetuning_type == "full":
        _setup_full_tuning(model, finetuning_args, is_trainable, cast_trainable_params_to_fp32)
    elif finetuning_args.finetuning_type == "freeze":
        _setup_freeze_tuning(model, finetuning_args, is_trainable, cast_trainable_params_to_fp32)
    elif finetuning_args.finetuning_type in ["lora", "oft"]:
        model = _setup_lora_tuning(
            config, model, model_args, finetuning_args, is_trainable, cast_trainable_params_to_fp32
        )
    else:
        raise NotImplementedError(f"Unknown finetuning type: {finetuning_args.finetuning_type}.")

    return model