# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, List

from ...extras import logging


if TYPE_CHECKING:
    from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer


logger = logging.get_logger(__name__)


def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool) -> List[str]:
    r"""
    Finds all available modules to apply lora or galore.
    """
    model_type = getattr(model.config, "model_type", None)
    forbidden_modules = {"lm_head"}
    if model_type == "chatglm":
        forbidden_modules.add("output_layer")
    elif model_type == "internlm2":
        forbidden_modules.add("output")
    elif model_type in ["llava", "llava_next", "llava_next_video", "mllama", "paligemma", "video_llava"]:
        forbidden_modules.add("multi_modal_projector")
    elif model_type == "qwen2_vl":
        forbidden_modules.add("merger")

    if freeze_vision_tower:
        if model_type == "mllama":
            forbidden_modules.add("vision_model")
        elif model_type == "qwen2_vl":
            forbidden_modules.add("visual")
        else:
            forbidden_modules.add("vision_tower")

    module_names = set()
    for name, module in model.named_modules():
        if any(forbidden_module in name for forbidden_module in forbidden_modules):
            continue

        if "Linear" in module.__class__.__name__ and "Embedding" not in module.__class__.__name__:
            module_names.add(name.split(".")[-1])

    logger.info_rank0("Found linear modules: {}".format(",".join(module_names)))
    return list(module_names)

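# Illustrative usage (a sketch, not part of the original module): given a model
# already loaded with transformers, collect LoRA target module names while the
# vision tower stays frozen for multimodal models. The checkpoint name below is
# only an example.
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
#   target_modules = find_all_linear_modules(model, freeze_vision_tower=True)
#   # typically yields names such as "q_proj", "k_proj", "v_proj", "o_proj", ...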

def find_expanded_modules(model: "PreTrainedModel", target_modules: List[str], num_layer_trainable: int) -> List[str]:
    r"""
    Finds the modules in the expanded blocks to apply lora.
    """
    num_layers = getattr(model.config, "num_hidden_layers", None)
    if not num_layers:
        raise ValueError("Model was not supported.")

    if num_layers % num_layer_trainable != 0:
        raise ValueError(
            f"`num_layers` {num_layers} should be divisible by `num_layer_trainable` {num_layer_trainable}."
        )

    stride = num_layers // num_layer_trainable
    trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
    trainable_layers = [f".{idx:d}." for idx in trainable_layer_ids]
    module_names = []
    for name, _ in model.named_modules():
        if any(target_module in name for target_module in target_modules) and any(
            trainable_layer in name for trainable_layer in trainable_layers
        ):
            module_names.append(name)

    logger.info_rank0("Apply lora to layers: {}".format(",".join(map(str, trainable_layer_ids))))
    return module_names

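# Illustrative usage (a sketch, not part of the original module): with a 32-layer
# model and num_layer_trainable=16, the stride is 2 and LoRA is applied to every
# other block, e.g.:
#
#   expanded = find_expanded_modules(model, ["q_proj", "v_proj"], num_layer_trainable=16)
#   # returns fully qualified names such as "model.layers.1.self_attn.q_proj", ...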

def register_autoclass(config: "PretrainedConfig", model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer"):
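    r"""
    Registers the custom config, model and tokenizer classes for AutoClass saving
    when the model is defined via remote code (i.e. exposes an `auto_map`).
    """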
    if "AutoConfig" in getattr(config, "auto_map", {}):
        config.__class__.register_for_auto_class()
    if "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
        model.__class__.register_for_auto_class()
    if "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
        tokenizer.__class__.register_for_auto_class()
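

# Illustrative usage (a sketch, not part of the original module): after loading a
# remote-code checkpoint, keep its custom classes loadable through the Auto* API
# once the fine-tuned model is saved, e.g.:
#
#   register_autoclass(config, model, tokenizer)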