# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Any, Optional

from ...extras import logging
from ...extras.misc import get_current_device


if TYPE_CHECKING:
    from transformers import PretrainedConfig, PreTrainedModel

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def _get_unsloth_kwargs(
    config: "PretrainedConfig", model_name_or_path: str, model_args: "ModelArguments"
) -> dict[str, Any]:
    r"""Build the keyword arguments passed to unsloth's `FastLanguageModel.from_pretrained`.

    Args:
        config: Model configuration; only its optional ``rope_scaling`` attribute is read.
        model_name_or_path: Model identifier or local path, forwarded to unsloth as ``model_name``.
        model_args: User-provided model arguments holding dtype, quantization and hub settings.

    Returns:
        A dict of keyword arguments suitable for ``FastLanguageModel.from_pretrained``.
    """
    return {
        "model_name": model_name_or_path,
        "max_seq_length": model_args.model_max_length or 4096,  # fall back when unset/zero
        "dtype": model_args.compute_dtype,
        "load_in_4bit": model_args.quantization_bit == 4,  # unsloth only supports 4-bit here
        "token": model_args.hf_hub_token,
        "device_map": {"": get_current_device()},  # pin the whole model to the current device
        "rope_scaling": getattr(config, "rope_scaling", None),
        "fix_tokenizer": False,
        "trust_remote_code": model_args.trust_remote_code,
        "use_gradient_checkpointing": "unsloth",
    }


def load_unsloth_pretrained_model(
    config: "PretrainedConfig", model_args: "ModelArguments"
) -> Optional["PreTrainedModel"]:
    r"""Try to load the pretrained model with unsloth, returning ``None`` on unsupported models.

    Used in training. On failure, ``model_args.use_unsloth`` is flipped off so the
    caller falls back to the regular loading path.
    """
    from unsloth import FastLanguageModel  # type: ignore

    load_kwargs = _get_unsloth_kwargs(config, model_args.model_name_or_path, model_args)
    try:
        loaded_model, _ = FastLanguageModel.from_pretrained(**load_kwargs)
        return loaded_model
    except NotImplementedError:
        # unsloth raises NotImplementedError for unsupported architectures
        logger.warning_rank0("Unsloth does not support model type {}.".format(getattr(config, "model_type", None)))
        model_args.use_unsloth = False
        return None


def get_unsloth_peft_model(
    model: "PreTrainedModel", model_args: "ModelArguments", peft_kwargs: dict[str, Any]
) -> "PreTrainedModel":
    r"""Wrap the pretrained model as a peft model via unsloth. Used in training."""
    from unsloth import FastLanguageModel  # type: ignore

    # unsloth-specific arguments added on top of the caller-supplied peft kwargs;
    # duplicate keys would raise a TypeError, surfacing conflicting configuration.
    extra_kwargs = dict(
        model=model,
        max_seq_length=model_args.model_max_length,
        use_gradient_checkpointing="unsloth",
    )
    return FastLanguageModel.get_peft_model(**peft_kwargs, **extra_kwargs)


def load_unsloth_peft_model(
    config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool
) -> "PreTrainedModel":
    r"""Load peft model with unsloth. Used in both training and inference.

    Args:
        config: Model configuration; only its optional attributes are read.
        model_args: User-provided model arguments; the first adapter path is loaded.
        is_trainable: Whether the model is loaded for training (keeps gradient
            checkpointing on) or inference (disables it and switches unsloth to
            inference mode).

    Raises:
        ValueError: If unsloth does not support the model type.
    """
    from unsloth import FastLanguageModel  # type: ignore

    unsloth_kwargs = _get_unsloth_kwargs(config, model_args.adapter_name_or_path[0], model_args)
    # Hoisted out of the try block: this mutation cannot raise NotImplementedError,
    # so the try body stays minimal around the call that can actually fail.
    if not is_trainable:
        # gradient checkpointing is training-only; disable it for inference
        unsloth_kwargs["use_gradient_checkpointing"] = False

    try:
        model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs)
    except NotImplementedError as e:
        # chain the original error so the unsupported-architecture cause stays visible
        raise ValueError("Unsloth does not support model type {}.".format(getattr(config, "model_type", None))) from e

    if not is_trainable:
        FastLanguageModel.for_inference(model)

    return model