attention.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...extras import logging
from ...extras.constants import AttentionFunction
from ...extras.packages import is_torch_version_greater_than


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def configure_attn_implementation(config: "PretrainedConfig", model_args: "ModelArguments") -> None:
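    """Resolve `model_args.flash_attn` against the current environment and record the choice.

    The selected backend ("eager", "sdpa", "flash_attention_2", or a FlashAttention-3 hub kernel
    for gpt-oss) is written onto the Hugging Face config so the model is instantiated with it.

    Minimal call-site sketch (assumed wiring; the actual caller lives in the model loader):

        config = AutoConfig.from_pretrained(model_args.model_name_or_path)
        configure_attn_implementation(config, model_args)
    """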
    from transformers.utils import is_flash_attn_2_available

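    # gpt-oss relies on attention sinks, so the FlashAttention-3 kernel from the Hub is loaded
    # and forced here regardless of the requested `flash_attn` value.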
    if getattr(config, "model_type", None) == "gpt_oss":
        from transformers.integrations.hub_kernels import load_and_register_kernel

        flash_attn3_kernel = "kernels-community/vllm-flash-attn3"
        load_and_register_kernel(flash_attn3_kernel)
        setattr(config, "_attn_implementation", flash_attn3_kernel)
        setattr(config, "_attn_implementation_internal", flash_attn3_kernel)
        model_args.flash_attn = AttentionFunction.FA3

        logger.info_rank0("Using FlashAttention-3 with attention sink for the gpt-oss model.")
        return

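    # Gemma-2 needs attention logit soft-capping, which eager and FlashAttention-2 handle but
    # SDPA does not, so nudge the requested implementation accordingly.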
    if getattr(config, "model_type", None) == "gemma2":
        if model_args.flash_attn in (AttentionFunction.AUTO, AttentionFunction.FA2):
            if is_flash_attn_2_available():
                if model_args.flash_attn != AttentionFunction.FA2:
                    logger.warning_rank0("Gemma 2 should use flash attention 2, change `flash_attn` to fa2.")
                    model_args.flash_attn = AttentionFunction.FA2
            else:
                logger.warning_rank0("FlashAttention-2 is not installed, use eager attention.")
                model_args.flash_attn = AttentionFunction.DISABLED
        elif model_args.flash_attn == AttentionFunction.SDPA:
            logger.warning_rank0("Gemma-2 requires soft-capping attention, which SDPA does not support.")

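    # Map the requested AttentionFunction onto the implementation string understood by
    # transformers, returning early when the requirement cannot be satisfied.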
    if model_args.flash_attn == AttentionFunction.AUTO:
        return

    elif model_args.flash_attn == AttentionFunction.DISABLED:
        requested_attn_implementation = "eager"

    elif model_args.flash_attn == AttentionFunction.SDPA:
        if not is_torch_version_greater_than("2.1.1"):
            logger.warning_rank0("torch>=2.1.1 is required for SDPA attention.")
            return

        requested_attn_implementation = "sdpa"
    elif model_args.flash_attn == AttentionFunction.FA2:
        from transformers import is_torch_npu_available

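        # Assumption: on Ascend NPU the flash-attention path is provided by torch-npu itself,
        # so an NPU device counts as FA2-capable even without the flash-attn package.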
        if not (is_flash_attn_2_available() or is_torch_npu_available()):
            logger.warning_rank0("FlashAttention-2 is not installed.")
            return

        requested_attn_implementation = "flash_attention_2"
    else:
        raise NotImplementedError(f"Unknown attention type: {model_args.flash_attn}")

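    # Record the choice on the config; a few custom architectures expose the attribute under a
    # different name or on nested sub-configs.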
    if getattr(config, "model_type", None) == "internlm2":  # special case for custom models
        setattr(config, "attn_implementation", requested_attn_implementation)
    elif getattr(config, "model_type", None) == "kimi_vl":
        setattr(config.vision_config, "_attn_implementation", requested_attn_implementation)
        setattr(config.text_config, "_attn_implementation", requested_attn_implementation)
    else:
        setattr(config, "_attn_implementation", requested_attn_implementation)


def print_attn_implementation(config: "PretrainedConfig") -> None:
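    """Log (on rank 0) which attention implementation ended up on the config."""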
    if getattr(config, "model_type", None) == "internlm2":  # special case for custom models
        attn_implementation = getattr(config, "attn_implementation", None)
    else:
        attn_implementation = getattr(config, "_attn_implementation", None)

    if attn_implementation == "flash_attention_2":
        logger.info_rank0("Using FlashAttention-2 for faster training and inference.")
    elif attn_implementation == "sdpa":
        logger.info_rank0("Using torch SDPA for faster training and inference.")
    else:
        logger.info_rank0("Using vanilla attention implementation.")