# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available
from transformers.utils.versions import require_version

from ...extras import logging


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def configure_attn_implementation(
    config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool
) -> None:
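    """Select an attention backend and record it on the model config.

    Maps `model_args.flash_attn` ("auto" / "disabled" / "sdpa" / "fa2") to the
    corresponding Transformers implementation ("eager", "sdpa" or "flash_attention_2");
    "auto" leaves the config untouched, and an unavailable backend only triggers a
    warning. Trainable Gemma-2 models are steered toward FlashAttention-2, and
    InternLM2 stores the choice under `attn_implementation` instead of
    `_attn_implementation`.
    """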
    if getattr(config, "model_type", None) == "gemma2" and is_trainable:
        if model_args.flash_attn == "auto" or model_args.flash_attn == "fa2":
            if is_flash_attn_2_available():
                require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
                require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
                if model_args.flash_attn != "fa2":
                    logger.warning_rank0("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
                    model_args.flash_attn = "fa2"
            else:
                logger.warning_rank0("FlashAttention-2 is not installed, use eager attention.")
                model_args.flash_attn = "disabled"
        elif model_args.flash_attn == "sdpa":
            logger.warning_rank0(
                "Gemma-2 should use soft-capping attention, while the SDPA attention does not support it."
            )

    if model_args.flash_attn == "auto":
        return

    elif model_args.flash_attn == "disabled":
        requested_attn_implementation = "eager"

    elif model_args.flash_attn == "sdpa":
        if not is_torch_sdpa_available():
            logger.warning_rank0("torch>=2.1.1 is required for SDPA attention.")
            return

        requested_attn_implementation = "sdpa"
    elif model_args.flash_attn == "fa2":
        if not is_flash_attn_2_available():
            logger.warning_rank0("FlashAttention-2 is not installed.")
            return

        requested_attn_implementation = "flash_attention_2"
    else:
        raise NotImplementedError(f"Unknown attention type: {model_args.flash_attn}")

    if getattr(config, "model_type", None) == "internlm2":  # special case for custom models
        setattr(config, "attn_implementation", requested_attn_implementation)
    else:
        setattr(config, "_attn_implementation", requested_attn_implementation)


def print_attn_implementation(config: "PretrainedConfig") -> None:
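    """Log the attention implementation recorded on the model config."""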
    if getattr(config, "model_type", None) == "internlm2":  # special case for custom models
        attn_implementation = getattr(config, "attn_implementation", None)
    else:
        attn_implementation = getattr(config, "_attn_implementation", None)

    if attn_implementation == "flash_attention_2":
        logger.info_rank0("Using FlashAttention-2 for faster training and inference.")
    elif attn_implementation == "sdpa":
        logger.info_rank0("Using torch SDPA for faster training and inference.")
    else:
        logger.info_rank0("Using vanilla attention implementation.")