# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest
from transformers.utils import is_flash_attn_2_available


# Compatible with Transformers v4 and Transformers v5: v5 dropped the
# is_torch_sdpa_available helper, and SDPA is always available on the torch
# versions those releases support, so the fallback simply returns True.
try:
    from transformers.utils import is_torch_sdpa_available
except ImportError:

    def is_torch_sdpa_available():
        return True


from llamafactory.extras.packages import is_transformers_version_greater_than
from llamafactory.train.test_utils import load_infer_model


TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")

INFER_ARGS = {
    "model_name_or_path": TINY_LLAMA3,
    "template": "llama3",
}
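# These arguments are shared across every load_infer_model call in the test
# below; the per-implementation flash_attn value is passed separately.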


@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
def test_attention():
    attention_available = ["disabled"]
    if is_torch_sdpa_available():
        attention_available.append("sdpa")

    if is_flash_attn_2_available():
        attention_available.append("fa2")

    llama_attention_classes = {
        "disabled": "LlamaAttention",
        "sdpa": "LlamaSdpaAttention",
        "fa2": "LlamaFlashAttention2",
    }
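    # These expected class names match Transformers < 4.48. The v4.48
    # attention refactor collapsed the per-backend classes into a single
    # LlamaAttention, which is why the test is marked xfail above for newer
    # versions.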
    for requested_attention in attention_available:
        model = load_infer_model(flash_attn=requested_attention, **INFER_ARGS)
        for module in model.modules():
            if "Attention" in module.__class__.__name__:
                assert module.__class__.__name__ == llama_attention_classes[requested_attention]
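

# A minimal way to exercise this module directly (assuming pytest is
# installed and the tiny model is reachable, e.g. via the Hugging Face Hub):
#
#     pytest test_attention.py -v
#
# Set the TINY_LLAMA3 environment variable to a local checkpoint path to
# avoid downloading the default llamafactory/tiny-random-Llama-3 model.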