silly_attention.py 1.73 KB
Newer Older
raojy's avatar
raojy committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Shared PyTorch custom silly attention for compilation tests.
Centralizes custom operation definitions to avoid duplicate registrations.
"""

import torch
from torch.library import Library

from vllm.utils.torch_utils import direct_register_custom_op

# Shared torch.library fragment for all compilation-test operations.
# The "silly" namespace matches existing test expectations; simply
# importing this module registers the custom ops (e.g. silly.attention)
# as a side effect of the direct_register_custom_op call below.
silly_lib = Library("silly", "FRAGMENT")

# Module-level counter incremented on every silly_attention call.
# Tests read it via get_global_counter() and clear it via reset_global_counter().
_global_counter = 0


def get_global_counter():
    """Return how many times silly attention has run since the last reset."""
    count = _global_counter
    return count


def reset_global_counter():
    """Clear the attention-invocation counter back to zero."""
    global _global_counter
    _global_counter = 0


def silly_attention(
    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, out: torch.Tensor
) -> None:
    """
    Toy attention kernel shared by the compilation tests.

    Writes ``q + k + v`` into the preallocated ``out`` tensor, so the
    result depends on every input, and bumps a module-level counter
    that tests can read via get_global_counter() or simply ignore.
    """
    global _global_counter

    # Record one more invocation for tests that track call counts.
    _global_counter += 1

    # Elementwise sum of all three inputs, copied into the output buffer.
    combined = q + k + v
    out.copy_(combined)


def silly_attention_fake(
    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, out: torch.Tensor
) -> None:
    """Fake (meta) implementation: a no-op that leaves ``out`` untouched."""
    pass


# Register the unified attention op as silly.attention on import.
# mutates_args tells torch that "out" is written in place, and the fake
# impl lets torch.compile trace the op without running the real kernel.
direct_register_custom_op(
    op_name="attention",
    op_func=silly_attention,
    mutates_args=["out"],
    fake_impl=silly_attention_fake,
    target_lib=silly_lib,
)