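"""Tests for vLLM's rotary positional embedding (RoPE) layer.

Each test compares the output of the custom kernel (``rope.forward``)
against the native PyTorch reference implementation (``rope._forward``)
over a grid of dtypes, head sizes, rotary dimensions, batch sizes, and
sequence lengths.

Run with: pytest test_pos_encoding.py
"""
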
from typing import Optional

import pytest
import torch

from vllm.model_executor.layers.rotary_embedding import get_rope

IS_NEOX_STYLE = [True, False]
DTYPES = [torch.half, torch.bfloat16, torch.float]
HEAD_SIZES = [64, 80, 96, 112, 128, 256]
ROTARY_DIMS = [None, 32]  # None means rotary dim == head size
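# A rotary dim smaller than the head size exercises partial-rotary models,
# where only the first rotary_dim channels of each head are rotated and the
# rest pass through unchanged.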
NUM_HEADS = [7, 17]  # Arbitrary values for testing
BATCH_SIZES = [1, 5]  # Arbitrary values for testing
SEQ_LENS = [11, 8192]  # Arbitrary values for testing
SEEDS = [0]
DEVICES = [i for i in range(1 if torch.cuda.device_count() == 1 else 2)]


@pytest.mark.parametrize("is_neox_style", IS_NEOX_STYLE)
@pytest.mark.parametrize("batch_size", BATCH_SIZES)
@pytest.mark.parametrize("seq_len", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("rotary_dim", ROTARY_DIMS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", DEVICES)
@torch.inference_mode()
def test_rotary_embedding(
    is_neox_style: bool,
    batch_size: int,
    seq_len: int,
    num_heads: int,
    head_size: int,
    rotary_dim: Optional[int],
    dtype: torch.dtype,
    seed: int,
    device: int,
    max_position: int = 8192,
    base: int = 10000,
) -> None:
    if rotary_dim is None:
        rotary_dim = head_size
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    gpu_id = f"cuda:{device}"
    rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style)
    rope = rope.to(dtype=dtype, device=gpu_id)

    positions = torch.randint(0,
                              max_position, (batch_size, seq_len),
                              device=gpu_id)
    query = torch.randn(batch_size,
                        seq_len,
                        num_heads * head_size,
                        dtype=dtype,
                        device=gpu_id)
    key = torch.randn_like(query)

    # NOTE(woosuk): The reference implementation should be executed first
    # because the custom kernel is in-place.
    ref_query, ref_key = rope._forward(positions, query, key)
    out_query, out_key = rope.forward(positions, query, key)
    # Compare the results.
    assert torch.allclose(out_query, ref_query, atol=1e-5, rtol=1e-5)
    assert torch.allclose(out_key, ref_key, atol=1e-5, rtol=1e-5)
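

# For reference, the sketch below illustrates the NeoX-style rotation that
# the reference path is expected to compute. It is a hedged illustration,
# not part of the test suite: `_rope_neox_reference` is a hypothetical
# helper, not a vLLM API, and it covers only the is_neox_style=True case
# (GPT-J style interleaves even/odd channels instead).
def _rope_neox_reference(x: torch.Tensor, positions: torch.Tensor,
                         rotary_dim: int, base: int = 10000) -> torch.Tensor:
    """Apply NeoX-style RoPE to the first `rotary_dim` channels of `x`.

    x: [batch_size, seq_len, num_heads, head_size]
    positions: [batch_size, seq_len]
    """
    # Inverse frequencies: base^(-2i / rotary_dim) for i in [0, rotary_dim/2).
    inv_freq = 1.0 / (base**(torch.arange(
        0, rotary_dim, 2, dtype=torch.float, device=x.device) / rotary_dim))
    # Per-position rotation angles: [batch_size, seq_len, rotary_dim // 2].
    angles = positions.unsqueeze(-1).float() * inv_freq
    cos = angles.cos().unsqueeze(-2)  # Unsqueeze to broadcast over heads.
    sin = angles.sin().unsqueeze(-2)
    # NeoX style rotates the two contiguous halves of the rotary channels;
    # any channels beyond rotary_dim pass through unchanged.
    x_rot, x_pass = x[..., :rotary_dim].float(), x[..., rotary_dim:]
    x1, x2 = x_rot.chunk(2, dim=-1)
    x_rot = torch.cat((x1 * cos - x2 * sin, x2 * cos + x1 * sin), dim=-1)
    return torch.cat((x_rot.to(x.dtype), x_pass), dim=-1)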