import pytest
import torch

from vllm.model_executor.layers.activation import FastGELU, NewGELU, SiluAndMul
from allclose_default import get_default_atol, get_default_rtol

DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 2048]  # Arbitrary values for testing
D = [512, 4096, 5120, 13824]  # Arbitrary values for testing
SEEDS = [0]
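# Use cuda:0 when only one GPU is visible; otherwise cover the first two devices.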
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]


@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_silu_and_mul(
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
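    # SiluAndMul splits the last dimension in half, so the input is 2 * d wide.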
    x = torch.randn(num_tokens, 2 * d, dtype=dtype)
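    # forward() dispatches to the custom CUDA kernel; _forward() is the
    # PyTorch-native reference implementation it is checked against.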
    layer = SiluAndMul()
    out = layer(x)
    ref_out = layer._forward(x)
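    # atol/rtol defaults come from allclose_default and depend on the output dtype.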
    assert torch.allclose(out,
                          ref_out,
                          atol=get_default_atol(out),
                          rtol=get_default_rtol(out))


@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_gelu_new(
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    x = torch.randn(num_tokens, d, dtype=dtype)
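    # Compare the custom kernel output (forward) with the PyTorch-native reference (_forward).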
    layer = NewGELU()
    out = layer(x)
    ref_out = layer._forward(x)
    assert torch.allclose(out,
                          ref_out,
                          atol=get_default_atol(out),
                          rtol=get_default_rtol(out))


@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_gelu_fast(
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    x = torch.randn(num_tokens, d, dtype=dtype)
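    # As above: custom kernel (forward) versus PyTorch-native reference (_forward).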
    layer = FastGELU()
    out = layer(x)
    ref_out = layer._forward(x)
    assert torch.allclose(out,
                          ref_out,
                          atol=get_default_atol(out),
                          rtol=get_default_rtol(out))