import pytest

import torch

from vllm.model_executor.layers.layernorm import RMSNorm

DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 4096]  # Arbitrary values for testing
HIDDEN_SIZES = [768, 5120, 8192]  # Arbitrary values for testing
ADD_RESIDUAL = [False, True]
SEEDS = [0]

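# For reference, the RMS normalization under test computes (a minimal sketch
# of the math, assuming the usual definition with a learned elementwise
# weight and an epsilon inside the square root):
#
#   y = x / sqrt(mean(x ** 2, dim=-1) + eps) * weight
#
# The add_residual variant first computes x = x + residual and then
# normalizes, returning both the normalized output and the updated residual.
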
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("add_residual", ADD_RESIDUAL)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@torch.inference_mode()
def test_rms_norm(
    num_tokens: int,
    hidden_size: int,
    add_residual: bool,
    dtype: torch.dtype,
    seed: int,
) -> None:
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    layer = RMSNorm(hidden_size).to(dtype).cuda()
    layer.weight.data.normal_(mean=1.0, std=0.1)
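    # Keep the inputs small so the sum of squares in the RMS reduction stays
    # well within half-precision range (an assumption about the scale's intent).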
    scale = 1 / (2 * hidden_size)
    x = torch.randn(num_tokens, hidden_size, dtype=dtype, device="cuda")
    x *= scale
    residual = torch.randn_like(x) * scale if add_residual else None
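    # When add_residual is set, RMSNorm fuses the residual addition into the
    # normalization and returns an (output, updated_residual) tuple; otherwise
    # it returns just the normalized tensor, as the asserts below reflect.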

    # NOTE(woosuk): The reference implementation should be executed first
    # because the custom kernel is in-place.
    ref_out = layer._forward(x, residual)
    out = layer(x, residual)
    # NOTE(woosuk): LayerNorm operators (including RMS) typically have larger
    # numerical errors than other operators because they involve reductions.
    # Therefore, we use a larger tolerance.
    if add_residual:
        assert torch.allclose(out[0], ref_out[0], atol=1e-2, rtol=1e-2)
        assert torch.allclose(out[1], ref_out[1], atol=1e-2, rtol=1e-2)
    else:
        assert torch.allclose(out, ref_out, atol=1e-2, rtol=1e-2)
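
# To run just this parametrized test (assuming a CUDA-capable machine with
# vllm and pytest installed; the path below is this file's usual location in
# the vllm repo and may differ in your checkout):
#
#   pytest tests/kernels/test_layernorm.py -v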