import itertools
from typing import Optional, Tuple

import pytest
import torch
from sgl_kernel import sgl_per_token_quant_fp8

from sglang.srt.utils import is_hip

is_hip_ = is_hip()
fp8_type_ = torch.float8_e4m3fnuz if is_hip_ else torch.float8_e4m3fn


def torch_per_token_quant_fp8(tensor, inv_scale):
    # The reference implementation that fully aligns to
    # the kernel being tested.
    finfo = torch.finfo(torch.float8_e4m3fn)
    inv_scale = inv_scale.view(-1, 1)
    scale = inv_scale.reciprocal()
    qweight = (tensor.to(torch.float32) * scale).clamp(min=finfo.min, max=finfo.max)
    qweight = qweight.to(torch.float8_e4m3fn)
    return qweight


def sglang_per_token_quant_fp8(
    input: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Quantize each row of `input` to fp8 via the sgl_kernel CUDA op.

    Returns the quantized tensor (dtype `fp8_type_`) together with the
    per-token scales reshaped to (num_tokens, 1).
    """
    num_tokens = input.size(0)
    # The kernel writes into both buffers in place; scales start zeroed.
    per_token_scale = torch.zeros(
        num_tokens, device=input.device, dtype=torch.float32
    )
    quantized = torch.empty_like(input, device=input.device, dtype=fp8_type_)

    sgl_per_token_quant_fp8(input, quantized, per_token_scale)

    # Column-vector shape lets the scales broadcast over hidden_dim.
    return quantized, per_token_scale.reshape(-1, 1)


@pytest.mark.parametrize(
    "num_tokens,hidden_dim",
    list(itertools.product([128, 256, 512], [512, 2048, 4096])),
)
def test_per_token_quant_compare_implementations(
    num_tokens: int,
    hidden_dim: int,
):
    """Compare the CUDA kernel output against the pure-torch reference.

    The kernel produces both the fp8 output and the per-token scales; the
    reference is then fed those same scales, so any mismatch comes from the
    quantization itself rather than from scale computation.
    """
    device = torch.device("cuda")
    x = torch.rand((num_tokens, hidden_dim), dtype=torch.float16, device=device)

    kernel_out, kernel_scale = sglang_per_token_quant_fp8(x)
    reference_out = torch_per_token_quant_fp8(x, kernel_scale)

    torch.testing.assert_close(
        kernel_out.float(), reference_out.float(), rtol=1e-3, atol=1e-3
    )


if __name__ == "__main__":
    # Allow running this file directly (python test_per_token_quant_fp8.py)
    # without invoking the pytest CLI.
    pytest.main([__file__])