test_moe_fused_gate.py
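"""Test the fused MoE gating kernel against the PyTorch reference.

Compares sgl_kernel.moe_fused_gate with biased_grouped_topk across sequence
lengths, expert/group configurations, and fused shared-expert counts.
"""
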
import pytest
import torch
from sgl_kernel import moe_fused_gate

from sglang.srt.layers.moe.topk import biased_grouped_topk


@pytest.mark.parametrize(
    "seq_length",
    list(range(1, 10))
    + [16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536],
)
@pytest.mark.parametrize(
    "params",
    [
        (128, 4, 2, 4),
        (256, 8, 4, 8),  # DeepSeek-V3
        (512, 16, 8, 16),
    ],
)
@pytest.mark.parametrize("num_fused_shared_experts", [0, 1, 2])
def test_moe_fused_gate_combined(seq_length, params, num_fused_shared_experts):
    num_experts, num_expert_group, topk_group, topk = params
    dtype = torch.float32

    torch.manual_seed(seq_length)
    tensor = torch.rand((seq_length, num_experts), dtype=dtype, device="cuda")
    scores = tensor.clone()
    bias = torch.rand(num_experts, dtype=dtype, device="cuda")
    topk = topk + num_fused_shared_experts  # shared experts occupy extra top-k slots

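    # Fused gating kernel under test (from sgl_kernel).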
    output, indices = moe_fused_gate(
        tensor,
        bias,
        num_expert_group=num_expert_group,
        topk_group=topk_group,
        topk=topk,
        num_fused_shared_experts=num_fused_shared_experts,
        routed_scaling_factor=2.5,
    )
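    # Reference implementation from sglang, used as ground truth.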
    ref_output, ref_indices = biased_grouped_topk(
        scores,
        scores,
        bias,
        topk=topk,
        renormalize=True,
        num_expert_group=num_expert_group,
        topk_group=topk_group,
        num_fused_shared_experts=num_fused_shared_experts,
        routed_scaling_factor=2.5,
    )

    # When num_fused_shared_experts > 0, the last top-k slot holds a fused shared
    # expert, so exclude it from the element-wise comparison and range-check it
    # separately below.
    if num_fused_shared_experts > 0:
        original_indices = indices.clone()
        original_ref_indices = ref_indices.clone()

        indices = indices[:, :-1]
        ref_indices = ref_indices[:, :-1]

        # Shared experts are appended after the routed experts, so their indices
        # must lie in [num_experts, num_experts + num_fused_shared_experts).
        valid_min = num_experts
        valid_max = num_experts + num_fused_shared_experts
        shared_indices = original_indices[:, -1]
        shared_ref_indices = original_ref_indices[:, -1]
        assert torch.all(
            (shared_indices >= valid_min) & (shared_indices < valid_max)
        ), f"Shared expert indices out of range: found values outside [{valid_min}, {valid_max})"
        assert torch.all(
            (shared_ref_indices >= valid_min) & (shared_ref_indices < valid_max)
        ), f"Shared expert reference indices out of range: found values outside [{valid_min}, {valid_max})"

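    # The two implementations may return the top-k entries in a different order,
    # so compare sorted values and sorted indices.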
    idx_check = torch.allclose(
        ref_indices.sort()[0].to(torch.int32),
        indices.sort()[0].to(torch.int32),
        rtol=1e-04,
        atol=1e-05,
    )
    output_check = torch.allclose(
        ref_output.sort()[0].to(torch.float32),
        output.sort()[0].to(torch.float32),
        rtol=1e-02,
        atol=1e-03,
    )

    assert idx_check, (
        f"Indices mismatch at seq_length {seq_length}, dtype {dtype}, "
        f"params {params}, num_fused_shared_experts {num_fused_shared_experts}"
    )
    assert output_check, (
        f"Output mismatch at seq_length {seq_length}, dtype {dtype}, "
        f"params {params}, num_fused_shared_experts {num_fused_shared_experts}"
    )
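
# A minimal standalone sketch of the kernel call, assuming a CUDA device and the
# DeepSeek-V3-style configuration parametrized above (shapes are illustrative):
#
#   scores = torch.rand(4, 256, dtype=torch.float32, device="cuda")  # (seq_len, num_experts)
#   bias = torch.rand(256, dtype=torch.float32, device="cuda")
#   weights, ids = moe_fused_gate(
#       scores, bias, num_expert_group=8, topk_group=4, topk=8,
#       num_fused_shared_experts=0, routed_scaling_factor=2.5,
#   )
#   # weights: routing weights of shape (4, 8); ids: expert indices of shape (4, 8)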


if __name__ == "__main__":
    pytest.main([__file__])