"lm_eval/api/metrics.py" did not exist on "ed53d51c5c9c5f88abe4d55e379b76f501118b43"
bench_moe_ep_post_reorder.py 2.25 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
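"""Microbenchmark for SGLang's EP-MoE post-reorder Triton kernel.

Sweeps a range of batch sizes, launches ``post_reorder_triton_kernel`` on
randomly generated routing data, and reports latency in microseconds via
``triton.testing.perf_report``.
"""
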
import torch
import triton

from sglang.srt.layers.moe.ep_moe.kernels import post_reorder_triton_kernel

batch_sizes = [64, 128, 256, 512, 640, 768, 1024, 2048, 4096]
configs = [(bs,) for bs in batch_sizes]


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=["batch_size"],
        x_vals=[list(cfg) for cfg in configs],
        line_arg="provider",
        line_vals=["triton"],
        line_names=["Triton Kernel"],
        styles=[("orange", "-")],
        ylabel="us",
        plot_name="ep-moe-post-reorder-performance",
        args={},
    )
)
def benchmark(batch_size, provider):
    dtype = torch.bfloat16
    device = torch.device("cuda")
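    # Fixed problem shape: hidden size 4096, top-8 routing over experts 0-255,
    # and a Triton block size of 512.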
    hidden_size, topk = 4096, 8
    start_expert_id, end_expert_id, block_size = 0, 255, 512

    def alloc_tensors():
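        # Random stand-ins for the expert down-projection outputs (one row per
        # token-expert pair), the final output buffer, a src2dst index map, and
        # per-token top-k expert ids and weights.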
        down_output = torch.randn(
            batch_size * topk, hidden_size, dtype=dtype, device=device
        )
        output = torch.zeros(batch_size, hidden_size, dtype=dtype, device=device)
        src2dst = torch.randint(
            0, batch_size * topk, (batch_size, topk), dtype=torch.int32, device=device
        )
        topk_ids = torch.randint(
            start_expert_id,
            end_expert_id + 1,
            (batch_size, topk),
            dtype=torch.int32,
            device=device,
        )
        topk_weights = torch.rand(batch_size, topk, dtype=dtype, device=device)
        return down_output, output, src2dst, topk_ids, topk_weights

    quantiles = [0.5, 0.2, 0.8]

    if provider == "triton":
        d_out, out, s2d, tk_ids, tk_weights = alloc_tensors()

        def run_triton():
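            # Grid of (batch_size,): one Triton program instance per token.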
            post_reorder_triton_kernel[(batch_size,)](
                d_out.view(-1),
                out.view(-1),
                s2d.view(-1),
                tk_ids.view(-1),
                tk_weights.view(-1),
                start_expert_id,
                end_expert_id,
                topk,
                hidden_size,
                0,
                block_size,
            )

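        # Capture the launch in a CUDA graph and time replays, reporting the
        # median and 20th/80th-percentile latencies per the quantiles above.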
        ms, min_ms, max_ms = triton.testing.do_bench_cudagraph(
            run_triton, quantiles=quantiles
        )
    else:
        raise ValueError(f"Unknown provider: {provider}")

    # do_bench_cudagraph reports milliseconds; convert to microseconds for the
    # "us" ylabel, returning (median, min, max) as perf_report expects.
    return 1000 * ms, 1000 * min_ms, 1000 * max_ms


if __name__ == "__main__":
    benchmark.run(print_data=True)