import unittest

from sglang.test.test_utils import (
    DEFAULT_FP8_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    is_in_ci,
    run_bench_serving,
)


class TestBenchServing(unittest.TestCase):
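    # Each test launches a server through run_bench_serving (model plus extra
    # server args), drives it with the bench_serving load generator, and gets
    # back a dict of metrics. The thresholds are only asserted when running in
    # CI, so they are presumably calibrated to the CI runner's hardware.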

    def test_offline_throughput_default(self):
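        # request_rate=inf issues all 500 prompts at once, so this measures
        # peak offline throughput with default server settings.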
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            assert res["output_throughput"] > 2600

    def test_offline_throughput_without_radix_cache(self):
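        # Same workload as the default test, but with RadixAttention prefix
        # caching turned off via --disable-radix-cache.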
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            assert res["output_throughput"] > 2800

    def test_offline_throughput_without_chunked_prefill(self):
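        # --chunked-prefill-size -1 disables chunked prefill entirely.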
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            assert res["output_throughput"] > 2600

    def test_offline_throughput_with_triton_attention_backend(self):
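        # Runs with the Triton attention kernels instead of the default
        # backend; --context-length 8192 caps the served context window.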
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            assert res["output_throughput"] > 2600

    def test_offline_throughput_default_fp8(self):
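        # Same offline-throughput workload against the default FP8-quantized
        # test checkpoint.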
        res = run_bench_serving(
            model=DEFAULT_FP8_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            assert res["output_throughput"] > 3100

    def test_online_latency_default(self):
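        # request_rate=1 spaces requests out at roughly one per second, so the
        # interesting numbers here are latencies rather than throughput:
        # e2e = end-to-end, ttft = time to first token, itl = inter-token latency.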
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            assert res["median_e2e_latency_ms"] < 12000
            assert res["median_ttft_ms"] < 80
            assert res["median_itl_ms"] < 12

    def test_moe_offline_throughput_default(self):
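        # MoE model served with tensor parallelism across two GPUs (--tp 2).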
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            assert res["output_throughput"] > 1850

    def test_moe_offline_throughput_without_radix_cache(self):
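        # Same MoE setup with RadixAttention prefix caching disabled.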
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            assert res["output_throughput"] > 1950


if __name__ == "__main__":
    unittest.main()