import unittest

from sglang.test.test_utils import (
    DEFAULT_FP8_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    is_in_ci,
    run_bench_serving,
    write_github_step_summary,
)


class TestBenchServing(unittest.TestCase):
    """Serving-benchmark regression tests.

    Each test launches a server through ``run_bench_serving`` with a specific
    configuration, then — only when running in CI — writes a one-line summary
    to the GitHub step summary and asserts a minimum performance bar.  The
    numeric thresholds are empirical regression floors for the CI hardware,
    not general performance guarantees.
    """

    def test_offline_throughput_default(self):
        """Offline (infinite request rate) throughput with default server args."""
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            # inf request rate = all requests issued at once (offline mode)
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3350)

    def test_offline_throughput_non_stream_small_batch_size(self):
        """Offline throughput with streaming disabled and a small batch cap."""
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            # sharegpt provides its own lengths; random_* do not apply
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            # There is a regression with torch 2.5
            # This number was 950 for torch 2.4
            self.assertGreater(res["output_throughput"], 800)

    def test_offline_throughput_without_radix_cache(self):
        """Offline throughput with the radix (prefix) cache disabled."""
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3350)

    def test_offline_throughput_without_chunked_prefill(self):
        """Offline throughput with chunked prefill disabled (-1 chunk size)."""
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2600)

    def test_offline_throughput_with_triton_attention_backend(self):
        """Offline throughput using the Triton attention backend."""
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3450)

    def test_offline_throughput_default_fp8(self):
        """Offline throughput with the FP8-quantized model."""
        res = run_bench_serving(
            model=DEFAULT_FP8_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3850)

    def test_online_latency_default(self):
        """Online latency at a steady 1 request/s arrival rate."""
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            # NOTE: unit fixed from "token/s" — this metric is a latency in ms.
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f'median_e2e_latency_ms : {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 12000)
            self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

    def test_moe_offline_throughput_default(self):
        """Offline throughput for the MoE model on 2-way tensor parallelism."""
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2150)

    def test_moe_offline_throughput_without_radix_cache(self):
        """Offline MoE throughput (tp=2) with the radix cache disabled."""
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2150)


# Allow running this benchmark suite directly as a script.
if __name__ == "__main__":
    unittest.main()