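"""Benchmark regression tests for SGLang serving performance.

Each test launches a server through run_bench_serving; when running in CI,
the measured numbers are written to the GitHub step summary and checked
against throughput/latency thresholds.
"""
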
import os
import unittest

from sglang.test.test_utils import (
    DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
    DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST_FP8,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
    DEFAULT_VLM_CHAT_TEMPLATE_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    run_bench_serving,
    write_github_step_summary,
)


class TestBenchServing(CustomTestCase):
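    # The numeric thresholds below are empirical CI baselines rather than
    # hardware guarantees; AMD runners (SGLANG_AMD_CI=1) use separate values
    # where they differ.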

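    # Offline throughput baseline: 500 prompts submitted all at once
    # (infinite request rate) against the default dense model.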
    def test_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3150)
            else:
                self.assertGreater(res["output_throughput"], 3800)

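    # Non-streaming throughput with a small batch: concurrency capped at 10
    # via --max-running-requests, ShareGPT prompts, with a warmup pass first.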
    def test_offline_throughput_non_stream_small_batch_size(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 1050)

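    # Same offline workload as the default test, but with the radix
    # (prefix) cache disabled.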
    def test_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3050)
            else:
                self.assertGreater(res["output_throughput"], 3800)

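    # Offline throughput with chunked prefill disabled
    # (--chunked-prefill-size -1 turns the feature off).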
    def test_offline_throughput_without_chunked_prefill(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2600)

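    # Offline throughput with the Triton attention backend; the context
    # length is capped at 8192 for this run.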
    def test_offline_throughput_with_triton_attention_backend(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3700)

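    # Offline throughput for the FP8 checkpoint of the default model.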
    def test_offline_throughput_default_fp8(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST_FP8,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 4300)

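    # Online latency at a steady 1 request/s: checks median end-to-end
    # latency, time-to-first-token (TTFT), and inter-token latency (ITL).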
    def test_online_latency_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 11000)
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertLess(res["median_ttft_ms"], 115)
            else:
                self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

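    # VLM offline throughput on the MMMU dataset; the reduced
    # --mem-fraction-static (0.7) presumably leaves GPU memory headroom
    # for the vision components.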
    def test_vlm_offline_throughput(self):
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_offline_throughput\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 2000)
                # TODO: not set yet, need AMD machine
            else:
                self.assertGreater(res["output_throughput"], 2500)

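    # VLM online latency on MMMU at a steady 1 request/s.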
    def test_vlm_online_latency(self):
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=50,
            request_rate=1,
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_online_latency\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 16500)
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertLess(res["median_ttft_ms"], 150)
                # TODO: not set yet, need AMD machine
            else:
                self.assertLess(res["median_ttft_ms"], 90)
            self.assertLess(res["median_itl_ms"], 8)

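    # Online latency with EAGLE speculative decoding: a separate draft model
    # proposes tokens (5 steps, top-k 4, 16 draft tokens), and the test also
    # asserts the average accept length.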
    def test_online_latency_eagle(self):
        res = run_bench_serving(
            model=DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
            num_prompts=300,
            request_rate=8,
            sharegpt_context_len=3072,
            disable_ignore_eos=True,
            dataset_name="sharegpt",
            other_server_args=[
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft-model-path",
                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "16",
                "--mem-fraction-static",
                "0.7",
            ],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_eagle\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
                f'accept_length: {res["accept_length"]:.2f} \n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertLess(res["median_e2e_latency_ms"], 1800)
            else:
                self.assertLess(res["median_e2e_latency_ms"], 900)
            self.assertGreater(res["accept_length"], 3.0)

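    # MoE offline throughput with tensor parallelism across 2 GPUs.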
    def test_moe_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

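    # Same MoE workload with the radix cache disabled.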
    def test_moe_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

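    # Pipeline-parallel (--pp 2) decode-heavy throughput: 1 input token and
    # 1024 output tokens per request, so the run is dominated by decoding.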
    def test_pp_offline_throughput_default_decode(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=1000,
            request_rate=float("inf"),
            random_input_len=1,
            random_output_len=1024,
            other_server_args=["--pp", "2"],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_offline_throughput_default_decode\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 7500)

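    # Pipeline-parallel long-context prefill: 4 prompts of 128k random input
    # tokens with FP8 quantization; asserts prefill (input) throughput.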
    def test_pp_long_context_prefill(self):
        res = run_bench_serving(
            model="meta-llama/Llama-3.3-70B-Instruct",
            num_prompts=4,
            request_rate=float("inf"),
            random_input_len=128000,
            random_output_len=1,
            dataset_name="random",
            other_server_args=[
                "--quantization",
                "fp8",
                "--pp",
                "2",
            ],
            need_warmup=False,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_long_context_latency_prefill\n"
                f'input_throughput: {res["input_throughput"]:.2f} ms\n'
            )
            self.assertGreater(res["input_throughput"], 4000)


if __name__ == "__main__":
    unittest.main()