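"""Benchmark serving tests for sglang.

Covers offline throughput and online latency across dense, FP8, MoE, VLM,
LoRA, EAGLE speculative-decoding, and pipeline-parallel server setups.
Performance thresholds are only asserted when running in CI.
"""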
import asyncio
import itertools
import unittest

import requests

from sglang.test.test_utils import (
    DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
    DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST_FP8,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_amd_ci,
    is_in_ci,
    run_bench_serving,
    write_github_step_summary,
)


class TestBenchServing(CustomTestCase):
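    # Each test follows the same pattern: launch a server and run the benchmark
    # via run_bench_serving, record the key metric in the GitHub step summary,
    # then assert a threshold (AMD CI machines get their own bounds).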
    def test_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3050)
            else:
                self.assertGreater(res["output_throughput"], 3800)

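    # Non-streaming requests with a small --max-running-requests cap; a warmup
    # round runs first so the measured pass starts from a steady state.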
    def test_offline_throughput_non_stream_small_batch_size(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 1000)
            else:
                self.assertGreater(res["output_throughput"], 1050)

    def test_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3050)
            else:
                self.assertGreater(res["output_throughput"], 3800)

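    # --chunked-prefill-size -1 disables chunked prefill entirely.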
    def test_offline_throughput_without_chunked_prefill(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            self.assertGreater(res["output_throughput"], 2600)

    def test_offline_throughput_with_triton_attention_backend(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3700)

    def test_offline_throughput_default_fp8(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST_FP8,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 4300)

    def test_online_latency_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 11000)
            if is_in_amd_ci():
                self.assertLess(res["median_ttft_ms"], 115)
            else:
                self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

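    # The VLM benchmarks use the MMMU dataset so that requests carry images.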
    def test_vlm_offline_throughput(self):
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_offline_throughput\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2000)
                # TODO: not set yet, need AMD machine
            else:
                self.assertGreater(res["output_throughput"], 2500)

    def test_vlm_online_latency(self):
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=250,
            request_rate=1,
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_online_latency\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 16500)
            if is_in_amd_ci():
                self.assertLess(res["median_ttft_ms"], 150)
                # TODO: not set yet, need AMD machine
            else:
                self.assertLess(res["median_ttft_ms"], 100)
            self.assertLess(res["median_itl_ms"], 8)

    def test_lora_online_latency(self):
        # TODO (lifuhuang): verify LoRA support in AMD.
        if is_in_amd_ci():
            return

        res = self._run_lora_latency_test(enable_background_task=False)

        if is_in_ci():
            write_github_step_summary(
                f"### test_lora_online_latency\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
                f"median_ttft_ms: {res['median_ttft_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 2400)
            self.assertLess(res["median_ttft_ms"], 58)

    def test_lora_online_latency_with_concurrent_adapter_updates(self):
        # TODO (lifuhuang): verify LoRA support in AMD.
        if is_in_amd_ci():
            return

        res = self._run_lora_latency_test(enable_background_task=True)

        if is_in_ci():
            write_github_step_summary(
                f"### test_lora_online_latency\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
                f"median_ttft_ms: {res['median_ttft_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 4000)
            self.assertLess(res["median_ttft_ms"], 80)

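    # Shared helper for the two LoRA latency tests above. With
    # enable_background_task=True, a coroutine keeps loading and unloading
    # extra adapters over HTTP while the benchmark runs, so the measured
    # latency reflects serving under concurrent adapter updates.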
    def _run_lora_latency_test(self, enable_background_task: bool):
        """
        Run a latency test for LoRA with the specified background task setting.
        """

        async def lora_loader_unloader_task(
            base_url: str,
            start_event: asyncio.Event,
            stop_event: asyncio.Event,
        ):
            """
            A background task that repeatedly loads and unloads a LoRA adapter.
            """
            await start_event.wait()

            path_cycler = itertools.cycle(
                [
                    "pbevan11/llama-3.1-8b-ocr-correction",
                    "faridlazuarda/valadapt-llama-3.1-8B-it-chinese",
                    "philschmid/code-llama-3-1-8b-text-to-sql-lora",
                ]
            )
            load_url = f"{base_url}/load_lora_adapter"
            unload_url = f"{base_url}/unload_lora_adapter"
            num_updates = 0

            while not stop_event.is_set():
                # 1. Load the LoRA adapter
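                # requests.post is blocking, so run it in a worker thread to
                # keep the event loop responsive.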
                lora_path = next(path_cycler)
                response = await asyncio.to_thread(
                    requests.post,
                    load_url,
                    json={"lora_name": lora_path, "lora_path": lora_path},
                )
                self.assertTrue(
                    response.ok, f"Failed to load LoRA adapter: {response.text}"
                )
                num_updates += 1

                if stop_event.is_set():
                    break

                # Yield control to allow other tasks to run.
                await asyncio.sleep(1)

                # 2. Unload the LoRA adapter
                response = await asyncio.to_thread(
                    requests.post,
                    unload_url,
                    json={"lora_name": lora_path},
                )
                self.assertTrue(
                    response.ok, f"Failed to unload LoRA adapter: {response.text}"
                )
                num_updates += 1

                # Yield control to allow other tasks to run.
                await asyncio.sleep(1)

        background_task = lora_loader_unloader_task if enable_background_task else None
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=400,
            request_rate=8,
            other_server_args=[
                "--enable-lora",
                "--max-loras-per-batch",
                "1",
                "--disable-radix-cache",
                "--random-seed",
                "42",
                "--mem-fraction-static",
                "0.8",
                "--lora-paths",
                "Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16",
                "--max-lora-rank",
                "256",
            ],
            dataset_name="random",
            random_input_len=256,
            random_output_len=256,
            lora_name=["Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16"],
            background_task=background_task,
        )

        return res

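    # EAGLE speculative decoding: the draft model proposes tokens for up to 5
    # steps with top-4 branching (at most 16 draft tokens per forward);
    # accept_length is the average number of tokens accepted per step.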
    def test_online_latency_eagle(self):
        res = run_bench_serving(
            model=DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
            num_prompts=300,
            request_rate=8,
            sharegpt_context_len=3072,
            disable_ignore_eos=True,
            dataset_name="sharegpt",
            other_server_args=[
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft-model-path",
                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "16",
                "--mem-fraction-static",
                "0.7",
            ],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_eagle\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
                f"accept_length: {res['accept_length']:.2f} \n"
            )
            if is_in_amd_ci():
                self.assertLess(res["median_e2e_latency_ms"], 1800)
            else:
                self.assertLess(res["median_e2e_latency_ms"], 900)
            self.assertGreater(res["accept_length"], 3.0)

    def test_moe_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

    def test_moe_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

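    # Decode-dominated workload: 1 input token and 1024 output tokens per
    # request, served with pipeline parallelism (--pp 2).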
    def test_pp_offline_throughput_default_decode(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=1000,
            request_rate=float("inf"),
            random_input_len=1,
            random_output_len=1024,
            other_server_args=["--pp", "2"],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_offline_throughput_default_decode\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            self.assertGreater(res["output_throughput"], 6700)

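    # Prefill-only stress test: 4 prompts of ~128k input tokens with a single
    # output token, so prefill (input) throughput is the metric of interest.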
    def test_pp_long_context_prefill(self):
        res = run_bench_serving(
            model="meta-llama/Llama-3.3-70B-Instruct",
            num_prompts=4,
            request_rate=float("inf"),
            random_input_len=128000,
            random_output_len=1,
            dataset_name="random",
            other_server_args=[
                "--quantization",
                "fp8",
                "--pp",
                "2",
            ],
            need_warmup=False,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_long_context_latency_prefill\n"
                f"input_throughput: {res['input_throughput']:.2f} tok/s\n"
            )
            self.assertGreater(res["input_throughput"], 4000)


if __name__ == "__main__":
    unittest.main()