import asyncio
import itertools
import unittest

import requests

from sglang.srt.utils.hf_transformers_utils import get_tokenizer
from sglang.test.test_utils import (
    DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
    DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST_FP8,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST_SCORE,
    DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_amd_ci,
    is_in_ci,
    run_bench_serving,
    run_score_benchmark,
    write_github_step_summary,
)


class TestBenchServing(CustomTestCase):
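    """Benchmark regression tests for serving performance.

    Each test launches a server via run_bench_serving (or run_score_benchmark) and
    asserts throughput/latency thresholds, with separate (generally looser) bounds
    on AMD CI where applicable.
    """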
    def test_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3050)
            else:
                self.assertGreater(res["output_throughput"], 3800)

    def test_offline_throughput_non_stream_small_batch_size(self):
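        # Non-streaming benchmark with server concurrency capped at 10 running requests.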
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 1000)
            else:
                self.assertGreater(res["output_throughput"], 1050)

    def test_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3050)
            else:
                self.assertGreater(res["output_throughput"], 3800)

    def test_offline_throughput_without_chunked_prefill(self):
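        # A chunked prefill size of -1 disables chunked prefill.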
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            self.assertGreater(res["output_throughput"], 2600)

    def test_offline_throughput_with_triton_attention_backend(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3700)

    def test_offline_throughput_default_fp8(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST_FP8,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 4300)

    def test_online_latency_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 11000)
            if is_in_amd_ci():
                self.assertLess(res["median_ttft_ms"], 115)
            else:
                self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

    def test_vlm_offline_throughput(self):
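        # Multimodal benchmark: prompts come from the MMMU dataset.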
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_offline_throughput\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2000)
                # TODO: not set yet, need AMD machine
            else:
                self.assertGreater(res["output_throughput"], 2500)

    def test_vlm_online_latency(self):
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=250,
            request_rate=1,
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_online_latency\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 16500)
            if is_in_amd_ci():
                self.assertLess(res["median_ttft_ms"], 150)
                # TODO: not set yet, need AMD machine
            else:
                self.assertLess(res["median_ttft_ms"], 100)
            self.assertLess(res["median_itl_ms"], 8)

    def test_lora_online_latency(self):
        # TODO (lifuhuang): verify LoRA support in AMD.
        if is_in_amd_ci():
            return

        res = self._run_lora_latency_test(enable_background_task=False)

        if is_in_ci():
            write_github_step_summary(
                f"### test_lora_online_latency\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
                f"median_ttft_ms: {res['median_ttft_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 2400)
            self.assertLess(res["median_ttft_ms"], 58)

    def test_lora_online_latency_with_concurrent_adapter_updates(self):
        # TODO (lifuhuang): verify LoRA support in AMD.
        if is_in_amd_ci():
            return

        res = self._run_lora_latency_test(enable_background_task=True)

        if is_in_ci():
            write_github_step_summary(
                f"### test_lora_online_latency\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
                f"median_ttft_ms: {res['median_ttft_ms']:.2f} ms\n"
            )
            self.assertLess(res["median_e2e_latency_ms"], 4000)
            self.assertLess(res["median_ttft_ms"], 80)

    def _run_lora_latency_test(self, enable_background_task: bool):
        """
        Run a latency test for LoRA with the specified background task setting.
        """

        async def lora_loader_unloader_task(
            base_url: str,
            start_event: asyncio.Event,
            stop_event: asyncio.Event,
        ):
            """
            A background task that repeatedly loads and unloads a LoRA adapter.
            """
            await start_event.wait()

            path_cycler = itertools.cycle(
                [
                    "pbevan11/llama-3.1-8b-ocr-correction",
                    "faridlazuarda/valadapt-llama-3.1-8B-it-chinese",
                    "philschmid/code-llama-3-1-8b-text-to-sql-lora",
                ]
            )
            load_url = f"{base_url}/load_lora_adapter"
            unload_url = f"{base_url}/unload_lora_adapter"
            num_updates = 0

            while not stop_event.is_set():
                # 1. Load the LoRA adapter
                lora_path = next(path_cycler)
                response = await asyncio.to_thread(
                    requests.post,
                    load_url,
                    json={"lora_name": lora_path, "lora_path": lora_path},
                )
                self.assertTrue(
                    response.ok, f"Failed to load LoRA adapter: {response.text}"
                )
                num_updates += 1

                if stop_event.is_set():
                    break

                # Yield control to allow other tasks to run.
                await asyncio.sleep(1)

                # 2. Unload the LoRA adapter
                response = await asyncio.to_thread(
                    requests.post,
                    unload_url,
                    json={"lora_name": lora_path},
                )
                self.assertTrue(
                    response.ok, f"Failed to unload LoRA adapter: {response.text}"
                )
                num_updates += 1

                # Yield control to allow other tasks to run.
                await asyncio.sleep(1)

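        # When enabled, the loader/unloader task runs concurrently with the benchmark
        # to exercise dynamic adapter updates under load.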
        background_task = lora_loader_unloader_task if enable_background_task else None
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=400,
            request_rate=8,
            other_server_args=[
                "--enable-lora",
                "--max-loras-per-batch",
                "1",
                "--disable-radix-cache",
                "--random-seed",
                "42",
                "--mem-fraction-static",
                "0.8",
                "--lora-paths",
                "Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16",
                "--max-lora-rank",
                "256",
            ],
            dataset_name="random",
            random_input_len=256,
            random_output_len=256,
            lora_name=["Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16"],
            background_task=background_task,
        )

        return res

    def test_online_latency_eagle(self):
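        # EAGLE speculative decoding: 5 draft steps, top-k 4, 16 draft tokens.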
        res = run_bench_serving(
            model=DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
            num_prompts=300,
            request_rate=8,
            sharegpt_context_len=3072,
            disable_ignore_eos=True,
            dataset_name="sharegpt",
            other_server_args=[
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft-model-path",
                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "16",
                "--mem-fraction-static",
                "0.7",
            ],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_eagle\n"
                f"median_e2e_latency_ms: {res['median_e2e_latency_ms']:.2f} ms\n"
                f"accept_length: {res['accept_length']:.2f} \n"
            )
            if is_in_amd_ci():
                self.assertLess(res["median_e2e_latency_ms"], 1800)
            else:
                self.assertLess(res["median_e2e_latency_ms"], 900)
            self.assertGreater(res["accept_length"], 3.0)

    def test_moe_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

    def test_moe_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

    def test_pp_offline_throughput_default_decode(self):
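        # Decode-dominated workload: 1-token random inputs with 1024-token outputs, pp=2.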
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=1000,
            request_rate=float("inf"),
            random_input_len=1,
            random_output_len=1024,
            other_server_args=["--pp", "2"],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_offline_throughput_default_decode\n"
                f"Output throughput: {res['output_throughput']:.2f} token/s\n"
            )
            self.assertGreater(res["output_throughput"], 6700)

    def test_pp_long_context_prefill(self):
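        # Prefill-dominated workload: 4 prompts with 128k-token random inputs and
        # 1-token outputs, fp8 quantization, pp=2.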
        res = run_bench_serving(
            model="meta-llama/Llama-3.3-70B-Instruct",
            num_prompts=4,
            request_rate=float("inf"),
            random_input_len=128000,
            random_output_len=1,
            dataset_name="random",
            other_server_args=[
                "--quantization",
                "fp8",
                "--pp",
                "2",
            ],
            need_warmup=False,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_long_context_latency_prefill\n"
Lifu Huang's avatar
Lifu Huang committed
442
                f"input_throughput: {res['input_throughput']:.2f} ms\n"
443
444
445
            )
            self.assertGreater(res["input_throughput"], 4000)

    def test_score_api_latency_throughput(self):
        """Test score API latency and throughput performance"""
        res = run_score_benchmark(
            model=DEFAULT_SMALL_MODEL_NAME_FOR_TEST_SCORE,
            num_requests=1000,
            batch_size=10,
            other_server_args=[],
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_score_api_throughput\n"
                f"Average latency: {res['avg_latency_ms']:.2f} ms\n"
                f"P95 latency: {res['p95_latency_ms']:.2f} ms\n"
                f"Score API throughput: {res['throughput']:.2f} req/s\n"
                f"Successful requests: {res['successful_requests']}/{res['total_requests']}\n"
            )

        self.assertEqual(res["successful_requests"], res["total_requests"])
        self.assertLess(res["avg_latency_ms"], 48)
        self.assertLess(res["p95_latency_ms"], 50)
        self.assertGreater(res["throughput"], 20)

    def test_score_api_batch_scaling(self):
        """Test score API performance with different batch sizes"""
        batch_sizes = [10, 25, 50]

        for batch_size in batch_sizes:
            res = run_score_benchmark(
                model=DEFAULT_SMALL_MODEL_NAME_FOR_TEST_SCORE,
                num_requests=500,
                batch_size=batch_size,
            )

            if is_in_ci():
                write_github_step_summary(
                    f"### test_score_api_batch_scaling_size_{batch_size}\n"
                    f"Batch size: {batch_size}\n"
                    f"Average latency: {res['avg_latency_ms']:.2f} ms\n"
                    f"P95 latency: {res['p95_latency_ms']:.2f} ms\n"
                    f"Throughput: {res['throughput']:.2f} req/s\n"
                    f"Successful requests: {res['successful_requests']}/{res['total_requests']}\n"
                )

            self.assertEqual(res["successful_requests"], res["total_requests"])
            avg_latency_bound = {10: 45, 25: 50, 50: 60}.get(batch_size, 60)
            self.assertLess(res["avg_latency_ms"], avg_latency_bound)
            p95_latency_bound = {10: 50, 25: 60, 50: 65}.get(batch_size, 65)
            self.assertLess(res["p95_latency_ms"], p95_latency_bound)


if __name__ == "__main__":
    unittest.main()