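"""Benchmark-based serving tests for sglang.

Each test launches a server via `run_bench_serving`, benchmarks throughput or
latency, and asserts against performance thresholds. When running in CI, the
key metrics are also written to the GitHub step summary; AMD CI uses separate
thresholds.
"""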
import unittest

from sglang.test.test_utils import (
    DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
    DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST_FP8,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_amd_ci,
    is_in_ci,
    run_bench_serving,
    write_github_step_summary,
)


class TestBenchServing(CustomTestCase):
    def test_offline_throughput_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3150)
            else:
                self.assertGreater(res["output_throughput"], 3800)

    def test_offline_throughput_non_stream_small_batch_size(self):
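        # Small-batch, non-streaming case: the server caps concurrency at 10
        # running requests and responses are returned without streaming.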
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 1050)

    def test_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3050)
            else:
                self.assertGreater(res["output_throughput"], 3800)

    def test_offline_throughput_without_chunked_prefill(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2600)

    def test_offline_throughput_with_triton_attention_backend(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3700)

    def test_offline_throughput_default_fp8(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST_FP8,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 4300)

    def test_online_latency_default(self):
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 11000)
            if is_in_amd_ci():
                self.assertLess(res["median_ttft_ms"], 115)
            else:
                self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

    def test_vlm_offline_throughput(self):
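        # Vision-language model throughput on the multimodal MMMU dataset.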
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_offline_throughput\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2000)
                # TODO: not set yet, need AMD machine
            else:
                self.assertGreater(res["output_throughput"], 2500)

    def test_vlm_online_latency(self):
        res = run_bench_serving(
            model=DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST,
            num_prompts=50,
            request_rate=1,
            other_server_args=[
                "--mem-fraction-static",
                "0.7",
            ],
            dataset_name="mmmu",
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_vlm_online_latency\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 16500)
            if is_in_amd_ci():
                self.assertLess(res["median_ttft_ms"], 150)
                # TODO: not set yet, need AMD machine
            else:
                self.assertLess(res["median_ttft_ms"], 90)
            self.assertLess(res["median_itl_ms"], 8)

    def test_online_latency_eagle(self):
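        # EAGLE speculative decoding: a draft model proposes tokens that the
        # target model verifies; `accept_length` is the average number of
        # tokens accepted per decoding step.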
        res = run_bench_serving(
            model=DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
            num_prompts=300,
            request_rate=8,
            sharegpt_context_len=3072,
            disable_ignore_eos=True,
            dataset_name="sharegpt",
            other_server_args=[
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft-model-path",
                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "16",
                "--mem-fraction-static",
                "0.7",
            ],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_eagle\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
                f'accept_length: {res["accept_length"]:.2f} \n'
            )
            if is_in_amd_ci():
                self.assertLess(res["median_e2e_latency_ms"], 1800)
            else:
                self.assertLess(res["median_e2e_latency_ms"], 900)
            self.assertGreater(res["accept_length"], 3.0)

    def test_moe_offline_throughput_default(self):
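        # Mixture-of-experts model served with tensor parallelism (--tp 2).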
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

    def test_moe_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if is_in_amd_ci():
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

    def test_pp_offline_throughput_default_decode(self):
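        # Decode-heavy pipeline-parallel run: 1-token inputs with 1024-token
        # outputs across 2 pipeline stages (--pp 2).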
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=1000,
            request_rate=float("inf"),
            random_input_len=1,
            random_output_len=1024,
            other_server_args=["--pp", "2"],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_offline_throughput_default_decode\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 6700)

    def test_pp_long_context_prefill(self):
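        # Prefill-heavy pipeline-parallel run: 128k-token random inputs with a
        # single output token, served with fp8 quantization and --pp 2.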
        res = run_bench_serving(
            model="meta-llama/Llama-3.3-70B-Instruct",
            num_prompts=4,
            request_rate=float("inf"),
            random_input_len=128000,
            random_output_len=1,
            dataset_name="random",
            other_server_args=[
                "--quantization",
                "fp8",
                "--pp",
                "2",
            ],
            need_warmup=False,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_pp_long_context_latency_prefill\n"
                f'input_throughput: {res["input_throughput"]:.2f} ms\n'
            )
            self.assertGreater(res["input_throughput"], 4000)


if __name__ == "__main__":
    unittest.main()