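"""Serving benchmark regression tests.

Each test launches a server through run_bench_serving; when running in CI it
also writes a GitHub step summary and asserts throughput/latency thresholds
(looser thresholds apply when SGLANG_AMD_CI=1).
"""
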
import os
import unittest

from sglang.test.test_utils import (
    DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
    DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST_FP8,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    run_bench_serving,
    write_github_step_summary,
)


class TestBenchServing(CustomTestCase):
    def test_offline_throughput_default(self):
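        """Baseline offline throughput: 500 prompts at an unbounded request rate."""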
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3800)

    def test_offline_throughput_non_stream_small_batch_size(self):
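        """Non-streaming throughput with concurrency capped at 10 running requests."""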
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 1050)

    def test_offline_throughput_without_radix_cache(self):
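        """Offline throughput with the radix (prefix) cache disabled."""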
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3800)

    def test_offline_throughput_without_chunked_prefill(self):
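        """Offline throughput with chunked prefill disabled (chunked-prefill-size=-1)."""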
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2600)

    def test_offline_throughput_with_triton_attention_backend(self):
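        """Offline throughput with the Triton attention backend and an 8192-token context."""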
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 3500)
            else:
                self.assertGreater(res["output_throughput"], 3700)

    def test_offline_throughput_default_fp8(self):
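        """Baseline offline throughput on the FP8 variant of the test model."""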
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST_FP8,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 4000)
            else:
                self.assertGreater(res["output_throughput"], 4300)

    def test_online_latency_default(self):
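        """Online latency at 1 request/s: asserts median e2e latency, TTFT, and ITL."""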
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 11000)
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertLess(res["median_ttft_ms"], 115)
            else:
                self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

    def test_online_latency_eagle(self):
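        """Online latency with EAGLE speculative decoding; also checks the accept length."""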
        res = run_bench_serving(
            model=DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
            num_prompts=300,
            request_rate=8,
            sharegpt_context_len=3072,
            disable_ignore_eos=True,
            dataset_name="sharegpt",
            other_server_args=[
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft-model-path",
                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "16",
                "--mem-fraction-static",
                "0.7",
            ],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_eagle\n"
                f'median_e2e_latency_ms: {res["median_e2e_latency_ms"]:.2f} ms\n'
                f'accept_length: {res["accept_length"]:.2f} \n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertLess(res["median_e2e_latency_ms"], 1450)
            else:
                self.assertLess(res["median_e2e_latency_ms"], 900)
            self.assertGreater(res["accept_length"], 3.0)

    def test_moe_offline_throughput_default(self):
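        """Offline throughput for the MoE test model with 2-way tensor parallelism."""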
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)

    def test_moe_offline_throughput_without_radix_cache(self):
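        """MoE offline throughput with tp=2 and the radix cache disabled."""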
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            if os.getenv("SGLANG_AMD_CI") == "1":
                self.assertGreater(res["output_throughput"], 2100)
            else:
                self.assertGreater(res["output_throughput"], 2200)


if __name__ == "__main__":
    unittest.main()