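"""Serving benchmark tests for SGLang.

Each test launches a server through run_bench_serving, benchmarks it with a
fixed set of prompts, and, when running in CI, writes the measured throughput
or latency to the GitHub step summary and asserts it against a regression
threshold.
"""
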
import unittest

from sglang.test.test_utils import (
    DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
    DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
    DEFAULT_FP8_MODEL_NAME_FOR_TEST,
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_MOE_MODEL_NAME_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    run_bench_serving,
    write_github_step_summary,
)


class TestBenchServing(CustomTestCase):

    def test_offline_throughput_default(self):
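        # Baseline offline throughput: all prompts are submitted at once
        # (infinite request rate) against a server with default settings.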
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3350)

    def test_offline_throughput_non_stream_small_batch_size(self):
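        # Non-streaming responses with the server capped at 10 concurrently
        # running requests, using the ShareGPT dataset.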
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=200,
            request_rate=float("inf"),
            other_server_args=["--max-running-requests", "10"],
            dataset_name="sharegpt",
            random_input_len=None,
            random_output_len=None,
            disable_stream=True,
            need_warmup=True,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_non_stream_small_batch_size\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            # There is a regression with torch 2.5
            # This number was 950 for torch 2.4
            self.assertGreater(res["output_throughput"], 1000)

    def test_offline_throughput_without_radix_cache(self):
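        # Same baseline run, but with the radix (prefix) cache disabled.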
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3350)

    def test_offline_throughput_without_chunked_prefill(self):
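        # A chunked-prefill size of -1 disables chunked prefill, so each
        # prompt is prefilled in a single pass.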
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=["--chunked-prefill-size", "-1"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_without_chunked_prefill\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2600)

    def test_offline_throughput_with_triton_attention_backend(self):
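        # Run with the Triton attention backend and the context length capped
        # at 8192 tokens.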
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[
                "--attention-backend",
                "triton",
                "--context-length",
                "8192",
            ],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_with_triton_attention_backend\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3450)

    def test_offline_throughput_default_fp8(self):
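        # Offline throughput with an FP8-quantized model and default server
        # settings.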
        res = run_bench_serving(
            model=DEFAULT_FP8_MODEL_NAME_FOR_TEST,
            num_prompts=500,
            request_rate=float("inf"),
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_offline_throughput_default_fp8\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 3900)

    def test_online_latency_default(self):
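        # Online latency: requests arrive at roughly 1 per second instead of
        # all at once; the assertions cover median end-to-end latency, time to
        # first token (TTFT), and inter-token latency (ITL).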
        res = run_bench_serving(
            model=DEFAULT_MODEL_NAME_FOR_TEST,
            num_prompts=100,
            request_rate=1,
            other_server_args=[],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_default\n"
                f'median_e2e_latency_ms : {res["median_e2e_latency_ms"]:.2f} ms\n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 11000)
            self.assertLess(res["median_ttft_ms"], 86)
            self.assertLess(res["median_itl_ms"], 10)

    def test_online_latency_eagle(self):
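        # EAGLE speculative decoding: a small draft model proposes tokens that
        # the target model verifies in parallel. The num-steps, eagle-topk,
        # and num-draft-tokens flags bound how many draft tokens are proposed
        # and verified per decoding step.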
        res = run_bench_serving(
            model=DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
            num_prompts=300,
            request_rate=8,
            sharegpt_context_len=3072,
            disable_ignore_eos=True,
            dataset_name="sharegpt",
            other_server_args=[
                "--speculative-algorithm",
                "EAGLE",
                "--speculative-draft-model-path",
                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
                "--speculative-num-steps",
                "5",
                "--speculative-eagle-topk",
                "4",
                "--speculative-num-draft-tokens",
                "16",
                "--mem-fraction-static",
                "0.7",
            ],
            need_warmup=True,
            seed=42,
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_online_latency_eagle\n"
                f'median_e2e_latency_ms : {res["median_e2e_latency_ms"]:.2f} ms\n'
                f'accept_length : {res["accept_length"]:.2f} \n'
            )
            self.assertLess(res["median_e2e_latency_ms"], 900)
            self.assertGreater(res["accept_length"], 2.99)

    def test_moe_offline_throughput_default(self):
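        # MoE model served with tensor parallelism across 2 GPUs.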
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_default\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2200)

    def test_moe_offline_throughput_without_radix_cache(self):
        res = run_bench_serving(
            model=DEFAULT_MOE_MODEL_NAME_FOR_TEST,
            num_prompts=300,
            request_rate=float("inf"),
            other_server_args=["--tp", "2", "--disable-radix-cache"],
        )

        if is_in_ci():
            write_github_step_summary(
                f"### test_moe_offline_throughput_without_radix_cache\n"
                f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
            )
            self.assertGreater(res["output_throughput"], 2200)


if __name__ == "__main__":
    unittest.main()