import argparse
import glob
from dataclasses import dataclass

from sglang.test.test_utils import run_unittest_files


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60


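# Test suites, keyed by the name passed via --suite. Each entry pairs a test
# file with an estimated runtime in seconds, which auto_partition() below uses
# to balance files across parallel partitions.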
suites = {
    "per-commit": [
        TestFile("models/lora/test_lora.py", 76),
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("models/test_embedding_models.py", 73),
        # TestFile("models/test_clip_models.py", 52),
        TestFile("models/test_encoder_embedding_models.py", 100),
        TestFile("models/test_cross_encoder_models.py", 100),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_generation_models.py", 103),
        # TestFile("models/test_gme_qwen_models.py", 45),
        # TestFile("models/test_grok_models.py", 60),  # Disabled due to illegal memory access
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_vlm_models.py", 437),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_cache_report.py", 100),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eagle_infer_a.py", 370),
        TestFile("test_eagle_infer_b.py", 270),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_fa3.py", 376),
        # TestFile("test_flashmla.py", 352),
        TestFile("test_fp8_kernel.py", 8),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_hicache.py", 116),
        TestFile("test_hicache_mla.py", 127),
        TestFile("test_hidden_states.py", 55),
        TestFile("test_int8_kernel.py", 8),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 167),
        TestFile("test_mla_deepseek_v3.py", 342),
        TestFile("test_mla_int8_deepseek_v3.py", 429),
        TestFile("test_mla_flashinfer.py", 302),
        TestFile("test_mla_fp8.py", 93),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_torchao.py", 70),
        TestFile("test_triton_attention_kernels.py", 4),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_triton_moe_channel_fp8_kernel.py", 25),
        TestFile("test_triton_sliding_window.py", 250),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_update_weights_from_tensor.py", 48),
        TestFile("test_vertex_endpoint.py", 31),
        TestFile("test_vision_chunked_prefill.py", 175),
        TestFile("test_vlm_input_format.py", 300),
        TestFile("test_vision_openai_server_a.py", 584),
        TestFile("test_vision_openai_server_b.py", 556),
        TestFile("test_w8a8_quantization.py", 46),
    ],
    "per-commit-amd": [
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("test_mla.py", 242),
        TestFile("test_mla_deepseek_v3.py", 221),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_input_embeddings.py", 38),
        TestFile("openai_server/features/test_cache_report.py", 100),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_metrics.py", 32),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_vertex_endpoint.py", 31),
        TestFile("test_vision_chunked_prefill.py", 175),
    ],
    "per-commit-2-gpu": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_dp_attention.py", 137),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_moe_ep.py", 181),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
        TestFile("test_release_memory_occupation.py", 44),
    ],
    "per-commit-2-gpu-amd": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
    ],
    "per-commit-4-gpu": [
        TestFile("test_local_attn.py", 250),
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-4-gpu-amd": [
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-8-gpu": [
        # Disabled deepep tests temporarily because it takes too much time.
        # TODO: re-enable them after reducing the test time with compilation cache and smaller models.
        # TestFile("test_deepep_intranode.py", 50),
        # TestFile("test_deepep_low_latency.py", 50),
        # TestFile("test_moe_deepep_eval_accuracy_large.py", 250),
        TestFile("test_disaggregation.py", 270),
        TestFile("test_disaggregation_different_tp.py", 155),
        TestFile("test_full_deepseek_v3.py", 463),
    ],
    "per-commit-8-gpu-amd": [
        TestFile("test_full_deepseek_v3.py", 250),
    ],
    "per-commit-cpu": [
        TestFile("cpu/test_activation.py"),
        TestFile("cpu/test_decode.py"),
        TestFile("cpu/test_extend.py"),
        TestFile("cpu/test_gemm.py"),
        TestFile("cpu/test_mla.py"),
        TestFile("cpu/test_moe.py"),
        TestFile("cpu/test_norm.py"),
        TestFile("cpu/test_qkv_proj_with_rope.py"),
        TestFile("cpu/test_rope.py"),
        TestFile("cpu/test_shared_expert.py"),
    ],
    "nightly": [
        TestFile("test_nightly_gsm8k_eval.py"),
    ],
    "nightly-amd": [
        TestFile("test_nightly_gsm8k_eval_amd.py"),
    ],
    "vllm_dependency_test": [
        TestFile("test_awq.py"),
        TestFile("test_bnb.py"),
        TestFile("test_gguf.py", 78),
        TestFile("test_gptqmodel_dynamic.py", 72),
        TestFile("test_vllm_dependency.py"),
    ],
}


def auto_partition(files, rank, size):
    """
    Partition files into size sublists with approximately equal sums of estimated times
    using stable sorting, and return the partition for the specified rank.

    Args:
        files (list): List of file objects with estimated_time attribute
        rank (int): Index of the partition to return (0 to size-1)
        size (int): Number of partitions

    Returns:
        list: List of file objects in the specified rank's partition
    """
    weights = [f.estimated_time for f in files]

    if not weights or size <= 0 or size > len(weights):
        return []

    # Create list of (weight, original_index) tuples
    # Using negative index as secondary key to maintain original order for equal weights
    indexed_weights = [(w, -i) for i, w in enumerate(weights)]
    # Stable sort in descending order by weight
    # If weights are equal, larger (negative) index comes first (i.e., earlier original position)
    indexed_weights = sorted(indexed_weights, reverse=True)

    # Extract original indices (negate back to positive)
    indexed_weights = [(w, -i) for w, i in indexed_weights]

    # Initialize partitions and their sums
    partitions = [[] for _ in range(size)]
    sums = [0.0] * size

    # Greedy approach: assign each weight to partition with smallest current sum
    for weight, idx in indexed_weights:
        # Find partition with minimum sum
        min_sum_idx = sums.index(min(sums))
        partitions[min_sum_idx].append(idx)
        sums[min_sum_idx] += weight

    # Return the files corresponding to the indices in the specified rank's partition
    indices = partitions[rank]
    return [files[i] for i in indices]
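# Worked example (illustrative sketch; the file names and times below are
# hypothetical): with estimated times of 300, 200, and 100 seconds and size=2,
# the greedy pass assigns the 300s file to one partition and the 200s + 100s
# files to the other, so both partitions sum to 300 seconds:
#
#   demo = [TestFile("a.py", 300), TestFile("b.py", 200), TestFile("c.py", 100)]
#   auto_partition(demo, rank=0, size=2)  # -> [TestFile("a.py", 300)]
#   auto_partition(demo, rank=1, size=2)  # -> [TestFile("b.py", 200), TestFile("c.py", 100)]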


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--timeout-per-file",
        type=int,
        default=1800,
        help="The time limit for running one file in seconds.",
    )
    arg_parser.add_argument(
        "--suite",
        type=str,
        default=list(suites.keys())[0],
        choices=list(suites.keys()) + ["all"],
        help="The suite to run",
    )
    arg_parser.add_argument(
        "--range-begin",
        type=int,
        default=0,
        help="The begin index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--range-end",
        type=int,
        default=None,
        help="The end index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--auto-partition-id",
        type=int,
        help="Use auto load balancing. The part id.",
    )
    arg_parser.add_argument(
        "--auto-partition-size",
        type=int,
        help="Use auto load balancing. The number of parts.",
    )
    args = arg_parser.parse_args()
    print(f"{args=}")

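    # Select the test files: every test_*.py found recursively for "all",
    # otherwise the files listed for the chosen suite.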
    if args.suite == "all":
        files = [TestFile(name) for name in glob.glob("**/test_*.py", recursive=True)]
    else:
        files = suites[args.suite]

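    # Optionally shard the selected files across parallel jobs, either by greedy
    # auto-partitioning on estimated runtime or by an explicit index range.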
    if args.auto_partition_size:
        files = auto_partition(files, args.auto_partition_id, args.auto_partition_size)
    else:
        files = files[args.range_begin : args.range_end]

    print("The running tests are ", [f.name for f in files])

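    # Run the selected files with the per-file timeout and propagate the result
    # as the process exit code.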
    exit_code = run_unittest_files(files, args.timeout_per_file)
    exit(exit_code)