import argparse
import glob
from dataclasses import dataclass

from sglang.test.test_utils import run_unittest_files
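# Typical invocations (suite names come from the `suites` dict defined below;
# the exact flags and values used by CI may differ, these are illustrative):
#
#   python3 run_suite.py --suite per-commit
#   python3 run_suite.py --suite per-commit --auto-partition-size 4 --auto-partition-id 1
#   python3 run_suite.py --suite per-commit-8-gpu --timeout-per-file 3600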


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60


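# Test suites, keyed by CI job name. The second TestFile argument is the
# estimated runtime in seconds, which auto_partition() below uses to balance CI shards.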
suites = {
    "per-commit": [
        TestFile("hicache/test_hicache.py", 116),
        TestFile("hicache/test_hicache_mla.py", 127),
        TestFile("hicache/test_hicache_storage.py", 127),
        TestFile("lora/test_lora.py", 200),
        TestFile("lora/test_lora_eviction.py", 200),
        TestFile("lora/test_lora_backend.py", 99),
        TestFile("lora/test_multi_lora_backend.py", 60),
        TestFile("lora/test_lora_cuda_graph.py", 250),
        TestFile("lora/test_lora_update.py", 400),
        TestFile("lora/test_lora_qwen3.py", 97),
        TestFile("lora/test_lora_radix_cache.py", 100),
        TestFile("models/test_embedding_models.py", 73),
        # TestFile("models/test_clip_models.py", 52),
        TestFile("models/test_encoder_embedding_models.py", 100),
        TestFile("models/test_cross_encoder_models.py", 100),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_generation_models.py", 103),
        # TestFile("models/test_gme_qwen_models.py", 45),
        # TestFile("models/test_grok_models.py", 60),  # Disabled due to illegal memory access
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_vlm_models.py", 437),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("quant/test_block_int8.py", 22),
        TestFile("quant/test_fp8_kernel.py", 8),
        TestFile("quant/test_int8_kernel.py", 8),
        TestFile("quant/test_triton_scaled_mm.py", 8),
        TestFile("quant/test_w8a8_quantization.py", 46),
        TestFile("rl/test_update_weights_from_disk.py", 114),
        TestFile("rl/test_update_weights_from_tensor.py", 48),
        TestFile("test_abort.py", 51),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eagle_infer_a.py", 370),
        TestFile("test_eagle_infer_b.py", 700),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_fa3.py", 376),
        # TestFile("test_flashmla.py", 352),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_gpt_oss_1gpu.py", 600),
        TestFile("test_harmony_parser.py", 20),
        TestFile("test_hidden_states.py", 55),
        TestFile("test_hybrid_attn_backend.py", 100),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_io_struct.py", 8),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 167),
        TestFile("test_mla_deepseek_v3.py", 700),
        TestFile("test_mla_int8_deepseek_v3.py", 429),
        TestFile("test_mla_flashinfer.py", 302),
        TestFile("test_mla_fp8.py", 93),
        TestFile("test_multi_tokenizer.py", 230),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_original_logprobs.py", 200),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_request_queue_validation.py", 30),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_start_profile.py", 60),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_torchao.py", 70),
        TestFile("test_triton_attention_kernels.py", 4),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_triton_moe_channel_fp8_kernel.py", 25),
        TestFile("test_triton_sliding_window.py", 250),
        TestFile("test_utils_update_weights.py", 48),
        TestFile("test_vision_chunked_prefill.py", 175),
        TestFile("test_vlm_input_format.py", 300),
        TestFile("test_vision_openai_server_a.py", 403),
        TestFile("test_vision_openai_server_b.py", 446),
    ],
    "per-commit-2-gpu": [
        TestFile("lora/test_lora_tp.py", 116),
        TestFile("rl/test_update_weights_from_distributed.py", 103),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_dp_attention.py", 277),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_release_memory_occupation.py", 127),
    ],
    "per-commit-4-gpu": [
        TestFile("test_gpt_oss_4gpu.py", 600),
        TestFile("test_local_attn.py", 250),
        TestFile("test_pp_single_node.py", 372),
        TestFile("test_multi_instance_release_memory_occupation.py", 64),
    ],
    "per-commit-8-gpu": [
        # Disabled because it hangs on the CI.
        # TestFile("ep/test_moe_ep.py", 181),
        TestFile("test_disaggregation.py", 499),
        TestFile("test_disaggregation_different_tp.py", 155),
        TestFile("test_full_deepseek_v3.py", 333),
    ],
    "per-commit-8-gpu-b200": [
        # add more here
    ],
    "per-commit-4-gpu-deepep": [
        TestFile("ep/test_deepep_small.py", 531),
    ],
    "per-commit-8-gpu-deepep": [
        TestFile("ep/test_deepep_large.py", 338),
    ],
    "per-commit-8-gpu-h20": [
        TestFile("quant/test_w4a8_deepseek_v3.py", 371),
    ],
    "nightly": [
        TestFile("test_nightly_gsm8k_eval.py"),
    ],
    "vllm_dependency_test": [
        TestFile("quant/test_awq.py", 163),
        TestFile("test_bnb.py", 5),
        TestFile("test_gptqmodel_dynamic.py", 102),
        TestFile("test_vllm_dependency.py", 185),
        # TestFile("test_gguf.py", 96),
    ],
}

# Add AMD tests
suite_amd = {
    "per-commit-amd": [
        TestFile("hicache/test_hicache.py", 116),
        TestFile("hicache/test_hicache_mla.py", 127),
        TestFile("hicache/test_hicache_storage.py", 127),
        TestFile("lora/test_lora.py", 200),
        TestFile("lora/test_lora_eviction.py", 200),
        TestFile("lora/test_lora_backend.py", 99),
        TestFile("lora/test_multi_lora_backend.py", 60),
        TestFile("lora/test_lora_cuda_graph.py", 250),
        TestFile("lora/test_lora_qwen3.py", 97),
        TestFile("models/test_embedding_models.py", 73),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        # TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("quant/test_block_int8.py", 22),
        TestFile("quant/test_awq_dequant.py", 2),
        TestFile("rl/test_update_weights_from_disk.py", 114),
        # TestFile("rl/test_update_weights_from_tensor.py", 48),
        TestFile("test_abort.py", 51),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_io_struct.py", 8),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 242),
        TestFile("test_mla_deepseek_v3.py", 221),
        TestFile("test_no_chunked_prefill.py", 108),
        # TestFile("test_no_overlap_scheduler.py", 234), # Disabled temporarily and track in #7703
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_rope_rocm.py", 3),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_triton_attention_backend.py", 150),
        # TestFile("test_vision_chunked_prefill.py", 175), # Disabled temporarily and track in #7701
        TestFile("test_wave_attention_kernels.py", 2),
        TestFile("test_wave_attention_backend.py", 150),
    ],
    "per-commit-2-gpu-amd": [
        TestFile("lora/test_lora_tp.py", 116),
        TestFile("rl/test_update_weights_from_distributed.py", 103),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_patch_torch.py", 19),
    ],
    "per-commit-4-gpu-amd": [
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-8-gpu-amd": [
        TestFile("test_full_deepseek_v3.py", 250),
    ],
    "nightly-amd": [
        TestFile("test_nightly_gsm8k_eval_amd.py"),
    ],
}

# Add Intel Xeon tests
suite_xeon = {
    "per-commit-cpu": [
        TestFile("cpu/test_activation.py"),
        TestFile("cpu/test_binding.py"),
        TestFile("cpu/test_decode.py"),
        TestFile("cpu/test_extend.py"),
        TestFile("cpu/test_gemm.py"),
        TestFile("cpu/test_mla.py"),
        TestFile("cpu/test_moe.py"),
        TestFile("cpu/test_norm.py"),
        TestFile("cpu/test_qkv_proj_with_rope.py"),
        TestFile("cpu/test_rope.py"),
        TestFile("cpu/test_shared_expert.py"),
        TestFile("cpu/test_topk.py"),
        TestFile("test_intel_amx_attention_backend.py"),
    ],
}

# Add Ascend NPU tests
suite_ascend = {
    "per-commit-1-ascend-npu": [
        TestFile("ascend/test_ascend_tp1_bf16.py", 400),
        TestFile("ascend/test_ascend_graph_tp1_bf16.py", 400),
    ],
    "per-commit-2-ascend-npu": [
        TestFile("ascend/test_ascend_tp2_bf16.py", 400),
        TestFile("ascend/test_ascend_graph_tp2_bf16.py", 400),
        TestFile("ascend/test_ascend_tp2_fia_bf16.py", 400),
        TestFile("ascend/test_ascend_mla_fia_w8a8int8.py", 400),
    ],
    "per-commit-4-ascend-npu": [
        TestFile("ascend/test_ascend_mla_w8a8int8.py", 400),
    ],
}

suites.update(suite_amd)
suites.update(suite_xeon)
suites.update(suite_ascend)


def auto_partition(files, rank, size):
    """
    Partition files into size sublists with approximately equal sums of estimated times
    using stable sorting, and return the partition for the specified rank.

    Args:
        files (list): List of file objects with estimated_time attribute
        rank (int): Index of the partition to return (0 to size-1)
        size (int): Number of partitions

    Returns:
        list: List of file objects in the specified rank's partition
    """
    weights = [f.estimated_time for f in files]

    if not weights or size <= 0 or size > len(weights):
        return []

    # Create list of (weight, original_index) tuples
    # Using negative index as secondary key to maintain original order for equal weights
    indexed_weights = [(w, -i) for i, w in enumerate(weights)]
    # Stable sort in descending order by weight
    # If weights are equal, larger (negative) index comes first (i.e., earlier original position)
    indexed_weights = sorted(indexed_weights, reverse=True)

    # Extract original indices (negate back to positive)
    indexed_weights = [(w, -i) for w, i in indexed_weights]

    # Initialize partitions and their sums
    partitions = [[] for _ in range(size)]
    sums = [0.0] * size

    # Greedy approach: assign each weight to partition with smallest current sum
    for weight, idx in indexed_weights:
        # Find partition with minimum sum
        min_sum_idx = sums.index(min(sums))
        partitions[min_sum_idx].append(idx)
        sums[min_sum_idx] += weight

    # Return the files corresponding to the indices in the specified rank's partition
    indices = partitions[rank]
    return [files[i] for i in indices]

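# A small, hypothetical illustration of the greedy balancing above (these file
# names are not part of any suite). With two partitions and estimated times of
# 300, 200, 100, and 50 seconds, the heaviest file is placed first and each
# remaining file goes to the partition with the smaller running total, so the
# two shards finish in roughly the same wall-clock time:
#
#   demo = [
#       TestFile("test_a.py", 300),
#       TestFile("test_b.py", 200),
#       TestFile("test_c.py", 100),
#       TestFile("test_d.py", 50),
#   ]
#   auto_partition(demo, rank=0, size=2)  # test_a.py + test_d.py (~350s)
#   auto_partition(demo, rank=1, size=2)  # test_b.py + test_c.py (~300s)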

if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--timeout-per-file",
        type=int,
        default=1800,
        help="The time limit for running one file in seconds.",
    )
    arg_parser.add_argument(
        "--suite",
        type=str,
        default=list(suites.keys())[0],
        choices=list(suites.keys()) + ["all"],
        help="The suite to run",
    )
    arg_parser.add_argument(
        "--range-begin",
        type=int,
        default=0,
        help="The start index (inclusive) of the range of files to run.",
    )
    arg_parser.add_argument(
        "--range-end",
        type=int,
        default=None,
        help="The end index (exclusive) of the range of files to run.",
    )
    arg_parser.add_argument(
        "--auto-partition-id",
        type=int,
        help="Use auto load balancing. The zero-based index of this partition.",
    )
    arg_parser.add_argument(
        "--auto-partition-size",
        type=int,
        help="Use auto load balancing. The total number of partitions.",
    )
    args = arg_parser.parse_args()
    print(f"{args=}")

    if args.suite == "all":
        # Wrap discovered paths in TestFile so the code below can rely on the
        # .name and .estimated_time attributes for every entry.
        files = [TestFile(name) for name in glob.glob("**/test_*.py", recursive=True)]
    else:
        files = suites[args.suite]

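    # Either shard the selected suite by estimated runtime (--auto-partition-id /
    # --auto-partition-size) or fall back to a simple index range
    # (--range-begin / --range-end).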
    if args.auto_partition_size:
        files = auto_partition(files, args.auto_partition_id, args.auto_partition_size)
    else:
        files = files[args.range_begin : args.range_end]

    print("The running tests are ", [f.name for f in files])

    exit_code = run_unittest_files(files, args.timeout_per_file)
    exit(exit_code)