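"""Run a suite of SGLang unittest files, one file at a time, with a per-file timeout.

Example invocations (illustrative; all flags are defined in the argument parser below):

    python3 run_suite.py --suite per-commit
    python3 run_suite.py --suite per-commit --auto-partition-size 4 --auto-partition-id 1
    python3 run_suite.py --suite per-commit --range-begin 0 --range-end 20
"""
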
import argparse
import glob
from dataclasses import dataclass

from sglang.test.test_utils import run_unittest_files


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60
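    # estimated_time is a rough per-file runtime estimate in seconds. It is only
    # used by auto_partition() below to balance files across partitions; the hard
    # per-file limit comes from --timeout-per-file.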


suites = {
    "per-commit": [
        TestFile("models/lora/test_lora.py", 200),
        TestFile("models/lora/test_lora_eviction.py", 200),
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("models/lora/test_lora_update.py", 800),
        TestFile("models/lora/test_lora_qwen3.py", 97),
        TestFile("models/test_embedding_models.py", 73),
        # TestFile("models/test_clip_models.py", 52),
        TestFile("models/test_encoder_embedding_models.py", 100),
        TestFile("models/test_cross_encoder_models.py", 100),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_generation_models.py", 103),
        # TestFile("models/test_gme_qwen_models.py", 45),
        # TestFile("models/test_grok_models.py", 60),  # Disabled due to illegal memory access
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_vlm_models.py", 437),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_cache_report.py", 100),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eagle_infer_a.py", 370),
        TestFile("test_eagle_infer_b.py", 700),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_fa3.py", 376),
        # TestFile("test_flashmla.py", 352),
        TestFile("test_fp8_kernel.py", 8),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_hicache.py", 116),
        TestFile("test_hicache_mla.py", 127),
        TestFile("test_hicache_storage.py", 127),
        TestFile("test_hidden_states.py", 55),
        TestFile("test_int8_kernel.py", 8),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_io_struct.py", 8),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 167),
        TestFile("test_mla_deepseek_v3.py", 700),
        TestFile("test_mla_int8_deepseek_v3.py", 429),
        TestFile("test_mla_flashinfer.py", 302),
        TestFile("test_mla_fp8.py", 93),
        TestFile("test_multi_tokenizer.py", 200),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_request_queue_validation.py", 30),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_start_profile.py", 60),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_torchao.py", 70),
        TestFile("test_triton_attention_kernels.py", 4),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_triton_moe_channel_fp8_kernel.py", 25),
        TestFile("test_triton_sliding_window.py", 250),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_update_weights_from_tensor.py", 48),
        TestFile("test_utils_update_weights.py", 48),
        TestFile("test_vertex_endpoint.py", 31),
        TestFile("test_vision_chunked_prefill.py", 175),
        TestFile("test_vlm_input_format.py", 300),
        TestFile("test_vision_openai_server_a.py", 584),
        TestFile("test_vision_openai_server_b.py", 620),
        TestFile("test_w8a8_quantization.py", 46),
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_hybrid_attn_backend.py", 100),
    ],
    "per-commit-amd": [
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("test_mla.py", 242),
        TestFile("test_mla_deepseek_v3.py", 221),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_metrics.py", 32),
        TestFile("test_no_chunked_prefill.py", 108),
        # TestFile("test_no_overlap_scheduler.py", 234), # Disabled temporarily and track in #7703
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_vertex_endpoint.py", 31),
        # TestFile("test_vision_chunked_prefill.py", 175), # Disabled temporarily and track in #7701
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_rope_rocm.py", 3),
        TestFile("test_awq_dequant.py", 2),
    ],
    "per-commit-1-ascend-npu": [
        TestFile("test_ascend_tp1_bf16.py", 400),
    ],
    "per-commit-2-ascend-npu": [
        TestFile("test_ascend_tp2_bf16.py", 400),
    ],
    "per-commit-4-ascend-npu": [
        TestFile("test_ascend_mla_w8a8int8.py", 400),
    ],
    "per-commit-2-gpu": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_dp_attention.py", 277),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
        TestFile("test_release_memory_occupation.py", 127),
    ],
    "per-commit-2-gpu-amd": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
    ],
    "per-commit-4-gpu": [
        TestFile("test_local_attn.py", 250),
        TestFile("test_pp_single_node.py", 372),
        TestFile("test_multi_instance_release_memory_occupation.py", 64),
    ],
    "per-commit-4-gpu-deepep": [
        TestFile("test_deepep_small.py", 531),
    ],
    "per-commit-4-gpu-amd": [
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-8-gpu": [
        # Disabled because it hangs on the CI.
        # TestFile("test_moe_ep.py", 181),
        TestFile("test_disaggregation.py", 499),
        TestFile("test_disaggregation_different_tp.py", 155),
        TestFile("test_full_deepseek_v3.py", 333),
    ],
    "per-commit-8-gpu-deepep": [
        TestFile("test_deepep_large.py", 338),
    ],
    "per-commit-8-gpu-amd": [
        TestFile("test_full_deepseek_v3.py", 250),
    ],
    "per-commit-8-gpu-b200": [
        # add more here
    ],
    "per-commit-cpu": [
        TestFile("cpu/test_activation.py"),
        TestFile("cpu/test_binding.py"),
        TestFile("cpu/test_decode.py"),
        TestFile("cpu/test_extend.py"),
        TestFile("cpu/test_gemm.py"),
        TestFile("cpu/test_mla.py"),
        TestFile("cpu/test_moe.py"),
        TestFile("cpu/test_norm.py"),
        TestFile("cpu/test_qkv_proj_with_rope.py"),
        TestFile("cpu/test_rope.py"),
        TestFile("cpu/test_shared_expert.py"),
        TestFile("cpu/test_topk.py"),
        TestFile("test_intel_amx_attention_backend.py"),
    ],
    "nightly": [
        TestFile("test_nightly_gsm8k_eval.py"),
    ],
    "nightly-amd": [
        TestFile("test_nightly_gsm8k_eval_amd.py"),
    ],
    "vllm_dependency_test": [
        TestFile("test_awq.py", 163),
        TestFile("test_bnb.py", 5),
        TestFile("test_gguf.py", 96),
        TestFile("test_gptqmodel_dynamic.py", 102),
        TestFile("test_vllm_dependency.py", 185),
    ],
}


def auto_partition(files, rank, size):
    """
    Partition `files` into `size` sublists with approximately equal sums of
    estimated times, using stable sorting, and return the partition for the
    specified rank.

    Args:
        files (list): List of file objects with estimated_time attribute
        rank (int): Index of the partition to return (0 to size-1)
        size (int): Number of partitions

    Returns:
        list: List of file objects in the specified rank's partition
    """
    weights = [f.estimated_time for f in files]

    if not weights or size <= 0 or size > len(weights):
        return []

    # Create list of (weight, original_index) tuples
    # Using negative index as secondary key to maintain original order for equal weights
    indexed_weights = [(w, -i) for i, w in enumerate(weights)]
    # Stable sort in descending order by weight
    # If weights are equal, larger (negative) index comes first (i.e., earlier original position)
    indexed_weights = sorted(indexed_weights, reverse=True)

    # Extract original indices (negate back to positive)
    indexed_weights = [(w, -i) for w, i in indexed_weights]

    # Initialize partitions and their sums
    partitions = [[] for _ in range(size)]
    sums = [0.0] * size

    # Greedy approach: assign each weight to partition with smallest current sum
    for weight, idx in indexed_weights:
        # Find partition with minimum sum
        min_sum_idx = sums.index(min(sums))
        partitions[min_sum_idx].append(idx)
        sums[min_sum_idx] += weight

    # Return the files corresponding to the indices in the specified rank's partition
    indices = partitions[rank]
    return [files[i] for i in indices]
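
# Illustrative example of the greedy balancing above (hypothetical numbers): for
# estimated times [400, 300, 200, 100] and size=2, rank 0 receives the 400s and
# 100s files and rank 1 receives the 300s and 200s files, so both partitions
# sum to 500 seconds.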


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--timeout-per-file",
        type=int,
        default=1800,
        help="The time limit for running one file in seconds.",
    )
    arg_parser.add_argument(
        "--suite",
        type=str,
        default=list(suites.keys())[0],
        choices=list(suites.keys()) + ["all"],
        help="The suite to run",
    )
    arg_parser.add_argument(
        "--range-begin",
        type=int,
        default=0,
        help="The begin index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--range-end",
        type=int,
        default=None,
        help="The end index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--auto-partition-id",
        type=int,
        help="Use auto load balancing. The partition id.",
    )
    arg_parser.add_argument(
        "--auto-partition-size",
        type=int,
        help="Use auto load balancing. The number of partitions.",
    )
    args = arg_parser.parse_args()
    print(f"{args=}")

    if args.suite == "all":
        files = glob.glob("**/test_*.py", recursive=True)
    else:
        files = suites[args.suite]

    if args.auto_partition_size:
        files = auto_partition(files, args.auto_partition_id, args.auto_partition_size)
    else:
        files = files[args.range_begin : args.range_end]

    print("The running tests are ", [f.name for f in files])

    exit_code = run_unittest_files(files, args.timeout_per_file)
    exit(exit_code)