import argparse
import glob
from dataclasses import dataclass

from sglang.test.test_utils import run_unittest_files


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60


suites = {
    "per-commit": [
        TestFile("models/lora/test_lora.py", 200),
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("models/lora/test_lora_update.py", 400),
        TestFile("models/test_embedding_models.py", 73),
        # TestFile("models/test_clip_models.py", 52),
        TestFile("models/test_encoder_embedding_models.py", 100),
        TestFile("models/test_cross_encoder_models.py", 100),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_generation_models.py", 103),
        # TestFile("models/test_gme_qwen_models.py", 45),
        # TestFile("models/test_grok_models.py", 60),  # Disabled due to illegal memory access
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_vlm_models.py", 437),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_cache_report.py", 100),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eagle_infer_a.py", 370),
        TestFile("test_eagle_infer_b.py", 700),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_fa3.py", 376),
        # TestFile("test_flashmla.py", 352),
        TestFile("test_fp8_kernel.py", 8),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_hicache.py", 116),
        TestFile("test_hicache_mla.py", 127),
        TestFile("test_hidden_states.py", 55),
        TestFile("test_int8_kernel.py", 8),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_io_struct.py", 8),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 167),
        TestFile("test_mla_deepseek_v3.py", 700),
        TestFile("test_mla_int8_deepseek_v3.py", 429),
        TestFile("test_mla_flashinfer.py", 302),
        TestFile("test_mla_fp8.py", 93),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_start_profile.py", 60),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_torchao.py", 70),
        TestFile("test_triton_attention_kernels.py", 4),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_triton_moe_channel_fp8_kernel.py", 25),
        TestFile("test_triton_sliding_window.py", 250),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_update_weights_from_tensor.py", 48),
        TestFile("test_vertex_endpoint.py", 31),
        TestFile("test_vision_chunked_prefill.py", 175),
        TestFile("test_vlm_input_format.py", 300),
        TestFile("test_vision_openai_server_a.py", 584),
        TestFile("test_vision_openai_server_b.py", 556),
        TestFile("test_w8a8_quantization.py", 46),
        TestFile("test_reasoning_parser.py", 5),
    ],
    "per-commit-amd": [
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("test_mla.py", 242),
        TestFile("test_mla_deepseek_v3.py", 221),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_metrics.py", 32),
        TestFile("test_no_chunked_prefill.py", 108),
        # TestFile("test_no_overlap_scheduler.py", 234), # Disabled temporarily and track in #7703
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_vertex_endpoint.py", 31),
        # TestFile("test_vision_chunked_prefill.py", 175), # Disabled temporarily and track in #7701
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_rope_rocm.py", 3),
    ],
    "per-commit-npu": [
        TestFile("test_ascend_attention_backend.py", 400),
    ],
    "per-commit-2-gpu": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_dp_attention.py", 137),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
        TestFile("test_release_memory_occupation.py", 44),
    ],
    "per-commit-2-gpu-amd": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
    ],
    "per-commit-4-gpu": [
        TestFile("test_local_attn.py", 250),
        TestFile("test_pp_single_node.py", 150),
        TestFile("test_multi_instance_release_memory_occupation.py", 64),
    ],
    "per-commit-4-gpu-deepep": [
        TestFile("test_deepep_small.py", 531),
    ],
    "per-commit-4-gpu-amd": [
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-8-gpu": [
        # Disabled because it hangs on the CI.
        # TestFile("test_moe_ep.py", 181),
        TestFile("test_disaggregation.py", 270),
        TestFile("test_disaggregation_different_tp.py", 155),
        TestFile("test_full_deepseek_v3.py", 463),
    ],
    "per-commit-8-gpu-deepep": [
        TestFile("test_deepep_large.py", 485),
    ],
    "per-commit-8-gpu-amd": [
        TestFile("test_full_deepseek_v3.py", 250),
    ],
    "per-commit-cpu": [
        TestFile("cpu/test_activation.py"),
        TestFile("cpu/test_binding.py"),
        TestFile("cpu/test_decode.py"),
        TestFile("cpu/test_extend.py"),
        TestFile("cpu/test_gemm.py"),
        TestFile("cpu/test_mla.py"),
        TestFile("cpu/test_moe.py"),
        TestFile("cpu/test_norm.py"),
        TestFile("cpu/test_qkv_proj_with_rope.py"),
        TestFile("cpu/test_rope.py"),
        TestFile("cpu/test_shared_expert.py"),
        TestFile("cpu/test_topk.py"),
        TestFile("test_intel_amx_attention_backend.py"),
    ],
    "nightly": [
        TestFile("test_nightly_gsm8k_eval.py"),
    ],
    "nightly-amd": [
        TestFile("test_nightly_gsm8k_eval_amd.py"),
    ],
    "vllm_dependency_test": [
        TestFile("test_awq.py"),
        TestFile("test_bnb.py"),
        TestFile("test_gguf.py", 78),
        TestFile("test_gptqmodel_dynamic.py", 72),
        TestFile("test_vllm_dependency.py"),
    ],
}


def auto_partition(files, rank, size):
    """
    Partition files into size sublists with approximately equal sums of estimated times
    using stable sorting, and return the partition for the specified rank.

    Args:
        files (list): List of file objects with estimated_time attribute
        rank (int): Index of the partition to return (0 to size-1)
        size (int): Number of partitions

    Returns:
        list: List of file objects in the specified rank's partition
    """
    weights = [f.estimated_time for f in files]

    if not weights or size <= 0 or size > len(weights):
        return []

    # Create list of (weight, original_index) tuples
    # Using negative index as secondary key to maintain original order for equal weights
    indexed_weights = [(w, -i) for i, w in enumerate(weights)]
    # Stable sort in descending order by weight
    # If weights are equal, larger (negative) index comes first (i.e., earlier original position)
    indexed_weights = sorted(indexed_weights, reverse=True)

    # Extract original indices (negate back to positive)
    indexed_weights = [(w, -i) for w, i in indexed_weights]

    # Initialize partitions and their sums
    partitions = [[] for _ in range(size)]
    sums = [0.0] * size

    # Greedy approach: assign each weight to partition with smallest current sum
    for weight, idx in indexed_weights:
        # Find partition with minimum sum
        min_sum_idx = sums.index(min(sums))
        partitions[min_sum_idx].append(idx)
        sums[min_sum_idx] += weight

    # Return the files corresponding to the indices in the specified rank's partition
    indices = partitions[rank]
    return [files[i] for i in indices]

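# A minimal sketch of how auto_partition behaves, assuming three hypothetical
# test files with estimated times 30, 20, and 10 split across two ranks. The
# greedy pass sorts by descending weight and always appends to the partition
# with the smallest running sum, so rank 0 ends up with the 30s file and
# rank 1 with the 20s and 10s files (both partitions sum to 30):
#
#   example_files = [TestFile("a.py", 30), TestFile("b.py", 20), TestFile("c.py", 10)]
#   auto_partition(example_files, rank=0, size=2)  # -> [TestFile("a.py", 30)]
#   auto_partition(example_files, rank=1, size=2)  # -> [TestFile("b.py", 20), TestFile("c.py", 10)]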

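# Example invocations (suite names come from the `suites` dict above; the
# partition and range values below are only illustrative):
#
#   python run_suite.py --suite per-commit --auto-partition-id 0 --auto-partition-size 4
#   python run_suite.py --suite per-commit-2-gpu --range-begin 0 --range-end 5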
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--timeout-per-file",
        type=int,
        default=1800,
        help="The time limit for running one file in seconds.",
    )
    arg_parser.add_argument(
        "--suite",
        type=str,
        default=list(suites.keys())[0],
        choices=list(suites.keys()) + ["all"],
        help="The suite to run",
    )
    arg_parser.add_argument(
        "--range-begin",
        type=int,
        default=0,
        help="The begin index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--range-end",
        type=int,
        default=None,
        help="The end index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--auto-partition-id",
        type=int,
        help="Use auto load balancing. The part id.",
    )
    arg_parser.add_argument(
        "--auto-partition-size",
        type=int,
        help="Use auto load balancing. The number of parts.",
    )
    args = arg_parser.parse_args()
    print(f"{args=}")

    if args.suite == "all":
        # Wrap discovered paths in TestFile so downstream code can rely on
        # .name and .estimated_time, just like the predefined suites.
        files = [TestFile(name) for name in glob.glob("**/test_*.py", recursive=True)]
    else:
        files = suites[args.suite]

    if args.auto_partition_size:
        files = auto_partition(files, args.auto_partition_id, args.auto_partition_size)
    else:
        files = files[args.range_begin : args.range_end]

    print("The running tests are ", [f.name for f in files])

    exit_code = run_unittest_files(files, args.timeout_per_file)
    exit(exit_code)