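"""Run a suite of SGLang unit test files.

Example invocation (illustrative values):
    python run_suite.py --suite per-commit --auto-partition-size 4 --auto-partition-id 0
"""
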
import argparse
import glob
from dataclasses import dataclass

from sglang.test.test_utils import run_unittest_files


@dataclass
class TestFile:
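    """A test file and its estimated runtime in seconds, used to balance test shards."""
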
    name: str
    estimated_time: float = 60


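# Maps a suite name to the list of test files it runs; a suite is selected
# with the --suite flag below.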
suites = {
    "per-commit": [
        TestFile("models/lora/test_lora.py", 200),
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("models/lora/test_lora_update.py", 400),
        TestFile("models/test_embedding_models.py", 73),
        # TestFile("models/test_clip_models.py", 52),
        TestFile("models/test_encoder_embedding_models.py", 100),
        TestFile("models/test_cross_encoder_models.py", 100),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_generation_models.py", 103),
        # TestFile("models/test_gme_qwen_models.py", 45),
        # TestFile("models/test_grok_models.py", 60),  # Disabled due to illegal memory access
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_vlm_models.py", 437),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_cache_report.py", 100),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eagle_infer_a.py", 370),
        TestFile("test_eagle_infer_b.py", 270),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_fa3.py", 376),
        # TestFile("test_flashmla.py", 352),
        TestFile("test_fp8_kernel.py", 8),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_hicache.py", 116),
        TestFile("test_hicache_mla.py", 127),
        TestFile("test_hidden_states.py", 55),
        TestFile("test_int8_kernel.py", 8),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 167),
        TestFile("test_mla_deepseek_v3.py", 342),
        TestFile("test_mla_int8_deepseek_v3.py", 429),
        TestFile("test_mla_flashinfer.py", 302),
        TestFile("test_mla_fp8.py", 93),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_torchao.py", 70),
        TestFile("test_triton_attention_kernels.py", 4),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_triton_moe_channel_fp8_kernel.py", 25),
        TestFile("test_triton_sliding_window.py", 250),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_update_weights_from_tensor.py", 48),
        TestFile("test_vertex_endpoint.py", 31),
        TestFile("test_vision_chunked_prefill.py", 175),
        TestFile("test_vlm_input_format.py", 300),
        TestFile("test_vision_openai_server_a.py", 584),
        TestFile("test_vision_openai_server_b.py", 556),
        TestFile("test_w8a8_quantization.py", 46),
        TestFile("test_reasoning_parser.py", 5),
    ],
    "per-commit-amd": [
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("test_mla.py", 242),
        TestFile("test_mla_deepseek_v3.py", 221),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_metrics.py", 32),
        TestFile("test_no_chunked_prefill.py", 108),
        # TestFile("test_no_overlap_scheduler.py", 234), # Disabled temporarily; tracked in #7703
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_vertex_endpoint.py", 31),
        # TestFile("test_vision_chunked_prefill.py", 175), # Disabled temporarily; tracked in #7701
        TestFile("test_reasoning_parser.py", 5),
    ],
    "per-commit-npu": [
        TestFile("test_ascend_attention_backend.py", 400),
    ],
    "per-commit-2-gpu": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_dp_attention.py", 137),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
        TestFile("test_release_memory_occupation.py", 44),
    ],
    "per-commit-2-gpu-amd": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_mla_tp.py", 170),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
    ],
    "per-commit-4-gpu": [
        TestFile("test_local_attn.py", 250),
        TestFile("test_pp_single_node.py", 150),
        TestFile("test_multi_instance_release_memory_occupation.py", 64),
    ],
    "per-commit-4-gpu-amd": [
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-8-gpu": [
        # Disabled deepep tests temporarily because they take too much time.
        # TODO: re-enable them after reducing the test time with compilation cache and smaller models.
        # TestFile("test_deepep_intranode.py", 50),
        # TestFile("test_deepep_low_latency.py", 50),
        # TestFile("test_moe_deepep_eval_accuracy_large.py", 250),
        # Disabled because it hangs on the CI.
        # TestFile("test_moe_ep.py", 181),
        TestFile("test_disaggregation.py", 270),
        TestFile("test_disaggregation_different_tp.py", 155),
        TestFile("test_full_deepseek_v3.py", 463),
    ],
    "per-commit-8-gpu-amd": [
        TestFile("test_full_deepseek_v3.py", 250),
    ],
    "per-commit-cpu": [
        TestFile("cpu/test_activation.py"),
        TestFile("cpu/test_binding.py"),
        TestFile("cpu/test_decode.py"),
        TestFile("cpu/test_extend.py"),
        TestFile("cpu/test_gemm.py"),
        TestFile("cpu/test_mla.py"),
        TestFile("cpu/test_moe.py"),
        TestFile("cpu/test_norm.py"),
        TestFile("cpu/test_qkv_proj_with_rope.py"),
        TestFile("cpu/test_rope.py"),
        TestFile("cpu/test_shared_expert.py"),
        TestFile("cpu/test_topk.py"),
        TestFile("test_intel_amx_attention_backend.py"),
    ],
    "nightly": [
        TestFile("test_nightly_gsm8k_eval.py"),
    ],
    "nightly-amd": [
        TestFile("test_nightly_gsm8k_eval_amd.py"),
    ],
    "vllm_dependency_test": [
        TestFile("test_awq.py"),
        TestFile("test_bnb.py"),
        TestFile("test_gguf.py", 78),
        TestFile("test_gptqmodel_dynamic.py", 72),
        TestFile("test_vllm_dependency.py"),
    ],
}


def auto_partition(files, rank, size):
    """
    Partition files into size sublists with approximately equal sums of estimated times
    using stable sorting, and return the partition for the specified rank.

    Args:
        files (list): List of file objects with estimated_time attribute
        rank (int): Index of the partition to return (0 to size-1)
        size (int): Number of partitions

    Returns:
        list: List of file objects in the specified rank's partition
    """
    weights = [f.estimated_time for f in files]

    if not weights or size <= 0 or size > len(weights):
        return []

    # Create list of (weight, original_index) tuples
    # Using negative index as secondary key to maintain original order for equal weights
    indexed_weights = [(w, -i) for i, w in enumerate(weights)]
    # Stable sort in descending order by weight
    # If weights are equal, larger (negative) index comes first (i.e., earlier original position)
    indexed_weights = sorted(indexed_weights, reverse=True)

    # Extract original indices (negate back to positive)
    indexed_weights = [(w, -i) for w, i in indexed_weights]

    # Initialize partitions and their sums
    partitions = [[] for _ in range(size)]
    sums = [0.0] * size

    # Greedy approach: assign each weight to partition with smallest current sum
    for weight, idx in indexed_weights:
        # Find partition with minimum sum
        min_sum_idx = sums.index(min(sums))
        partitions[min_sum_idx].append(idx)
        sums[min_sum_idx] += weight

    # Return the files corresponding to the indices in the specified rank's partition
    indices = partitions[rank]
    return [files[i] for i in indices]
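
# Illustrative auto_partition trace (example values, not taken from the suites
# above): with estimated times [300, 200, 100, 50] and size=2, the descending
# greedy pass assigns 300 -> partition 0, 200 -> partition 1, 100 -> partition 1,
# and 50 -> partition 0, giving totals of 350 and 300.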


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--timeout-per-file",
        type=int,
        default=1800,
        help="The time limit for running one file in seconds.",
    )
    arg_parser.add_argument(
        "--suite",
        type=str,
        default=list(suites.keys())[0],
        choices=list(suites.keys()) + ["all"],
        help="The suite to run",
    )
    arg_parser.add_argument(
        "--range-begin",
        type=int,
        default=0,
        help="The begin index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--range-end",
        type=int,
        default=None,
        help="The end index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--auto-partition-id",
        type=int,
        help="Use auto load balancing. The part id.",
    )
    arg_parser.add_argument(
        "--auto-partition-size",
        type=int,
        help="Use auto load balancing. The number of parts.",
    )
    args = arg_parser.parse_args()
    print(f"{args=}")

    if args.suite == "all":
        # Wrap paths in TestFile so downstream code can use .name/.estimated_time.
        files = [TestFile(name) for name in glob.glob("**/test_*.py", recursive=True)]
    else:
        files = suites[args.suite]

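    # Shard with the load-balanced auto partitioner when requested; otherwise
    # slice an explicit [range-begin, range-end) window of the file list.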
    if args.auto_partition_size:
        files = auto_partition(files, args.auto_partition_id, args.auto_partition_size)
    else:
        files = files[args.range_begin : args.range_end]

    print("The running tests are:", [f.name for f in files])

    exit_code = run_unittest_files(files, args.timeout_per_file)
    exit(exit_code)