"llm/ext_server/utils.hpp" did not exist on "34d00f90b1cf9db0c59c953b3128f58d25073b36"
run_suite.py 13.6 KB
Newer Older
1
2
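"""Run a suite of SGLang unit-test files.

Illustrative invocations (based on the argument definitions in this file):
    python run_suite.py --suite per-commit
    python run_suite.py --suite per-commit --auto-partition-id 0 --auto-partition-size 4
"""
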
import argparse
import glob
from dataclasses import dataclass

from sglang.test.test_utils import run_unittest_files


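# A TestFile pairs a unit-test file path with a rough runtime estimate in
# seconds (default 60); the estimates drive the load balancing in auto_partition.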
@dataclass
class TestFile:
    name: str
    estimated_time: float = 60


suites = {
    "per-commit": [
        TestFile("models/lora/test_lora.py", 200),
        TestFile("models/lora/test_lora_eviction.py", 200),
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("models/lora/test_lora_update.py", 800),
        TestFile("models/lora/test_lora_qwen3.py", 97),
        TestFile("models/test_embedding_models.py", 73),
        # TestFile("models/test_clip_models.py", 52),
        TestFile("models/test_encoder_embedding_models.py", 100),
        TestFile("models/test_cross_encoder_models.py", 100),
        TestFile("models/test_compressed_tensors_models.py", 42),
        TestFile("models/test_generation_models.py", 103),
        # TestFile("models/test_gme_qwen_models.py", 45),
        # TestFile("models/test_grok_models.py", 60),  # Disabled due to illegal memory access
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("models/test_vlm_models.py", 437),
        TestFile("models/test_transformers_models.py", 320),
        TestFile("openai_server/basic/test_protocol.py", 10),
        TestFile("openai_server/basic/test_serving_chat.py", 10),
        TestFile("openai_server/basic/test_serving_completions.py", 10),
        TestFile("openai_server/basic/test_serving_embedding.py", 10),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/basic/test_openai_server.py", 149),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_json_constrained.py", 98),
        TestFile("openai_server/features/test_json_mode.py", 90),
        TestFile("openai_server/features/test_openai_server_ebnf.py", 95),
        TestFile("openai_server/features/test_openai_server_hidden_states.py", 240),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/function_call/test_openai_function_calling.py", 60),
        TestFile("openai_server/function_call/test_tool_choice.py", 226),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_matched_stop.py", 60),
        TestFile("openai_server/validation/test_openai_server_ignore_eos.py", 85),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eagle_infer_a.py", 370),
        TestFile("test_eagle_infer_b.py", 700),
        TestFile("test_ebnf_constrained.py", 108),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_fa3.py", 376),
        # TestFile("test_flashmla.py", 352),
        TestFile("test_fp8_kernel.py", 8),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_hicache.py", 116),
        TestFile("test_hicache_mla.py", 127),
        TestFile("test_hicache_storage.py", 127),
        TestFile("test_hidden_states.py", 55),
        TestFile("test_int8_kernel.py", 8),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_io_struct.py", 8),
        TestFile("test_jinja_template_utils.py", 1),
        TestFile("test_metrics.py", 32),
        TestFile("test_mla.py", 167),
        TestFile("test_mla_deepseek_v3.py", 700),
        TestFile("test_mla_int8_deepseek_v3.py", 429),
        TestFile("test_mla_flashinfer.py", 302),
        TestFile("test_mla_fp8.py", 93),
        TestFile("test_no_chunked_prefill.py", 108),
        TestFile("test_no_overlap_scheduler.py", 234),
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_regex_constrained.py", 64),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_request_queue_validation.py", 30),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_srt_engine.py", 261),
        TestFile("test_srt_endpoint.py", 130),
        TestFile("test_start_profile.py", 60),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_torchao.py", 70),
        TestFile("test_triton_attention_kernels.py", 4),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_triton_moe_channel_fp8_kernel.py", 25),
        TestFile("test_triton_sliding_window.py", 250),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_update_weights_from_tensor.py", 48),
        TestFile("test_utils_update_weights.py", 48),
        TestFile("test_vision_chunked_prefill.py", 175),
        TestFile("test_vlm_input_format.py", 300),
        TestFile("test_vision_openai_server_a.py", 584),
        TestFile("test_vision_openai_server_b.py", 620),
        TestFile("test_w8a8_quantization.py", 46),
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_hybrid_attn_backend.py", 100),
    ],
    "per-commit-amd": [
        TestFile("models/lora/test_lora_backend.py", 99),
        TestFile("models/lora/test_multi_lora_backend.py", 60),
        TestFile("models/lora/test_lora_cuda_graph.py", 250),
        TestFile("test_mla.py", 242),
        TestFile("test_mla_deepseek_v3.py", 221),
        TestFile("test_torch_compile.py", 76),
        TestFile("test_torch_compile_moe.py", 172),
        TestFile("models/test_qwen_models.py", 82),
        TestFile("models/test_reward_models.py", 132),
        TestFile("openai_server/basic/test_openai_embedding.py", 141),
        TestFile("openai_server/features/test_enable_thinking.py", 70),
        TestFile("openai_server/features/test_reasoning_content.py", 89),
        TestFile("openai_server/validation/test_large_max_new_tokens.py", 41),
        TestFile("openai_server/validation/test_request_length_validation.py", 31),
        TestFile("test_abort.py", 51),
        TestFile("test_block_int8.py", 22),
        TestFile("test_create_kvindices.py", 2),
        TestFile("test_chunked_prefill.py", 313),
        TestFile("test_eval_fp8_accuracy.py", 303),
        TestFile("test_function_call_parser.py", 10),
        TestFile("test_fused_moe.py", 30),
        TestFile("test_input_embeddings.py", 38),
        TestFile("test_metrics.py", 32),
        TestFile("test_no_chunked_prefill.py", 108),
        # TestFile("test_no_overlap_scheduler.py", 234), # Disabled temporarily and track in #7703
        TestFile("test_penalty.py", 41),
        TestFile("test_page_size.py", 60),
        TestFile("test_pytorch_sampling_backend.py", 66),
        TestFile("test_radix_attention.py", 105),
        TestFile("test_retract_decode.py", 54),
        TestFile("test_server_args.py", 1),
        TestFile("test_skip_tokenizer_init.py", 117),
        TestFile("test_torch_native_attention_backend.py", 123),
        TestFile("test_triton_attention_backend.py", 150),
        TestFile("test_update_weights_from_disk.py", 114),
        TestFile("test_vertex_endpoint.py", 31),
        # TestFile("test_vision_chunked_prefill.py", 175), # Disabled temporarily and track in #7701
        TestFile("test_reasoning_parser.py", 5),
        TestFile("test_rope_rocm.py", 3),
        TestFile("test_awq_dequant.py", 2),
    ],
    "per-commit-1-ascend-npu": [
        TestFile("test_ascend_tp1_bf16.py", 400),
    ],
    "per-commit-2-ascend-npu": [
        TestFile("test_ascend_tp2_bf16.py", 400),
    ],
    "per-commit-4-ascend-npu": [
        TestFile("test_ascend_mla_w8a8int8.py", 400),
    ],
    "per-commit-2-gpu": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_dp_attention.py", 277),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
        TestFile("test_release_memory_occupation.py", 127),
    ],
    "per-commit-2-gpu-amd": [
        TestFile("models/lora/test_lora_tp.py", 116),
        TestFile("test_data_parallelism.py", 73),
        TestFile("test_patch_torch.py", 19),
        TestFile("test_update_weights_from_distributed.py", 103),
    ],
    "per-commit-4-gpu": [
        TestFile("test_local_attn.py", 250),
        TestFile("test_pp_single_node.py", 372),
        TestFile("test_multi_instance_release_memory_occupation.py", 64),
    ],
    "per-commit-4-gpu-deepep": [
        TestFile("test_deepep_small.py", 531),
    ],
    "per-commit-4-gpu-amd": [
        TestFile("test_pp_single_node.py", 150),
    ],
    "per-commit-8-gpu": [
        # Disabled because it hangs on the CI.
        # TestFile("test_moe_ep.py", 181),
        TestFile("test_disaggregation.py", 499),
        TestFile("test_disaggregation_different_tp.py", 155),
        TestFile("test_full_deepseek_v3.py", 333),
    ],
    "per-commit-8-gpu-deepep": [
        TestFile("test_deepep_large.py", 338),
    ],
    "per-commit-8-gpu-amd": [
        TestFile("test_full_deepseek_v3.py", 250),
    ],
    "per-commit-8-gpu-b200": [
        # add more here
    ],
    "per-commit-cpu": [
        TestFile("cpu/test_activation.py"),
        TestFile("cpu/test_binding.py"),
        TestFile("cpu/test_decode.py"),
        TestFile("cpu/test_extend.py"),
        TestFile("cpu/test_gemm.py"),
        TestFile("cpu/test_mla.py"),
        TestFile("cpu/test_moe.py"),
        TestFile("cpu/test_norm.py"),
        TestFile("cpu/test_qkv_proj_with_rope.py"),
        TestFile("cpu/test_rope.py"),
        TestFile("cpu/test_shared_expert.py"),
        TestFile("cpu/test_topk.py"),
        TestFile("test_intel_amx_attention_backend.py"),
    ],
    "nightly": [
        TestFile("test_nightly_gsm8k_eval.py"),
    ],
    "nightly-amd": [
        TestFile("test_nightly_gsm8k_eval_amd.py"),
    ],
    "vllm_dependency_test": [
        TestFile("test_awq.py", 163),
        TestFile("test_bnb.py", 5),
        TestFile("test_gguf.py", 96),
        TestFile("test_gptqmodel_dynamic.py", 102),
        TestFile("test_vllm_dependency.py", 185),
    ],
}


def auto_partition(files, rank, size):
    """
    Partition files into size sublists with approximately equal sums of estimated times
    using stable sorting, and return the partition for the specified rank.

    Args:
        files (list): List of file objects with estimated_time attribute
        rank (int): Index of the partition to return (0 to size-1)
        size (int): Number of partitions

    Returns:
        list: List of file objects in the specified rank's partition
    """
    weights = [f.estimated_time for f in files]

    if not weights or size <= 0 or size > len(weights):
        return []

    # Create list of (weight, original_index) tuples
    # Using negative index as secondary key to maintain original order for equal weights
    indexed_weights = [(w, -i) for i, w in enumerate(weights)]
    # Stable sort in descending order by weight
    # If weights are equal, larger (negative) index comes first (i.e., earlier original position)
    indexed_weights = sorted(indexed_weights, reverse=True)

    # Extract original indices (negate back to positive)
    indexed_weights = [(w, -i) for w, i in indexed_weights]

    # Initialize partitions and their sums
    partitions = [[] for _ in range(size)]
    sums = [0.0] * size

    # Greedy approach: assign each weight to partition with smallest current sum
    for weight, idx in indexed_weights:
        # Find partition with minimum sum
        min_sum_idx = sums.index(min(sums))
        partitions[min_sum_idx].append(idx)
        sums[min_sum_idx] += weight

    # Return the files corresponding to the indices in the specified rank's partition
    indices = partitions[rank]
    return [files[i] for i in indices]
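
# Illustrative example (not executed): with estimated times [300, 200, 100, 60]
# and size=2, the weights are sorted in descending order and each file goes to
# the partition with the smaller running sum, giving roughly 360s (300 + 60)
# for rank 0 and 300s (200 + 100) for rank 1.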


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--timeout-per-file",
        type=int,
        default=1800,
        help="The time limit for running one file in seconds.",
    )
    arg_parser.add_argument(
        "--suite",
        type=str,
        default=list(suites.keys())[0],
        choices=list(suites.keys()) + ["all"],
        help="The suite to run",
    )
    arg_parser.add_argument(
        "--range-begin",
        type=int,
        default=0,
        help="The begin index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--range-end",
        type=int,
        default=None,
        help="The end index of the range of the files to run.",
    )
    arg_parser.add_argument(
        "--auto-partition-id",
        type=int,
        help="Use auto load balancing. The part id.",
    )
    arg_parser.add_argument(
        "--auto-partition-size",
        type=int,
        help="Use auto load balancing. The number of parts.",
    )
    args = arg_parser.parse_args()
    print(f"{args=}")

    if args.suite == "all":
        # Wrap discovered paths in TestFile so the code below can use .name and .estimated_time.
        files = [TestFile(name) for name in glob.glob("**/test_*.py", recursive=True)]
    else:
        files = suites[args.suite]

    # Auto partitioning takes precedence over the explicit index range.
    if args.auto_partition_size:
        files = auto_partition(files, args.auto_partition_id, args.auto_partition_size)
    else:
        files = files[args.range_begin : args.range_end]

    print("The running tests are ", [f.name for f in files])

    exit_code = run_unittest_files(files, args.timeout_per_file)
    exit(exit_code)