"""Common utilities for testing and benchmarking"""
2

3
import argparse
4
import copy
5
import logging
6
import os
7
import random
8
import subprocess
9
import threading
10
import time
11
import unittest
12
from concurrent.futures import ThreadPoolExecutor
Byron Hsu's avatar
Byron Hsu committed
13
from dataclasses import dataclass
Liangsheng Yin's avatar
Liangsheng Yin committed
14
from functools import partial
15
from types import SimpleNamespace
16
from typing import Callable, List, Optional, Tuple
Liangsheng Yin's avatar
Liangsheng Yin committed
17

Lianmin Zheng's avatar
Lianmin Zheng committed
18
19
import numpy as np
import requests
20
21
import torch
import torch.nn.functional as F
Liangsheng Yin's avatar
Liangsheng Yin committed
22

23
from sglang.bench_serving import run_benchmark
Lianmin Zheng's avatar
Lianmin Zheng committed
24
from sglang.global_config import global_config
Ying Sheng's avatar
Ying Sheng committed
25
26
from sglang.lang.backend.openai import OpenAI
from sglang.lang.backend.runtime_endpoint import RuntimeEndpoint
27
28
from sglang.srt.utils import (
    get_bool_env_var,
29
    get_device,
30
31
32
33
    is_port_available,
    kill_process_tree,
    retry,
)
34
from sglang.test.run_eval import run_eval
35
from sglang.utils import get_exception_traceback
Liangsheng Yin's avatar
Liangsheng Yin committed
36

Lianmin Zheng's avatar
Lianmin Zheng committed
37
38
39
# General test models
DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST_BASE = "meta-llama/Llama-3.2-1B"
DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST = "Qwen/Qwen1.5-MoE-A2.7B"

DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST = "Alibaba-NLP/gte-Qwen2-1.5B-instruct"
DEFAULT_SMALL_CROSS_ENCODER_MODEL_NAME_FOR_TEST = "cross-encoder/ms-marco-MiniLM-L6-v2"

# MLA test models
DEFAULT_MLA_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
DEFAULT_MLA_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_TEST_MLA = "lmsys/sglang-ci-dsv3-test"
DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN = "lmsys/sglang-ci-dsv3-test-NextN"

# FP8 models
DEFAULT_MODEL_NAME_FOR_TEST_FP8 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_ACCURACY_TEST_FP8 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST_FP8 = (
    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8-dynamic"
)
DEFAULT_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST_FP8 = (
    "nvidia/Llama-3.1-8B-Instruct-FP8"
)

# EAGLE
DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST = "meta-llama/Llama-2-7b-chat-hf"
DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST = "lmsys/sglang-EAGLE-llama2-chat-7B"
DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3 = "jamesliu1/sglang-EAGLE3-Llama-3.1-Instruct-8B"

# Other use cases
DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION = (
    "meta-llama/Llama-4-Scout-17B-16E-Instruct"
)
DEFAULT_REASONING_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
DEFAULT_DEEPEP_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-V3-0324"
DEFAULT_AWQ_MOE_MODEL_NAME_FOR_TEST = (
    "hugging-quants/Mixtral-8x7B-Instruct-v0.1-AWQ-INT4"
)
DEFAULT_ENABLE_THINKING_MODEL_NAME_FOR_TEST = "Qwen/Qwen3-30B-A3B"

# Nightly tests
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1 = "meta-llama/Llama-3.1-8B-Instruct,mistralai/Mistral-7B-Instruct-v0.3,deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct,google/gemma-2-27b-it"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2 = "meta-llama/Llama-3.1-70B-Instruct,mistralai/Mixtral-8x7B-Instruct-v0.1,Qwen/Qwen2-57B-A14B-Instruct"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8,neuralmagic/Mistral-7B-Instruct-v0.3-FP8,neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8,neuralmagic/gemma-2-2b-it-FP8"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2 = "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8,neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8,neuralmagic/Qwen2-72B-Instruct-FP8,neuralmagic/Qwen2-57B-A14B-Instruct-FP8,neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_QUANT_TP1 = "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4,hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4,hugging-quants/Mixtral-8x7B-Instruct-v0.1-AWQ-INT4"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST_QWEN = "Qwen/Qwen2.5-1.5B-Instruct"
DEFAULT_SMALL_VLM_MODEL_NAME_FOR_TEST = "Qwen/Qwen2.5-VL-3B-Instruct"

DEFAULT_IMAGE_URL = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"
DEFAULT_VIDEO_URL = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"

DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH = 600


def is_in_ci():
    """Return whether it is in CI runner."""
    return get_bool_env_var("SGLANG_IS_IN_CI")


def is_in_amd_ci():
    """Return whether it is in an AMD CI runner."""
    return get_bool_env_var("SGLANG_AMD_CI")


if is_in_ci():
    DEFAULT_PORT_FOR_SRT_TEST_RUNNER = (
        5000 + int(os.environ.get("CUDA_VISIBLE_DEVICES", "0")[0]) * 100
    )
else:
    DEFAULT_PORT_FOR_SRT_TEST_RUNNER = (
        7000 + int(os.environ.get("CUDA_VISIBLE_DEVICES", "0")[0]) * 100
    )
DEFAULT_URL_FOR_TEST = f"http://127.0.0.1:{DEFAULT_PORT_FOR_SRT_TEST_RUNNER + 1000}"

if is_in_amd_ci():
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH = 3000


def call_generate_lightllm(prompt, temperature, max_tokens, stop=None, url=None):
    assert url is not None

    data = {
        "inputs": prompt,
        "parameters": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "stop_sequences": stop,
        },
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    pred = res.json()["generated_text"][0]
    return pred


def find_available_port(base_port: int):
    port = base_port + random.randint(100, 1000)
    while True:
        if is_port_available(port):
            return port
        if port < 60000:
            port += 42
        else:
            port -= 43
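
# Illustrative usage (comment-only, so nothing runs at import time); the base
# port below is an arbitrary choice for this sketch:
#
#     port = find_available_port(20000)
#     base_url = f"http://127.0.0.1:{port}"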


def call_generate_vllm(prompt, temperature, max_tokens, stop=None, n=1, url=None):
    assert url is not None

    data = {
        "prompt": prompt,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "stop": stop,
        "n": n,
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    if n == 1:
        pred = res.json()["text"][0][len(prompt) :]
    else:
        pred = [x[len(prompt) :] for x in res.json()["text"]]
    return pred


def call_generate_outlines(
    prompt, temperature, max_tokens, stop=None, regex=None, n=1, url=None
):
    assert url is not None

    data = {
        "prompt": prompt,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "stop": stop,
        "regex": regex,
        "n": n,
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    if n == 1:
        pred = res.json()["text"][0][len(prompt) :]
    else:
        pred = [x[len(prompt) :] for x in res.json()["text"]]
    return pred


def call_generate_srt_raw(prompt, temperature, max_tokens, stop=None, url=None):
    assert url is not None

    data = {
        "text": prompt,
        "sampling_params": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "stop": stop,
        },
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    obj = res.json()
    pred = obj["text"]
    return pred
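
# Illustrative usage (comment-only): greedy-decode a short completion from a
# locally running SRT server; the URL is an assumed default, not a fixed API.
#
#     pred = call_generate_srt_raw(
#         "The capital of France is",
#         temperature=0.0,
#         max_tokens=16,
#         url="http://127.0.0.1:30000/generate",
#     )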


def call_generate_guidance(
    prompt, temperature, max_tokens, stop=None, n=1, regex=None, model=None
):
    assert model is not None
    from guidance import gen

    rets = []
    for _ in range(n):
        out = (
            model
            + prompt
            + gen(
                name="answer",
                max_tokens=max_tokens,
                temperature=temperature,
                stop=stop,
                regex=regex,
            )
        )
        rets.append(out["answer"])
    return rets if n > 1 else rets[0]


def call_select_lightllm(context, choices, url=None):
    assert url is not None

    scores = []
    for i in range(len(choices)):
        data = {
            "inputs": context + choices[i],
            "parameters": {
                "max_new_tokens": 1,
            },
        }
        res = requests.post(url, json=data)
        assert res.status_code == 200
        # NOTE: The endpoint does not return per-choice scores here, so this
        # placeholder always selects the first choice.
        scores.append(0)
    return np.argmax(scores)


def call_select_vllm(context, choices, url=None):
    assert url is not None

    scores = []
    for i in range(len(choices)):
        data = {
            "prompt": context + choices[i],
            "max_tokens": 1,
            "prompt_logprobs": 1,
        }
        res = requests.post(url, json=data)
        assert res.status_code == 200
        scores.append(res.json().get("prompt_score", 0))
    # NOTE: "prompt_score" is only present if vllm/entrypoints/api_server.py is
    # patched to return it, e.g.:
    #
    #     if final_output.prompt_logprobs is not None:
    #         score = np.mean([prob[t_id] for t_id, prob in zip(final_output.prompt_token_ids[1:], final_output.prompt_logprobs[1:])])
    #         ret["prompt_score"] = score
    return np.argmax(scores)


def call_select_guidance(context, choices, model=None):
    assert model is not None
    from guidance import select

    out = model + context + select(choices, name="answer")
    return choices.index(out["answer"])


def add_common_other_args_and_parse(parser: argparse.ArgumentParser):
    parser.add_argument("--parallel", type=int, default=64)
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=None)
    parser.add_argument(
        "--backend",
        type=str,
        required=True,
        choices=[
            "vllm",
            "outlines",
            "lightllm",
            "gserver",
            "guidance",
            "srt-raw",
            "llama.cpp",
        ],
    )
    parser.add_argument("--n-ctx", type=int, default=4096)
    parser.add_argument(
        "--model-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
    )
    parser.add_argument("--result-file", type=str, default="result.jsonl")
    args = parser.parse_args()

    if args.port is None:
        default_port = {
            "vllm": 21000,
            "outlines": 21000,
            "lightllm": 22000,
            "srt-raw": 30000,
            "gserver": 9988,
        }
        args.port = default_port.get(args.backend, None)
    return args


def auto_config_device() -> str:
    """Auto-config available device platform"""

    try:
        device = get_device()
    except (RuntimeError, ImportError) as e:
        print(f"Warning: {e} - Falling back to CPU")
        device = "cpu"

    return device


def add_common_sglang_args_and_parse(parser: argparse.ArgumentParser):
    parser.add_argument("--parallel", type=int, default=64)
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=30000)
    parser.add_argument("--backend", type=str, default="srt")
    parser.add_argument(
        "--device",
        type=str,
        default="auto",
        choices=["auto", "cuda", "rocm", "cpu"],
        help="Device type (auto/cuda/rocm/cpu). Auto will detect available platforms",
    )
    parser.add_argument("--result-file", type=str, default="result.jsonl")
    args = parser.parse_args()

    return args


def select_sglang_backend(args: argparse.Namespace):
    if args.backend.startswith("srt"):
        if args.backend == "srt-no-parallel":
            global_config.enable_parallel_encoding = False
        backend = RuntimeEndpoint(f"{args.host}:{args.port}")
    elif args.backend.startswith("gpt-"):
        backend = OpenAI(args.backend)
    else:
        raise ValueError(f"Invalid backend: {args.backend}")
    return backend


def _get_call_generate(args: argparse.Namespace):
    if args.backend == "lightllm":
        return partial(call_generate_lightllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "vllm":
        return partial(call_generate_vllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "srt-raw":
        return partial(call_generate_srt_raw, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "gserver":
        return partial(call_generate_gserver, url=f"{args.host}:{args.port}")
    elif args.backend == "outlines":
        return partial(call_generate_outlines, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "guidance":
        from guidance import models

        model = models.LlamaCpp(args.model_path, n_gpu_layers=-1, n_ctx=args.n_ctx)
        call_generate = partial(call_generate_guidance, model=model)
        call_generate("Hello,", 1.0, 8, ".")
        return call_generate
    else:
        raise ValueError(f"Invalid backend: {args.backend}")


def _get_call_select(args: argparse.Namespace):
    if args.backend == "lightllm":
        return partial(call_select_lightllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "vllm":
        return partial(call_select_vllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "guidance":
        from guidance import models

        model = models.LlamaCpp(args.model_path, n_gpu_layers=-1, n_ctx=args.n_ctx)
        call_select = partial(call_select_guidance, model=model)

        call_select("Hello,", ["world", "earth"])
        return call_select
    else:
        raise ValueError(f"Invalid backend: {args.backend}")


def get_call_generate(args: argparse.Namespace):
    call_generate = _get_call_generate(args)

    def func(*args, **kwargs):
        try:
            return call_generate(*args, **kwargs)
        except Exception:
            print("Exception in call_generate:\n" + get_exception_traceback())
            raise

    return func
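
# Illustrative usage (comment-only): all backends resolved by this helper share
# the positional protocol (prompt, temperature, max_tokens, stop):
#
#     args = add_common_other_args_and_parse(argparse.ArgumentParser())
#     call_generate = get_call_generate(args)
#     answer = call_generate("Q: 1+1=? A:", 0.0, 4, None)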


def get_call_select(args: argparse.Namespace):
    call_select = _get_call_select(args)

    def func(*args, **kwargs):
        try:
            return call_select(*args, **kwargs)
        except Exception:
            print("Exception in call_select:\n" + get_exception_traceback())
            raise

    return func


def popen_launch_server(
    model: str,
    base_url: str,
    timeout: float,
    api_key: Optional[str] = None,
    other_args: list[str] = [],
    env: Optional[dict] = None,
    return_stdout_stderr: Optional[tuple] = None,
    device: str = "auto",
    pd_separated: bool = False,
):
    """Launch a server process with automatic device detection.

    Args:
        device: Device type ("auto", "cuda", "rocm" or "cpu").
                If "auto", will detect available platforms automatically.
    """
    # Auto-detect device if needed
    if device == "auto":
        device = auto_config_device()
        print(f"Auto-configed device: {device}", flush=True)
        other_args = list(other_args)
        other_args += ["--device", str(device)]

    _, host, port = base_url.split(":")
    host = host[2:]

    if pd_separated:
        command = "sglang.launch_pd_server"
    else:
        command = "sglang.launch_server"

    command = [
        "python3",
        "-m",
        command,
        "--model-path",
        model,
        *[str(x) for x in other_args],
    ]

    if pd_separated:
        command.extend(
            [
                "--lb-host",
                host,
                "--lb-port",
                port,
            ]
        )
    else:
        command.extend(
            [
                "--host",
                host,
                "--port",
                port,
            ]
        )

    if api_key:
        command += ["--api-key", api_key]

    print(f"command={' '.join(command)}")

    if return_stdout_stderr:
        process = subprocess.Popen(
            command,
            stdout=return_stdout_stderr[0],
            stderr=return_stdout_stderr[1],
            env=env,
            text=True,
        )
    else:
        process = subprocess.Popen(command, stdout=None, stderr=None, env=env)

    start_time = time.perf_counter()
    with requests.Session() as session:
        while time.perf_counter() - start_time < timeout:

            return_code = process.poll()
            if return_code is not None:
                # Server failed to start (non-zero exit code) or crashed.
                raise Exception(
                    f"Server unexpectedly exited ({return_code=}). "
                    "Error logs describing the cause are usually printed above this line."
                )

            try:
                headers = {
                    "Content-Type": "application/json; charset=utf-8",
                    "Authorization": f"Bearer {api_key}",
                }
                response = session.get(
                    f"{base_url}/health_generate",
                    headers=headers,
                )
                if response.status_code == 200:
                    return process
            except requests.RequestException:
                pass

            time.sleep(10)

    kill_process_tree(process.pid)
    raise TimeoutError("Server failed to start within the timeout period.")
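
# Illustrative usage (comment-only): launch a server, then always tear down the
# whole process tree, even when the test body fails.
#
#     process = popen_launch_server(
#         DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
#         DEFAULT_URL_FOR_TEST,
#         timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
#     )
#     try:
#         ...  # send requests against DEFAULT_URL_FOR_TEST
#     finally:
#         kill_process_tree(process.pid)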


def popen_launch_pd_server(
    model: str,
    base_url: str,
    timeout: float,
    api_key: Optional[str] = None,
    other_args: list[str] = (),
    env: Optional[dict] = None,
):
    _, host, port = base_url.split(":")
    host = host[2:]

    command = "sglang.launch_server"

    command = [
        "python3",
        "-m",
        command,
        "--model-path",
        model,
        *[str(x) for x in other_args],
    ]

    command.extend(
        [
            "--host",
            host,
            "--port",
            port,
        ]
    )

    if api_key:
        command += ["--api-key", api_key]

    print(f"command={' '.join(command)}")

    process = subprocess.Popen(command, stdout=None, stderr=None, env=env)

    return process


def run_with_timeout(
    func: Callable,
    args: tuple = (),
    kwargs: Optional[dict] = None,
    timeout: Optional[float] = None,
):
    """Run a function with timeout."""
    ret_value = []

    def _target_func():
        ret_value.append(func(*args, **(kwargs or {})))

    t = threading.Thread(target=_target_func)
    t.start()
    t.join(timeout=timeout)
    if t.is_alive():
        raise TimeoutError(f"Function did not finish within {timeout} seconds")

    if not ret_value:
        raise RuntimeError("Function returned no value; it may have raised in the worker thread")

    return ret_value[0]
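
# Illustrative usage (comment-only): bound a call to 5 seconds. Note that on
# timeout the worker thread is abandoned, not killed.
#
#     result = run_with_timeout(requests.get, args=(DEFAULT_URL_FOR_TEST,), timeout=5)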


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60


def run_unittest_files(files: List[TestFile], timeout_per_file: float):
    tic = time.perf_counter()
    success = True

    for i, file in enumerate(files):
        filename, estimated_time = file.name, file.estimated_time
        process = None

        def run_one_file(filename):
            nonlocal process

            filename = os.path.join(os.getcwd(), filename)
            print(
                f".\n.\nBegin ({i}/{len(files) - 1}):\npython3 {filename}\n.\n.\n",
                flush=True,
            )
            tic = time.perf_counter()

            process = subprocess.Popen(
                ["python3", filename], stdout=None, stderr=None, env=os.environ
            )
            process.wait()
            elapsed = time.perf_counter() - tic

            print(
                f".\n.\nEnd ({i}/{len(files) - 1}):\n{filename=}, {elapsed=:.0f}, {estimated_time=}\n.\n.\n",
                flush=True,
            )
            return process.returncode

        try:
            ret_code = run_with_timeout(
                run_one_file, args=(filename,), timeout=timeout_per_file
            )
            assert (
                ret_code == 0
            ), f"expected return code 0, but {filename} returned {ret_code}"
        except TimeoutError:
            kill_process_tree(process.pid)
            time.sleep(5)
            print(
                f"\nTimeout after {timeout_per_file} seconds when running {filename}\n",
                flush=True,
            )
            success = False
            break

    if success:
        print(f"Success. Time elapsed: {time.perf_counter() - tic:.2f}s", flush=True)
    else:
        print(f"Fail. Time elapsed: {time.perf_counter() - tic:.2f}s", flush=True)

    return 0 if success else -1
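
# Illustrative usage (comment-only; the file names are placeholders):
#
#     files = [
#         TestFile("test_srt_endpoint.py", estimated_time=120),
#         TestFile("test_openai_server.py", estimated_time=300),
#     ]
#     exit_code = run_unittest_files(files, timeout_per_file=1200)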


def get_similarities(vec1, vec2):
    return F.cosine_similarity(torch.tensor(vec1), torch.tensor(vec2), dim=0)
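
# Worked example (comment-only): identical vectors give cosine similarity 1.0,
# orthogonal vectors give 0.0.
#
#     get_similarities([1.0, 0.0], [1.0, 0.0])  # tensor(1.)
#     get_similarities([1.0, 0.0], [0.0, 1.0])  # tensor(0.)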


def get_benchmark_args(
    base_url="",
    dataset_name="",
    dataset_path="",
    tokenizer="",
    num_prompts=500,
    sharegpt_output_len=None,
    random_input_len=4096,
    random_output_len=2048,
    sharegpt_context_len=None,
    request_rate=float("inf"),
    disable_stream=False,
    disable_ignore_eos=False,
    seed: int = 0,
    device="auto",
    pd_separated: bool = False,
):
    return SimpleNamespace(
        backend="sglang",
        base_url=base_url,
        host=None,
        port=None,
        dataset_name=dataset_name,
        dataset_path=dataset_path,
        model=None,
        tokenizer=tokenizer,
        num_prompts=num_prompts,
        sharegpt_output_len=sharegpt_output_len,
        sharegpt_context_len=sharegpt_context_len,
        random_input_len=random_input_len,
        random_output_len=random_output_len,
        random_range_ratio=0.0,
        request_rate=request_rate,
        multi=None,
        output_file=None,
        disable_tqdm=False,
        disable_stream=disable_stream,
        return_logprob=False,
        seed=seed,
        disable_ignore_eos=disable_ignore_eos,
        extra_request_body=None,
        apply_chat_template=False,
        profile=None,
        lora_name=None,
        prompt_suffix="",
        device=device,
        pd_separated=pd_separated,
    )


def run_bench_serving(
    model,
    num_prompts,
    request_rate,
    other_server_args,
    dataset_name="random",
    dataset_path="",
    tokenizer=None,
    random_input_len=4096,
    random_output_len=2048,
    sharegpt_context_len=None,
    disable_stream=False,
    disable_ignore_eos=False,
    need_warmup=False,
    seed: int = 0,
    device="auto",
):
    if device == "auto":
        device = auto_config_device()
    # Launch the server
    base_url = DEFAULT_URL_FOR_TEST
    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_server_args,
    )

    # Run benchmark
    args = get_benchmark_args(
        base_url=base_url,
        dataset_name=dataset_name,
        dataset_path=dataset_path,
        tokenizer=tokenizer,
        num_prompts=num_prompts,
        random_input_len=random_input_len,
        random_output_len=random_output_len,
        sharegpt_context_len=sharegpt_context_len,
        request_rate=request_rate,
        disable_stream=disable_stream,
        disable_ignore_eos=disable_ignore_eos,
        seed=seed,
        device=device,
    )

    try:
        if need_warmup:
            warmup_args = copy.deepcopy(args)
            warmup_args.num_prompts = 16
            run_benchmark(warmup_args)
        res = run_benchmark(args)
    finally:
        kill_process_tree(process.pid)

    assert res["completed"] == num_prompts
    return res
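
# Illustrative usage (comment-only; the numbers are placeholders, not tuned
# settings):
#
#     res = run_bench_serving(
#         model=DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
#         num_prompts=100,
#         request_rate=float("inf"),
#         other_server_args=[],
#     )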


def run_bench_serving_multi(
    model,
    base_url,
    other_server_args,
    benchmark_args,
    need_warmup=False,
    pd_separated=False,
):
    # Launch the server
    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_server_args,
        pd_separated=pd_separated,
    )

    # run benchmark for all
    res_l = []
    try:
        for args in benchmark_args:
            if need_warmup:
                warmup_args = copy.deepcopy(args)
                warmup_args.num_prompts = 16
                run_benchmark(warmup_args)

            res = run_benchmark(args)
            res_l.append((args, res))
    finally:
        kill_process_tree(process.pid)

    return res_l


def run_bench_one_batch(model, other_args):
    """Launch a offline process with automatic device detection.

    Args:
        device: Device type ("auto", "cuda", "rocm" or "cpu").
                If "auto", will detect available platforms automatically.
    """
    # Auto-detect device if needed

    device = auto_config_device()
    print(f"Auto-configed device: {device}", flush=True)
    other_args += ["--device", str(device)]

    command = [
        "python3",
        "-m",
        "sglang.bench_one_batch",
        "--batch-size",
        "1",
        "--input",
        "128",
        "--output",
        "8",
        *[str(x) for x in other_args],
    ]
    if model is not None:
        command += ["--model-path", model]
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    try:
        stdout, stderr = process.communicate()
        output = stdout.decode()
        error = stderr.decode()
        print(f"Output: {output}", flush=True)
        print(f"Error: {error}", flush=True)

        # The throughput figure is expected on the third-to-last stdout line,
        # ending in "... <value> token/s"; grab the second-to-last field.
        lastline = output.split("\n")[-3]
        output_throughput = float(lastline.split(" ")[-2])
    finally:
        kill_process_tree(process.pid)

    return output_throughput


def run_bench_offline_throughput(model, other_args):
    command = [
        "python3",
        "-m",
        "sglang.bench_offline_throughput",
        "--num-prompts",
        "1",
        "--dataset-name",
        "random",
        "--random-input-len",
        "256",
        "--random-output-len",
        "256",
        "--model-path",
        model,
        *[str(x) for x in other_args],
    ]

    print(f"{command=}")
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    try:
        stdout, stderr = process.communicate()
        output = stdout.decode()
        error = stderr.decode()
        print(f"Output: {output}", flush=True)
        print(f"Error: {error}", flush=True)

        output_throughput = -1
        for line in output.split("\n"):
            if "Last generation throughput (tok/s):" in line:
                output_throughput = float(line.split(":")[-1])
    finally:
        kill_process_tree(process.pid)

    return output_throughput


def run_bench_one_batch_server(
    model,
    base_url,
    server_args,
    bench_args,
    other_server_args,
    simulate_spec_acc_lens=None,
):
    from sglang.bench_one_batch_server import run_benchmark

    if simulate_spec_acc_lens is not None:
        env = {**os.environ, "SIMULATE_ACC_LEN": str(simulate_spec_acc_lens)}
    else:
        env = None

    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_server_args,
        env=env,
    )
    try:
        run_benchmark(server_args=server_args, bench_args=bench_args)
    finally:
        kill_process_tree(process.pid)


def lcs(X, Y):
    """Return the length of the longest common subsequence of X and Y (O(mn) DP)."""
    m = len(X)
    n = len(Y)
    L = [[0] * (n + 1) for _ in range(m + 1)]

    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0 or j == 0:
                L[i][j] = 0
            elif X[i - 1] == Y[j - 1]:
                L[i][j] = L[i - 1][j - 1] + 1
            else:
                L[i][j] = max(L[i - 1][j], L[i][j - 1])

    return L[m][n]


def calculate_rouge_l(output_strs_list1, output_strs_list2):
    """calculate the ROUGE-L score"""
    rouge_l_scores = []

    for s1, s2 in zip(output_strs_list1, output_strs_list2):
        lcs_len = lcs(s1, s2)
        precision = lcs_len / len(s1) if len(s1) > 0 else 0
        recall = lcs_len / len(s2) if len(s2) > 0 else 0
        if precision + recall > 0:
            fmeasure = (2 * precision * recall) / (precision + recall)
        else:
            fmeasure = 0.0
        rouge_l_scores.append(fmeasure)

    return rouge_l_scores
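
# Worked example (comment-only): "abcde" vs. "abde" share an LCS of length 4,
# so precision = 4/5, recall = 4/4, and F-measure = 2 * 0.8 * 1.0 / 1.8 ≈ 0.889.
#
#     calculate_rouge_l(["abcde"], ["abde"])  # [0.888...]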


STDERR_FILENAME = "/tmp/stderr.txt"
STDOUT_FILENAME = "/tmp/stdout.txt"


def read_output(output_lines: List[str], filename: str = STDERR_FILENAME):
    """Print the output in real time with another thread."""
    while not os.path.exists(filename):
        time.sleep(0.01)

    pt = 0
    while pt >= 0:
        if pt > 0 and not os.path.exists(filename):
            break
        try:
            lines = open(filename).readlines()
        except FileNotFoundError:
            print(f"{pt=}, {os.path.exists(filename)=}")
            raise
        for line in lines[pt:]:
            print(line, end="", flush=True)
            output_lines.append(line)
            pt += 1
        time.sleep(0.1)


def run_and_check_memory_leak(
    workload_func,
    disable_radix_cache,
    enable_mixed_chunk,
    disable_overlap,
    chunked_prefill_size,
    assert_has_abort,
):
    other_args = [
        "--chunked-prefill-size",
        str(chunked_prefill_size),
        "--log-level",
        "debug",
    ]
    if disable_radix_cache:
        other_args += ["--disable-radix-cache"]
    if enable_mixed_chunk:
        other_args += ["--enable-mixed-chunk"]
    if disable_overlap:
        other_args += ["--disable-overlap-schedule"]

    model = DEFAULT_MODEL_NAME_FOR_TEST
    port = random.randint(4000, 5000)
    base_url = f"http://127.0.0.1:{port}"

    # Create files and launch the server
    stdout = open(STDOUT_FILENAME, "w")
    stderr = open(STDERR_FILENAME, "w")
    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_args,
        return_stdout_stderr=(stdout, stderr),
    )

    # Launch a thread to stream the output
    output_lines = []
    t = threading.Thread(target=read_output, args=(output_lines,))
    t.start()

    # Run the workload
    workload_func(base_url, model)

    # Clean up everything
    kill_process_tree(process.pid)
    stdout.close()
    stderr.close()
    if os.path.exists(STDOUT_FILENAME):
        os.remove(STDOUT_FILENAME)
    if os.path.exists(STDERR_FILENAME):
        os.remove(STDERR_FILENAME)
    kill_process_tree(process.pid)
    t.join()

    # Assert success
    has_new_server = False
    has_leak = False
    has_abort = False
    for line in output_lines:
        if "Uvicorn running" in line:
            has_new_server = True
        if "leak" in line:
            has_leak = True
        if "Abort" in line:
            has_abort = True

    assert has_new_server
    assert not has_leak
    if assert_has_abort:
        assert has_abort


def run_command_and_capture_output(command, env: Optional[dict] = None):
    stdout = open(STDOUT_FILENAME, "w")
    stderr = open(STDERR_FILENAME, "w")
    process = subprocess.Popen(
        command, stdout=stdout, stderr=stdout, env=env, text=True
    )

    # Launch a thread to stream the output
    output_lines = []
    t = threading.Thread(target=read_output, args=(output_lines, STDOUT_FILENAME))
    t.start()

    # Join the process
    process.wait()

    stdout.close()
    stderr.close()
    if os.path.exists(STDOUT_FILENAME):
        os.remove(STDOUT_FILENAME)
    if os.path.exists(STDERR_FILENAME):
        os.remove(STDERR_FILENAME)
    kill_process_tree(process.pid)
    t.join()

    return output_lines


def run_mmlu_test(
    disable_radix_cache=False,
    enable_mixed_chunk=False,
    disable_overlap=False,
    chunked_prefill_size=32,
):
    def workload_func(base_url, model):
        # Run the eval
        args = SimpleNamespace(
            base_url=base_url,
            model=model,
            eval_name="mmlu",
            num_examples=128,
            num_threads=128,
        )

        try:
            metrics = run_eval(args)
            assert metrics["score"] >= 0.65, f"{metrics=}"
        finally:
            pass

    run_and_check_memory_leak(
        workload_func,
        disable_radix_cache,
        enable_mixed_chunk,
        disable_overlap,
        chunked_prefill_size,
        assert_has_abort=False,
    )


def run_multi_request_test(
    disable_radix_cache=False,
    enable_mixed_chunk=False,
    disable_overlap=False,
    chunked_prefill_size=32,
):
    def workload_func(base_url, model):
        def run_one(_):
            prompt = """
            System: You are a helpful assistant.
            User: What is the capital of France?
            Assistant: The capital of France is
            """

            response = requests.post(
                f"{base_url}/generate",
                json={
                    "text": prompt,
                    "sampling_params": {
                        "temperature": 0,
                        "max_new_tokens": 8,
                    },
                },
            )
            ret = response.json()

        with ThreadPoolExecutor(2) as executor:
            list(executor.map(run_one, list(range(4))))

    run_and_check_memory_leak(
        workload_func,
        disable_radix_cache,
        enable_mixed_chunk,
        disable_overlap,
        chunked_prefill_size,
        assert_has_abort=False,
    )


def write_github_step_summary(content):
    if not os.environ.get("GITHUB_STEP_SUMMARY"):
        logging.warning("GITHUB_STEP_SUMMARY environment variable not set")
        return

    with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as f:
        f.write(content)


def run_logprob_check(self: unittest.TestCase, arg: Tuple):
    (
        input_len,
        output_len,
        temperature,
        logprob_start_len,
        return_logprob,
        top_logprobs_num,
    ) = arg
    input_ids = list(range(input_len))

    response = requests.post(
        self.base_url + "/generate",
        json={
            "input_ids": input_ids,
            "sampling_params": {
                "temperature": temperature,
                "max_new_tokens": output_len,
                "ignore_eos": True,
            },
            "return_logprob": return_logprob,
            "logprob_start_len": logprob_start_len,
            "top_logprobs_num": top_logprobs_num,
        },
    )
    res = response.json()
    self.assertEqual(res["meta_info"]["prompt_tokens"], input_len)
    self.assertEqual(res["meta_info"]["completion_tokens"], output_len)

    # Test the number of tokens are correct
    if return_logprob:
        self.assertEqual(
            len(res["meta_info"]["input_token_logprobs"]) + logprob_start_len,
            res["meta_info"]["prompt_tokens"],
        )
        self.assertEqual(len(res["meta_info"]["output_token_logprobs"]), output_len)

        if top_logprobs_num:
            self.assertEqual(
                len(res["meta_info"]["input_top_logprobs"]) + logprob_start_len,
                res["meta_info"]["prompt_tokens"],
            )
            self.assertEqual(len(res["meta_info"]["output_top_logprobs"]), output_len)

            for i in range(output_len):
                self.assertEqual(
                    len(res["meta_info"]["output_top_logprobs"][i]),
                    top_logprobs_num,
                )

                # Test the top-1 tokens are the same as output tokens if temperature == 0
                if temperature == 0:
                    rank = 0
                    while rank < len(res["meta_info"]["output_top_logprobs"][i]):
                        try:
                            self.assertListEqual(
                                res["meta_info"]["output_token_logprobs"][i],
                                res["meta_info"]["output_top_logprobs"][i][rank],
                            )
                            break
                        except AssertionError:
                            # There's a tie. Allow the second item in this case.
                            if (
                                res["meta_info"]["output_top_logprobs"][i][rank][0]
                                == res["meta_info"]["output_top_logprobs"][i][rank + 1][
                                    0
                                ]
                            ):
                                rank += 1
                            else:
                                raise


class CustomTestCase(unittest.TestCase):
    def _callTestMethod(self, method):
        max_retry = int(
            os.environ.get("SGLANG_TEST_MAX_RETRY", "1" if is_in_ci() else "0")
        )
        retry(
            lambda: super(CustomTestCase, self)._callTestMethod(method),
            max_retry=max_retry,
        )
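

# Illustrative usage (comment-only): inherit from CustomTestCase instead of
# unittest.TestCase so flaky tests are retried in CI; SGLANG_TEST_MAX_RETRY
# controls the retry budget.
#
#     class TestMyFeature(CustomTestCase):
#         def test_something(self):
#             self.assertTrue(True)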