"""Common utilities for testing and benchmarking"""

import argparse
import copy
import logging
import os
import random
import subprocess
import threading
import time
import unittest
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from functools import partial
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple

import numpy as np
import requests
import torch
import torch.nn.functional as F

from sglang.bench_serving import run_benchmark
from sglang.global_config import global_config
from sglang.lang.backend.openai import OpenAI
from sglang.lang.backend.runtime_endpoint import RuntimeEndpoint
from sglang.srt.utils import (
    get_bool_env_var,
    is_port_available,
    kill_process_tree,
    retry,
)
from sglang.test.run_eval import run_eval
from sglang.utils import get_exception_traceback

# General test models
DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
DEFAULT_SMALL_MOE_MODEL_NAME_FOR_TEST = "Qwen/Qwen1.5-MoE-A2.7B"

# MLA test models
DEFAULT_MLA_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
DEFAULT_MLA_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_TEST_MLA = "lmsys/sglang-ci-dsv3-test"
DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN = "lmsys/sglang-ci-dsv3-test-NextN"

# FP8 models
DEFAULT_MODEL_NAME_FOR_TEST_FP8 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_ACCURACY_TEST_FP8 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST_FP8 = (
    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8-dynamic"
)
DEFAULT_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST_FP8 = (
    "nvidia/Llama-3.1-8B-Instruct-FP8"
)

# EAGLE
DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST = "meta-llama/Llama-2-7b-chat-hf"
DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST = "lmsys/sglang-EAGLE-llama2-chat-7B"
DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3 = "jamesliu1/sglang-EAGLE3-Llama-3.1-Instruct-8B"

# Other use cases
DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION = (
    "meta-llama/Llama-4-Scout-17B-16E-Instruct"
)
DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST = "Alibaba-NLP/gte-Qwen2-1.5B-instruct"
DEFAULT_REASONING_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
DEFAULT_DEEPEP_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-V3-0324"
DEFAULT_AWQ_MOE_MODEL_NAME_FOR_TEST = (
    "hugging-quants/Mixtral-8x7B-Instruct-v0.1-AWQ-INT4"
)
DEFAULT_ENABLE_THINKING_MODEL_NAME_FOR_TEST = "Qwen/Qwen3-30B-A3B"

# Nightly tests
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1 = "meta-llama/Llama-3.1-8B-Instruct,mistralai/Mistral-7B-Instruct-v0.3,deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct,google/gemma-2-27b-it"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2 = "meta-llama/Llama-3.1-70B-Instruct,mistralai/Mixtral-8x7B-Instruct-v0.1,Qwen/Qwen2-57B-A14B-Instruct"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1 = "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8,neuralmagic/Mistral-7B-Instruct-v0.3-FP8,neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8,neuralmagic/gemma-2-2b-it-FP8"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2 = "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8,neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8,neuralmagic/Qwen2-72B-Instruct-FP8,neuralmagic/Qwen2-57B-A14B-Instruct-FP8,neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_QUANT_TP1 = "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4,hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4,hugging-quants/Mixtral-8x7B-Instruct-v0.1-AWQ-INT4"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST_QWEN = "Qwen/Qwen2.5-1.5B-Instruct"
DEFAULT_SMALL_VLM_MODEL_NAME = "Qwen/Qwen2-VL-2B"

DEFAULT_IMAGE_URL = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"
DEFAULT_VIDEO_URL = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"

DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH = 1000


def is_in_ci():
    """Return whether it is in CI runner."""
    return get_bool_env_var("SGLANG_IS_IN_CI")


if is_in_ci():
    DEFAULT_PORT_FOR_SRT_TEST_RUNNER = (
        5000 + int(os.environ.get("CUDA_VISIBLE_DEVICES", "0")[0]) * 100
    )
else:
    DEFAULT_PORT_FOR_SRT_TEST_RUNNER = (
        7000 + int(os.environ.get("CUDA_VISIBLE_DEVICES", "0")[0]) * 100
    )
DEFAULT_URL_FOR_TEST = f"http://127.0.0.1:{DEFAULT_PORT_FOR_SRT_TEST_RUNNER + 1000}"
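# Example: in CI with CUDA_VISIBLE_DEVICES="2,3", the leading "2" yields
# 5000 + 2 * 100 = 5200 as the runner port and 6200 for DEFAULT_URL_FOR_TEST;
# the per-GPU offset keeps concurrent runners from colliding on ports.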


def call_generate_lightllm(prompt, temperature, max_tokens, stop=None, url=None):
    assert url is not None

    data = {
        "inputs": prompt,
        "parameters": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "stop_sequences": stop,
        },
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    pred = res.json()["generated_text"][0]
    return pred
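

# A usage sketch for call_generate_lightllm (endpoint and values are
# illustrative; any running LightLLM generate endpoint works):
#
#     pred = call_generate_lightllm(
#         "Paris is the capital of",
#         temperature=0.0,
#         max_tokens=8,
#         url="http://127.0.0.1:22000/generate",
#     )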


def find_available_port(base_port: int):
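    # Start from a random offset above base_port to reduce collisions between
    # concurrent runners, then probe successive candidates (stepping up while
    # below port 60000 and back down above it) until a free port is found.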
    port = base_port + random.randint(100, 1000)
    while True:
        if is_port_available(port):
            return port
        if port < 60000:
            port += 42
        else:
            port -= 43


def call_generate_vllm(prompt, temperature, max_tokens, stop=None, n=1, url=None):
    assert url is not None

    data = {
        "prompt": prompt,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "stop": stop,
        "n": n,
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    if n == 1:
        pred = res.json()["text"][0][len(prompt) :]
    else:
        pred = [x[len(prompt) :] for x in res.json()["text"]]
    return pred


def call_generate_outlines(
    prompt, temperature, max_tokens, stop=None, regex=None, n=1, url=None
):
    assert url is not None

    data = {
        "prompt": prompt,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "stop": stop,
        "regex": regex,
        "n": n,
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    if n == 1:
        pred = res.json()["text"][0][len(prompt) :]
    else:
        pred = [x[len(prompt) :] for x in res.json()["text"]]
    return pred


def call_generate_srt_raw(prompt, temperature, max_tokens, stop=None, url=None):
    assert url is not None

    data = {
        "text": prompt,
        "sampling_params": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "stop": stop,
        },
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    obj = res.json()
    pred = obj["text"]
    return pred


def call_generate_guidance(
    prompt, temperature, max_tokens, stop=None, n=1, regex=None, model=None
):
    assert model is not None
    from guidance import gen

    rets = []
    for _ in range(n):
        out = (
            model
            + prompt
            + gen(
                name="answer",
                max_tokens=max_tokens,
                temperature=temperature,
                stop=stop,
                regex=regex,
            )
        )
        rets.append(out["answer"])
    return rets if n > 1 else rets[0]


def call_select_lightllm(context, choices, url=None):
    assert url is not None

    scores = []
    for i in range(len(choices)):
        data = {
            "inputs": context + choices[i],
            "parameters": {
                "max_new_tokens": 1,
            },
        }
        res = requests.post(url, json=data)
        assert res.status_code == 200
        # NOTE: no score is extracted from the LightLLM response, so every
        # choice scores 0 and argmax always returns the first choice.
        scores.append(0)
    return np.argmax(scores)


def call_select_vllm(context, choices, url=None):
    assert url is not None

    scores = []
    for i in range(len(choices)):
        data = {
            "prompt": context + choices[i],
            "max_tokens": 1,
            "prompt_logprobs": 1,
        }
        res = requests.post(url, json=data)
        assert res.status_code == 200
        scores.append(res.json().get("prompt_score", 0))
    return np.argmax(scores)


# NOTE: for call_select_vllm to return meaningful prompt scores, patch
# vllm/entrypoints/api_server.py to attach one to the response:
#
#     if final_output.prompt_logprobs is not None:
#         score = np.mean([prob[t_id] for t_id, prob in zip(final_output.prompt_token_ids[1:], final_output.prompt_logprobs[1:])])
#         ret["prompt_score"] = score


def call_select_guidance(context, choices, model=None):
    assert model is not None
    from guidance import select

    out = model + context + select(choices, name="answer")
    return choices.index(out["answer"])


def add_common_other_args_and_parse(parser: argparse.ArgumentParser):
    parser.add_argument("--parallel", type=int, default=64)
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=None)
    parser.add_argument(
        "--backend",
        type=str,
        required=True,
        choices=[
            "vllm",
            "outlines",
            "lightllm",
            "gserver",
            "guidance",
            "srt-raw",
            "llama.cpp",
        ],
    )
    parser.add_argument("--n-ctx", type=int, default=4096)
    parser.add_argument(
        "--model-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
    )
    parser.add_argument("--result-file", type=str, default="result.jsonl")
    args = parser.parse_args()

    if args.port is None:
        default_port = {
            "vllm": 21000,
            "outlines": 21000,
            "lightllm": 22000,
            "srt-raw": 30000,
            "gserver": 9988,
        }
        args.port = default_port.get(args.backend, None)
    return args


def add_common_sglang_args_and_parse(parser: argparse.ArgumentParser):
    parser.add_argument("--parallel", type=int, default=64)
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=30000)
    parser.add_argument("--backend", type=str, default="srt")
    parser.add_argument("--result-file", type=str, default="result.jsonl")
    args = parser.parse_args()
    return args


def select_sglang_backend(args: argparse.Namespace):
    if args.backend.startswith("srt"):
        if args.backend == "srt-no-parallel":
            global_config.enable_parallel_encoding = False
        backend = RuntimeEndpoint(f"{args.host}:{args.port}")
    elif args.backend.startswith("gpt-"):
        backend = OpenAI(args.backend)
    else:
        raise ValueError(f"Invalid backend: {args.backend}")
    return backend
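

# A usage sketch for select_sglang_backend (assumes `import sglang as sgl` and
# args parsed by add_common_sglang_args_and_parse):
#
#     backend = select_sglang_backend(args)
#     sgl.set_default_backend(backend)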


def _get_call_generate(args: argparse.Namespace):
    if args.backend == "lightllm":
        return partial(call_generate_lightllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "vllm":
        return partial(call_generate_vllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "srt-raw":
        return partial(call_generate_srt_raw, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "gserver":
        return partial(call_generate_gserver, url=f"{args.host}:{args.port}")
    elif args.backend == "outlines":
        return partial(call_generate_outlines, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "guidance":
        from guidance import models

        model = models.LlamaCpp(args.model_path, n_gpu_layers=-1, n_ctx=args.n_ctx)
        call_generate = partial(call_generate_guidance, model=model)
        call_generate("Hello,", 1.0, 8, ".")
        return call_generate
    else:
        raise ValueError(f"Invalid backend: {args.backend}")


def _get_call_select(args: argparse.Namespace):
    if args.backend == "lightllm":
        return partial(call_select_lightllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "vllm":
        return partial(call_select_vllm, url=f"{args.host}:{args.port}/generate")
    elif args.backend == "guidance":
        from guidance import models

        model = models.LlamaCpp(args.model_path, n_gpu_layers=-1, n_ctx=args.n_ctx)
        call_select = partial(call_select_guidance, model=model)

        call_select("Hello,", ["world", "earth"])
        return call_select
    else:
        raise ValueError(f"Invalid backend: {args.backend}")


def get_call_generate(args: argparse.Namespace):
    call_generate = _get_call_generate(args)

    def func(*args, **kwargs):
        try:
            return call_generate(*args, **kwargs)
        except Exception:
            print("Exception in call_generate:\n" + get_exception_traceback())
            raise

    return func


def get_call_select(args: argparse.Namespace):
    call_select = _get_call_select(args)

    def func(*args, **kwargs):
        try:
            return call_select(*args, **kwargs)
        except Exception:
            print("Exception in call_select:\n" + get_exception_traceback())
            raise

    return func


def popen_launch_server(
    model: str,
    base_url: str,
    timeout: float,
    api_key: Optional[str] = None,
    other_args: list[str] = (),
    env: Optional[dict] = None,
    return_stdout_stderr: Optional[tuple] = None,
    pd_seperated: bool = False,
):
    _, host, port = base_url.split(":")
    host = host[2:]

    if pd_seperated:
        command = "sglang.launch_pd_server"
    else:
        command = "sglang.launch_server"

    command = [
        "python3",
        "-m",
        command,
        "--model-path",
        model,
        *[str(x) for x in other_args],
    ]

    if pd_seperated:
        command.extend(
            [
                "--lb-host",
                host,
                "--lb-port",
                port,
            ]
        )
    else:
        command.extend(
            [
                "--host",
                host,
                "--port",
                port,
            ]
        )

    if api_key:
        command += ["--api-key", api_key]

    print(f"command={' '.join(command)}")

    if return_stdout_stderr:
        process = subprocess.Popen(
            command,
            stdout=return_stdout_stderr[0],
            stderr=return_stdout_stderr[1],
            env=env,
            text=True,
        )
    else:
        process = subprocess.Popen(command, stdout=None, stderr=None, env=env)

    start_time = time.time()
    with requests.Session() as session:
        while time.time() - start_time < timeout:
            try:
                headers = {
                    "Content-Type": "application/json; charset=utf-8",
                    "Authorization": f"Bearer {api_key}",
                }
                response = session.get(
                    f"{base_url}/health_generate",
                    headers=headers,
                )
                if response.status_code == 200:
                    return process
            except requests.RequestException:
                pass

            return_code = process.poll()
            if return_code is not None:
                raise Exception(
                    f"Server unexpectedly exits ({return_code=}). Usually there will be error logs describing the cause far above this line."
                )

            time.sleep(10)

    kill_process_tree(process.pid)
    raise TimeoutError("Server failed to start within the timeout period.")
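

# A minimal usage sketch for popen_launch_server (model and extra args are
# illustrative):
#
#     process = popen_launch_server(
#         DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
#         DEFAULT_URL_FOR_TEST,
#         timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
#         other_args=["--tp", "1"],
#     )
#     try:
#         ...  # issue requests against DEFAULT_URL_FOR_TEST
#     finally:
#         kill_process_tree(process.pid)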


def run_with_timeout(
    func: Callable,
    args: tuple = (),
    kwargs: Optional[dict] = None,
    timeout: Optional[float] = None,
):
    """Run a function with timeout."""
    ret_value = []

    def _target_func():
        ret_value.append(func(*args, **(kwargs or {})))

    t = threading.Thread(target=_target_func)
    t.start()
    t.join(timeout=timeout)
    if t.is_alive():
        raise TimeoutError(f"Function did not complete within {timeout} seconds")

    if not ret_value:
        raise RuntimeError("Function exited without returning a value")

    return ret_value[0]
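

# A usage sketch for run_with_timeout (the worker function is hypothetical):
#
#     def slow_add(a, b):
#         time.sleep(1)
#         return a + b
#
#     run_with_timeout(slow_add, args=(1, 2), timeout=5)  # returns 3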


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60


def run_unittest_files(files: List[TestFile], timeout_per_file: float):
    tic = time.time()
    success = True

    for i, file in enumerate(files):
        filename, estimated_time = file.name, file.estimated_time
        process = None

        def run_one_file(filename):
            nonlocal process

            filename = os.path.join(os.getcwd(), filename)
            print(
                f".\n.\nBegin ({i}/{len(files) - 1}):\npython3 {filename}\n.\n.\n",
                flush=True,
            )
            tic = time.time()

            process = subprocess.Popen(
                ["python3", filename], stdout=None, stderr=None, env=os.environ
            )
            process.wait()
            elapsed = time.time() - tic

            print(
                f".\n.\nEnd ({i}/{len(files) - 1}):\n{filename=}, {elapsed=:.0f}, {estimated_time=}\n.\n.\n",
                flush=True,
            )
            return process.returncode

        try:
            ret_code = run_with_timeout(
                run_one_file, args=(filename,), timeout=timeout_per_file
            )
            assert (
                ret_code == 0
            ), f"expected return code 0, but {filename} returned {ret_code}"
        except TimeoutError:
            kill_process_tree(process.pid)
            time.sleep(5)
            print(
                f"\nTimeout after {timeout_per_file} seconds when running {filename}\n",
                flush=True,
            )
            success = False
            break

    if success:
        print(f"Success. Time elapsed: {time.time() - tic:.2f}s", flush=True)
    else:
        print(f"Fail. Time elapsed: {time.time() - tic:.2f}s", flush=True)

    return 0 if success else -1
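

# A usage sketch for run_unittest_files (file names and estimates are
# illustrative):
#
#     exit_code = run_unittest_files(
#         [TestFile("test_srt_endpoint.py", estimated_time=120)],
#         timeout_per_file=2000,
#     )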


def get_similarities(vec1, vec2):
    return F.cosine_similarity(torch.tensor(vec1), torch.tensor(vec2), dim=0)
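

# Example: parallel vectors have cosine similarity 1.0.
#
#     get_similarities([1.0, 2.0], [2.0, 4.0])  # tensor(1.)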


def get_benchmark_args(
    base_url="",
    dataset_name="",
    dataset_path="",
    tokenizer="",
    num_prompts=500,
    sharegpt_output_len=None,
    random_input_len=4096,
    random_output_len=2048,
    sharegpt_context_len=None,
    request_rate=float("inf"),
    disable_stream=False,
    disable_ignore_eos=False,
    seed: int = 0,
    pd_seperated: bool = False,
):
    return SimpleNamespace(
        backend="sglang",
        base_url=base_url,
        host=None,
        port=None,
        dataset_name=dataset_name,
        dataset_path=dataset_path,
        model=None,
        tokenizer=tokenizer,
        num_prompts=num_prompts,
        sharegpt_output_len=sharegpt_output_len,
        sharegpt_context_len=sharegpt_context_len,
        random_input_len=random_input_len,
        random_output_len=random_output_len,
        random_range_ratio=0.0,
        request_rate=request_rate,
        multi=None,
        output_file=None,
        disable_tqdm=False,
        disable_stream=disable_stream,
        return_logprob=False,
        seed=seed,
        disable_ignore_eos=disable_ignore_eos,
        extra_request_body=None,
        apply_chat_template=False,
        profile=None,
        lora_name=None,
        prompt_suffix="",
        pd_seperated=pd_seperated,
    )


def run_bench_serving(
    model,
    num_prompts,
    request_rate,
    other_server_args,
    dataset_name="random",
    dataset_path="",
    tokenizer=None,
    random_input_len=4096,
    random_output_len=2048,
    sharegpt_context_len=None,
    disable_stream=False,
    disable_ignore_eos=False,
    need_warmup=False,
    seed: int = 0,
):
    # Launch the server
    base_url = DEFAULT_URL_FOR_TEST
    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_server_args,
    )

    # Run benchmark
    args = get_benchmark_args(
        base_url=base_url,
        dataset_name=dataset_name,
        dataset_path=dataset_path,
        tokenizer=tokenizer,
        num_prompts=num_prompts,
        random_input_len=random_input_len,
        random_output_len=random_output_len,
        sharegpt_context_len=sharegpt_context_len,
        request_rate=request_rate,
        disable_stream=disable_stream,
        disable_ignore_eos=disable_ignore_eos,
        seed=seed,
    )

    try:
        if need_warmup:
            warmup_args = copy.deepcopy(args)
            warmup_args.num_prompts = 16
            run_benchmark(warmup_args)

        res = run_benchmark(args)
    finally:
        kill_process_tree(process.pid)

    assert res["completed"] == num_prompts
    return res
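

# A usage sketch for run_bench_serving (model and threshold are illustrative;
# the result dict comes from sglang.bench_serving.run_benchmark):
#
#     res = run_bench_serving(
#         DEFAULT_MODEL_NAME_FOR_TEST,
#         num_prompts=100,
#         request_rate=float("inf"),
#         other_server_args=[],
#     )
#     assert res["output_throughput"] > 100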


def run_bench_serving_multi(
    model,
    base_url,
    other_server_args,
    benchmark_args,
    need_warmup=False,
    pd_seperated=False,
):
    # Launch the server
    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_server_args,
        pd_seperated=pd_seperated,
    )

    # run benchmark for all
    res_l = []
    try:
        for args in benchmark_args:
            if need_warmup:
                warmup_args = copy.deepcopy(args)
                warmup_args.num_prompts = 16
                run_benchmark(warmup_args)

            res = run_benchmark(args)
            res_l.append((args, res))
    finally:
        kill_process_tree(process.pid)

    return res_l


def run_bench_one_batch(model, other_args):
    command = [
        "python3",
        "-m",
        "sglang.bench_one_batch",
        "--batch-size",
        "1",
        "--input",
        "128",
        "--output",
        "8",
        *[str(x) for x in other_args],
    ]
    if model is not None:
        command += ["--model-path", model]
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    try:
        stdout, stderr = process.communicate()
        output = stdout.decode()
        error = stderr.decode()
        print(f"Output: {output}", flush=True)
        print(f"Error: {error}", flush=True)

        # bench_one_batch prints the throughput on the third-to-last stdout
        # line; the value is its second-to-last whitespace-separated token.
        lastline = output.split("\n")[-3]
        output_throughput = float(lastline.split(" ")[-2])
    finally:
        kill_process_tree(process.pid)

    return output_throughput


def run_bench_offline_throughput(model, other_args):
    command = [
        "python3",
        "-m",
        "sglang.bench_offline_throughput",
        "--num-prompts",
        "1",
        "--dataset-name",
        "random",
        "--random-input-len",
        "256",
        "--random-output-len",
        "256",
        "--model-path",
        model,
        *[str(x) for x in other_args],
    ]

    print(f"{command=}")
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    try:
        stdout, stderr = process.communicate()
        output = stdout.decode()
        error = stderr.decode()
        print(f"Output: {output}", flush=True)
        print(f"Error: {error}", flush=True)

        output_throughput = -1
        for line in output.split("\n"):
            if "Last generation throughput (tok/s):" in line:
                output_throughput = float(line.split(":")[-1])
    finally:
        kill_process_tree(process.pid)

    return output_throughput


def run_bench_one_batch_server(
    model,
    base_url,
    server_args,
    bench_args,
    other_server_args,
    simulate_spec_acc_lens=None,
):
    from sglang.bench_one_batch_server import run_benchmark

    if simulate_spec_acc_lens is not None:
        env = {**os.environ, "SIMULATE_ACC_LEN": str(simulate_spec_acc_lens)}
    else:
        env = None

    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_server_args,
        env=env,
    )
    try:
        run_benchmark(server_args=server_args, bench_args=bench_args)
    finally:
        kill_process_tree(process.pid)


def lcs(X, Y):
    """Return the length of the longest common subsequence of X and Y."""
    m = len(X)
    n = len(Y)
    L = [[0] * (n + 1) for _ in range(m + 1)]

    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0 or j == 0:
                L[i][j] = 0
            elif X[i - 1] == Y[j - 1]:
                L[i][j] = L[i - 1][j - 1] + 1
            else:
                L[i][j] = max(L[i - 1][j], L[i][j - 1])

    return L[m][n]


def calculate_rouge_l(output_strs_list1, output_strs_list2):
    """calculate the ROUGE-L score"""
    rouge_l_scores = []

    for s1, s2 in zip(output_strs_list1, output_strs_list2):
        lcs_len = lcs(s1, s2)
        precision = lcs_len / len(s1) if len(s1) > 0 else 0
        recall = lcs_len / len(s2) if len(s2) > 0 else 0
        if precision + recall > 0:
            fmeasure = (2 * precision * recall) / (precision + recall)
        else:
            fmeasure = 0.0
        rouge_l_scores.append(fmeasure)

    return rouge_l_scores
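

# Worked example: lcs("abcde", "ace") == 3, so precision = 3/5 = 0.6 and
# recall = 3/3 = 1.0, giving an F-measure of 2 * 0.6 * 1.0 / 1.6 == 0.75.
#
#     calculate_rouge_l(["abcde"], ["ace"])  # [0.75]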


STDERR_FILENAME = "stderr.txt"
STDOUT_FILENAME = "stdout.txt"


def read_output(output_lines: List[str], filename: str = STDERR_FILENAME):
    """Tail the given file, printing each new line and appending it to output_lines; meant to run in a separate thread."""
    while not os.path.exists(filename):
        time.sleep(1)

    pt = 0
    while pt >= 0:
        if pt > 0 and not os.path.exists(filename):
            break
        lines = open(filename).readlines()
        for line in lines[pt:]:
            print(line, end="", flush=True)
            output_lines.append(line)
            pt += 1
        time.sleep(0.1)


def run_and_check_memory_leak(
    workload_func,
    disable_radix_cache,
    enable_mixed_chunk,
    disable_overlap,
    chunked_prefill_size,
    assert_has_abort,
):
    other_args = [
        "--chunked-prefill-size",
        str(chunked_prefill_size),
        "--log-level",
        "debug",
    ]
    if disable_radix_cache:
        other_args += ["--disable-radix-cache"]
    if enable_mixed_chunk:
        other_args += ["--enable-mixed-chunk"]
    if disable_overlap:
        other_args += ["--disable-overlap-schedule"]

    model = DEFAULT_MODEL_NAME_FOR_TEST
    port = random.randint(4000, 5000)
    base_url = f"http://127.0.0.1:{port}"

    # Create files and launch the server
    stdout = open(STDOUT_FILENAME, "w")
    stderr = open(STDERR_FILENAME, "w")
    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_args,
        return_stdout_stderr=(stdout, stderr),
    )

    # Launch a thread to stream the output
    output_lines = []
    t = threading.Thread(target=read_output, args=(output_lines,))
    t.start()

    # Run the workload
    workload_func(base_url, model)

    # Clean up everything
    kill_process_tree(process.pid)
    stdout.close()
    stderr.close()
    if os.path.exists(STDOUT_FILENAME):
        os.remove(STDOUT_FILENAME)
    if os.path.exists(STDERR_FILENAME):
        os.remove(STDERR_FILENAME)
    t.join()

    # Assert success
    has_new_server = False
    has_leak = False
    has_abort = False
    for line in output_lines:
        if "Uvicorn running" in line:
            has_new_server = True
        if "leak" in line:
            has_leak = True
        if "Abort" in line:
            has_abort = True

    assert has_new_server
    assert not has_leak
    if assert_has_abort:
        assert has_abort


def run_command_and_capture_output(command, env: Optional[dict] = None):
    stdout = open(STDOUT_FILENAME, "w")
    stderr = open(STDERR_FILENAME, "w")
    process = subprocess.Popen(
        command, stdout=stdout, stderr=stdout, env=env, text=True
    )

    # Launch a thread to stream the output
    output_lines = []
    t = threading.Thread(target=read_output, args=(output_lines, STDOUT_FILENAME))
    t.start()

    # Join the process
    process.wait()

    stdout.close()
    stderr.close()
    if os.path.exists(STDOUT_FILENAME):
        os.remove(STDOUT_FILENAME)
    if os.path.exists(STDERR_FILENAME):
        os.remove(STDERR_FILENAME)
    kill_process_tree(process.pid)
    t.join()

    return output_lines


def run_mmlu_test(
    disable_radix_cache=False,
    enable_mixed_chunk=False,
    disable_overlap=False,
    chunked_prefill_size=32,
):
    def workload_func(base_url, model):
        # Run the eval
        args = SimpleNamespace(
            base_url=base_url,
            model=model,
            eval_name="mmlu",
            num_examples=128,
            num_threads=128,
        )

        metrics = run_eval(args)
        assert metrics["score"] >= 0.65, f"{metrics=}"

    run_and_check_memory_leak(
        workload_func,
        disable_radix_cache,
        enable_mixed_chunk,
        disable_overlap,
        chunked_prefill_size,
        assert_has_abort=False,
    )


def run_mulit_request_test(
    disable_radix_cache=False,
    enable_mixed_chunk=False,
    disable_overlap=False,
    chunked_prefill_size=32,
):
    def workload_func(base_url, model):
        def run_one(_):
            prompt = """
            System: You are a helpful assistant.
            User: What is the capital of France?
            Assistant: The capital of France is
            """

            response = requests.post(
                f"{base_url}/generate",
                json={
                    "text": prompt,
                    "sampling_params": {
                        "temperature": 0,
                        "max_new_tokens": 8,
                    },
                },
            )
            ret = response.json()

        with ThreadPoolExecutor(2) as executor:
            list(executor.map(run_one, list(range(4))))

    run_and_check_memory_leak(
        workload_func,
        disable_radix_cache,
        enable_mixed_chunk,
        disable_overlap,
        chunked_prefill_size,
        assert_has_abort=False,
    )


def write_github_step_summary(content):
    if not os.environ.get("GITHUB_STEP_SUMMARY"):
        logging.warning("GITHUB_STEP_SUMMARY environment variable not set")
        return

    with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as f:
        f.write(content)
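

# Example (markdown content is illustrative); the summary is rendered on the
# GitHub Actions run page when GITHUB_STEP_SUMMARY is set:
#
#     write_github_step_summary("## Nightly eval\n\n- score: 0.72\n")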


def run_logprob_check(self: unittest.TestCase, arg: Tuple):
    (
        input_len,
        output_len,
        temperature,
        logprob_start_len,
        return_logprob,
        top_logprobs_num,
    ) = arg
    input_ids = list(range(input_len))

    response = requests.post(
        self.base_url + "/generate",
        json={
            "input_ids": input_ids,
            "sampling_params": {
                "temperature": temperature,
                "max_new_tokens": output_len,
                "ignore_eos": True,
            },
            "return_logprob": return_logprob,
            "logprob_start_len": logprob_start_len,
            "top_logprobs_num": top_logprobs_num,
        },
    )
    res = response.json()
    self.assertEqual(res["meta_info"]["prompt_tokens"], input_len)
    self.assertEqual(res["meta_info"]["completion_tokens"], output_len)

    # Test the number of tokens are correct
    if return_logprob:
        self.assertEqual(
            len(res["meta_info"]["input_token_logprobs"]) + logprob_start_len,
            res["meta_info"]["prompt_tokens"],
        )
        self.assertEqual(len(res["meta_info"]["output_token_logprobs"]), output_len)

        if top_logprobs_num:
            self.assertEqual(
                len(res["meta_info"]["input_top_logprobs"]) + logprob_start_len,
                res["meta_info"]["prompt_tokens"],
            )
            self.assertEqual(len(res["meta_info"]["output_top_logprobs"]), output_len)

            for i in range(output_len):
                self.assertEqual(
                    len(res["meta_info"]["output_top_logprobs"][i]),
                    top_logprobs_num,
                )

                # Test the top-1 tokens are the same as output tokens if temperature == 0
                if temperature == 0:
                    rank = 0
                    while rank < len(res["meta_info"]["output_top_logprobs"][i]):
                        try:
                            self.assertListEqual(
                                res["meta_info"]["output_token_logprobs"][i],
                                res["meta_info"]["output_top_logprobs"][i][rank],
                            )
                            break
                        except AssertionError:
                            # There's a tie. Allow the second item in this case.
                            if (
                                res["meta_info"]["output_top_logprobs"][i][rank][0]
                                == res["meta_info"]["output_top_logprobs"][i][rank + 1][
                                    0
                                ]
                            ):
                                rank += 1
                            else:
                                raise


class CustomTestCase(unittest.TestCase):
    def _callTestMethod(self, method):
        max_retry = int(
            os.environ.get("SGLANG_TEST_MAX_RETRY", "1" if is_in_ci() else "0")
        )
        retry(
            lambda: super(CustomTestCase, self)._callTestMethod(method),
            max_retry=max_retry,
        )
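

# A usage sketch: inherit from CustomTestCase instead of unittest.TestCase so
# that flaky tests are retried in CI (SGLANG_TEST_MAX_RETRY sets the retry
# count):
#
#     class TestMyFeature(CustomTestCase):
#         def test_something(self):
#             self.assertTrue(True)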