# Adapted from https://github.com/vllm-project/vllm/blob/6366efc67b0aedd2c1721c14385370e50b297fb3/benchmarks/backend_request_func.py
# Adapted from https://github.com/vllm-project/vllm/blob/6366efc67b0aedd2c1721c14385370e50b297fb3/benchmarks/benchmark_serving.py

"""
Benchmark online serving with dynamic requests.

Usage:
python3 -m sglang.bench_serving --backend sglang --num-prompts 10

python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompts 3000 --random-input 1024 --random-output 1024 --random-range-ratio 0.5
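
The generated-shared-prefix workload (driven by the gsp_* arguments defined in this file) is selected the same way:
python3 -m sglang.bench_serving --backend sglang --dataset-name generated-shared-prefix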
"""

import argparse
import asyncio
import json
import os
import pickle
import random
import resource
import sys
import time
import traceback
import warnings
from argparse import ArgumentParser
from dataclasses import dataclass, field
from datetime import datetime
from json import JSONDecodeError
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union

import aiohttp
import numpy as np
import requests
from tqdm.asyncio import tqdm
from transformers import (
    AutoTokenizer,
    PreTrainedTokenizer,
    PreTrainedTokenizerBase,
    PreTrainedTokenizerFast,
)

AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60)
ASSISTANT_SUFFIX = "Assistant:"

global args


# don't want to import sglang package here
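# E.g., _get_bool_env_var("SGLANG_IS_IN_CI") returns True when the variable is
# set to "true" or "1" (case-insensitive) and False otherwise, including unset.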
def _get_bool_env_var(name: str, default: str = "false") -> bool:
    value = os.getenv(name, default)
    return value.lower() in ("true", "1")


@dataclass
class RequestFuncInput:
    prompt: str
    api_url: str
    prompt_len: int
    output_len: int
    model: str
    lora_name: str
    image_data: str
    extra_request_body: Dict[str, Any]


@dataclass
class RequestFuncOutput:
    generated_text: str = ""
    success: bool = False
    latency: float = 0.0
    ttft: float = 0.0  # Time to first token
    itl: List[float] = field(default_factory=list)  # List of inter-token latencies
    prompt_len: int = 0
    error: str = ""
    output_len: int = 0

    @staticmethod
    def init_new(request_func_input: RequestFuncInput):
        output = RequestFuncOutput()
        output.prompt_len = request_func_input.prompt_len
        return output


def remove_prefix(text: str, prefix: str) -> str:
    return text[len(prefix) :] if text.startswith(prefix) else text
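# remove_prefix/remove_suffix mirror str.removeprefix/str.removesuffix
# (Python 3.9+); they are kept as local helpers so the script also runs on
# older interpreters.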


def remove_suffix(text: str, suffix: str) -> str:
    # Guard against an empty suffix: text[: -len("")] would slice to "",
    # unlike str.removesuffix, which returns the text unchanged.
    return text[: -len(suffix)] if suffix and text.endswith(suffix) else text


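# Reads OPENAI_API_KEY from the environment; e.g. OPENAI_API_KEY=sk-xxx yields
# {"Authorization": "Bearer sk-xxx"}, while an unset key sends no auth header.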
def get_auth_headers() -> Dict[str, str]:
    api_key = os.environ.get("OPENAI_API_KEY")
    if api_key:
        return {"Authorization": f"Bearer {api_key}"}
    else:
        return {}


# TensorRT-LLM does not support ignore_eos
# https://github.com/triton-inference-server/tensorrtllm_backend/issues/505
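# The payload below emulates ignore_eos: min_length pins generation to the
# requested output length, and end_id points at a token id outside the real
# vocabulary so the stop token can never be emitted.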
async def async_request_trt_llm(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    api_url = request_func_input.api_url
    assert api_url.endswith("generate_stream")

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "accumulate_tokens": True,
            "text_input": request_func_input.prompt,
            "temperature": 0.000001,
            "top_p": 1.0,
            "max_tokens": request_func_input.output_len,
            "stream": True,
            "min_length": request_func_input.output_len,
            "end_id": 1048576,
            **request_func_input.extra_request_body,
        }
        if args.disable_ignore_eos:
            del payload["min_length"]
            del payload["end_id"]
        output = RequestFuncOutput.init_new(request_func_input)

        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(url=api_url, json=payload) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data:")

                        data = json.loads(chunk)
                        output.generated_text += data["text_output"]
                        timestamp = time.perf_counter()
                        # First token
                        if ttft == 0.0:
                            ttft = timestamp - st
                            output.ttft = ttft

                        # Decoding phase
                        else:
                            output.itl.append(timestamp - most_recent_timestamp)

                        most_recent_timestamp = timestamp

                    output.latency = most_recent_timestamp - st
                    output.success = True
                    output.output_len = request_func_input.output_len

                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

        if pbar:
            pbar.update(1)
        return output


# set ignore_eos True by default
async def async_request_openai_completions(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    api_url = request_func_input.api_url
    assert api_url.endswith(
        "completions"
    ), "OpenAI Completions API URL must end with 'completions'."

    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "model": request_func_input.model,
            "prompt": prompt,
            "temperature": 0.0,
            "best_of": 1,
            "max_tokens": request_func_input.output_len,
            "stream": not args.disable_stream,
            "ignore_eos": not args.disable_ignore_eos,
            **request_func_input.extra_request_body,
        }
        headers = get_auth_headers()

        output = RequestFuncOutput.init_new(request_func_input)

        generated_text = ""
        output_len = request_func_input.output_len
        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(
                url=api_url, json=payload, headers=headers
            ) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data: ")
                        latency = time.perf_counter() - st
                        if chunk == "[DONE]":
                            pass
                        else:
                            data = json.loads(chunk)

                            # NOTE: some completion APIs send a final usage
                            # summary chunk that contains no token, so check
                            # that a token was actually generated.
                            if data["choices"][0]["text"]:
                                timestamp = time.perf_counter()
                                # First token
                                if ttft == 0.0:
                                    ttft = time.perf_counter() - st
                                    output.ttft = ttft

                                # Decoding phase
                                else:
                                    output.itl.append(timestamp - most_recent_timestamp)

                                most_recent_timestamp = timestamp
                                generated_text += data["choices"][0]["text"]
                                output_len = (data.get("usage") or {}).get(
                                    "completion_tokens", output_len
                                )

                    output.generated_text = generated_text
                    output.success = True
                    output.latency = latency
                    output.output_len = output_len
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    if pbar:
        pbar.update(1)
    return output


async def async_request_truss(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    api_url = request_func_input.api_url

    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "model": request_func_input.model,
            "prompt": prompt,
            "temperature": 0.0,
            "best_of": 1,
            "max_tokens": request_func_input.output_len,
            "stream": not args.disable_stream,
            "ignore_eos": not args.disable_ignore_eos,
            **request_func_input.extra_request_body,
        }
        headers = get_auth_headers()

        output = RequestFuncOutput.init_new(request_func_input)

        generated_text = ""
        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(
                url=api_url, json=payload, headers=headers
            ) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data: ")
                        latency = time.perf_counter() - st
                        if chunk == "[DONE]":
                            pass
                        else:
                            data = json.loads(chunk)

                            # NOTE: some completion APIs send a final usage
                            # summary chunk that contains no token, so check
                            # that a token was actually generated.
                            if data["choices"][0]["text"]:
                                timestamp = time.perf_counter()
                                # First token
                                if ttft == 0.0:
                                    ttft = time.perf_counter() - st
                                    output.ttft = ttft

                                # Decoding phase
                                else:
                                    output.itl.append(timestamp - most_recent_timestamp)

                                most_recent_timestamp = timestamp
                                generated_text += data["choices"][0]["text"]

                    output.generated_text = generated_text
                    output.success = True
                    output.latency = latency
                    output.output_len = request_func_input.output_len
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    if pbar:
        pbar.update(1)
    return output


async def async_request_sglang_generate(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    api_url = request_func_input.api_url
    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            ("text" if isinstance(prompt, str) else "input_ids"): prompt,
            "sampling_params": {
                "temperature": 0.0,
                "max_new_tokens": request_func_input.output_len,
                "ignore_eos": not args.disable_ignore_eos,
            },
            "stream": not args.disable_stream,
            "lora_path": request_func_input.lora_name,
            "return_logprob": args.return_logprob,
            "logprob_start_len": -1,
            **request_func_input.extra_request_body,
        }

        # Add image data if available
        if request_func_input.image_data:
            payload["image_data"] = request_func_input.image_data

        headers = get_auth_headers()

        output = RequestFuncOutput.init_new(request_func_input)

        generated_text = ""
        output_len = request_func_input.output_len
        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        last_output_len = 0
        try:
            async with session.post(
                url=api_url, json=payload, headers=headers
            ) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue
                        # print(chunk_bytes)

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data: ")
                        latency = time.perf_counter() - st
                        if chunk == "[DONE]":
                            pass
                        else:
                            data = json.loads(chunk)

                            # NOTE: some completion APIs send a final usage
                            # summary chunk that contains no token, so check
                            # that a token was actually generated.
                            if data["text"]:
                                timestamp = time.perf_counter()
                                generated_text = data["text"]
                                output_len = data["meta_info"]["completion_tokens"]

                                # First token
                                if ttft == 0.0:
                                    ttft = time.perf_counter() - st
                                    output.ttft = ttft

                                # Decoding phase
                                else:
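                                    # A streamed chunk can carry several new
                                    # tokens; spread the observed gap evenly so
                                    # each token is assigned the same ITL.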
                                    num_new_tokens = output_len - last_output_len
                                    if num_new_tokens == 0:
                                        continue
                                    adjust_itl = (
                                        timestamp - most_recent_timestamp
                                    ) / num_new_tokens
                                    output.itl.extend([adjust_itl] * num_new_tokens)

                                most_recent_timestamp = timestamp
                                last_output_len = output_len

                    output.generated_text = generated_text
                    output.success = True
                    output.latency = latency
                    output.output_len = output_len
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))
            print(f"{output.error=}")

    if pbar:
        pbar.update(1)
    return output


async def async_request_gserver(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    raise NotImplementedError()


async def async_request_profile(api_url: str) -> RequestFuncOutput:
    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        output = RequestFuncOutput()
        try:
            async with session.post(url=api_url) as response:
                if response.status == 200:
                    output.success = True
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    return output


def get_model(pretrained_model_name_or_path: str) -> str:
    if os.getenv("SGLANG_USE_MODELSCOPE", "false").lower() == "true":
        import huggingface_hub.constants
        from modelscope import snapshot_download

        model_path = snapshot_download(
            model_id=pretrained_model_name_or_path,
            local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
            ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"],
        )

        return model_path
    return pretrained_model_name_or_path


def get_tokenizer(
    pretrained_model_name_or_path: str,
) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
    assert (
        pretrained_model_name_or_path is not None
        and pretrained_model_name_or_path != ""
    )
    if pretrained_model_name_or_path.endswith(
        ".json"
    ) or pretrained_model_name_or_path.endswith(".model"):
        from sglang.srt.hf_transformers_utils import get_tokenizer

        return get_tokenizer(pretrained_model_name_or_path)

    if pretrained_model_name_or_path is not None and not os.path.exists(
        pretrained_model_name_or_path
    ):
        pretrained_model_name_or_path = get_model(pretrained_model_name_or_path)
    return AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path, trust_remote_code=True
    )


def get_dataset(args, tokenizer):
    if args.dataset_name == "sharegpt":
        assert not args.tokenize_prompt
        input_requests = sample_sharegpt_requests(
            dataset_path=args.dataset_path,
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            fixed_output_len=args.sharegpt_output_len,
            context_len=args.sharegpt_context_len,
            prompt_suffix=args.prompt_suffix,
            apply_chat_template=args.apply_chat_template,
        )
    elif args.dataset_name.startswith("random"):
        input_requests = sample_random_requests(
            input_len=args.random_input_len,
            output_len=args.random_output_len,
            num_prompts=args.num_prompts,
            range_ratio=args.random_range_ratio,
            tokenizer=tokenizer,
            dataset_path=args.dataset_path,
            random_sample=args.dataset_name == "random",
            return_text=not args.tokenize_prompt,
        )
    elif args.dataset_name == "generated-shared-prefix":
        assert not args.tokenize_prompt
        input_requests = sample_generated_shared_prefix_requests(
            num_groups=args.gsp_num_groups,
            prompts_per_group=args.gsp_prompts_per_group,
            system_prompt_len=args.gsp_system_prompt_len,
            question_len=args.gsp_question_len,
            output_len=args.gsp_output_len,
            tokenizer=tokenizer,
            args=args,
        )
    elif args.dataset_name == "mmmu":
        assert not args.tokenize_prompt
        input_requests = sample_mmmu_requests(
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            fixed_output_len=args.random_output_len,
            random_sample=True,
        )
    else:
        raise ValueError(f"Unknown dataset: {args.dataset_name}")
    return input_requests


ASYNC_REQUEST_FUNCS = {
    "sglang": async_request_sglang_generate,
    "sglang-native": async_request_sglang_generate,
    "sglang-oai": async_request_openai_completions,
    "vllm": async_request_openai_completions,
    "lmdeploy": async_request_openai_completions,
    "trt": async_request_trt_llm,
    "gserver": async_request_gserver,
    "truss": async_request_truss,
}


@dataclass
class BenchmarkMetrics:
    completed: int
    total_input: int
    total_output: int
    total_output_retokenized: int
    request_throughput: float
    input_throughput: float
    output_throughput: float
    output_throughput_retokenized: float
    total_throughput: float
    total_throughput_retokenized: float
    mean_ttft_ms: float
    median_ttft_ms: float
    std_ttft_ms: float
    p99_ttft_ms: float
    mean_tpot_ms: float
    median_tpot_ms: float
    std_tpot_ms: float
    p99_tpot_ms: float
    mean_itl_ms: float
    median_itl_ms: float
    std_itl_ms: float
    p95_itl_ms: float
    p99_itl_ms: float
    max_itl_ms: float
    mean_e2e_latency_ms: float
    median_e2e_latency_ms: float
    std_e2e_latency_ms: float
    p99_e2e_latency_ms: float
    concurrency: float


SHAREGPT_URL = "https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json"


def download_and_cache_file(url: str, filename: Optional[str] = None):
    """Read and cache a file from a url."""
    if filename is None:
        filename = os.path.join("/tmp", url.split("/")[-1])

    # Check if the cache file already exists
    if is_file_valid_json(filename):
        return filename

    print(f"Downloading from {url} to {filename}")

    # Stream the response to show the progress bar
    response = requests.get(url, stream=True)
    response.raise_for_status()  # Check for request errors

    # Total size of the file in bytes
    total_size = int(response.headers.get("content-length", 0))
    chunk_size = 1024  # Download in chunks of 1KB

    # Use tqdm to display the progress bar
    with open(filename, "wb") as f, tqdm(
        desc=filename,
        total=total_size,
        unit="B",
        unit_scale=True,
        unit_divisor=1024,
    ) as bar:
        for chunk in response.iter_content(chunk_size=chunk_size):
            f.write(chunk)
            bar.update(len(chunk))

    return filename


def is_file_valid_json(path):
    if not os.path.isfile(path):
        return False

    # TODO can fuse into the real file open later
    try:
        with open(path) as f:
            json.load(f)
        return True
    except JSONDecodeError as e:
        print(
            f"{path} exists but json loading fails ({e=}), thus treat as invalid file"
        )
        return False


@dataclass
class DatasetRow:
    prompt: str
    prompt_len: int
    output_len: int


def sample_mmmu_requests(
    num_requests: int,
    tokenizer: PreTrainedTokenizerBase,
    fixed_output_len: Optional[int] = None,
    random_sample: bool = True,
) -> List[DatasetRow]:
    """
    Sample requests from the MMMU dataset using HuggingFace datasets.

    Args:
        num_requests: Number of requests to sample.
        tokenizer: Tokenizer to use for token counting.
        fixed_output_len: If provided, use this fixed output length for all requests.
        random_sample: Whether to randomly sample or take the first N.

    Returns:
        List of DatasetRow objects (prompt, prompt_len, output_len).
    """
    try:
        import base64
        import io

        from datasets import load_dataset
    except ImportError:
        raise ImportError("Please install datasets: pip install datasets")

    print("Loading MMMU dataset from HuggingFace...")

    try:
        print("Attempting to load MMMU Math dataset...")
        mmmu_dataset = load_dataset("MMMU/MMMU", "Math", split="test")
        print(
            f"Successfully loaded MMMU Math dataset from HuggingFace with {len(mmmu_dataset)} examples"
        )
    except Exception as e:
        print(f"Failed to load MMMU Math dataset: {e}")
        raise ValueError(f"Failed to load MMMU dataset: {e}")

    # Sample from the dataset
    if len(mmmu_dataset) > num_requests:
        if random_sample:
            # Random sample
            indices = random.sample(range(len(mmmu_dataset)), num_requests)
            sample_dataset = mmmu_dataset.select(indices)
        else:
            # Take first N
            sample_dataset = mmmu_dataset.select(
                range(min(num_requests, len(mmmu_dataset)))
            )
    else:
        print(f"Dataset has less than {num_requests} examples, using all examples")
        sample_dataset = mmmu_dataset

    print(f"Selected {len(sample_dataset)} examples for benchmarking")

    # Create prompts
    filtered_dataset = []

    for i, example in enumerate(sample_dataset):
        try:
            # Extract image_1
            image = example.get("image_1")

            if image is not None:
                if hasattr(image, "save"):
                    # Convert RGBA images to RGB before encoding
                    if image.mode == "RGBA":
                        image = image.convert("RGB")

                    # Encode image to base64
                    buffered = io.BytesIO()
                    image.save(buffered, format="JPEG")
                    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
                    image_path = f"data:image/jpeg;base64,{img_str}"
                else:
                    continue

                # Extract the question
                question = example.get("question")

                # Create the prompt with image, question
                prompt = f"Question: {question}\n\nAnswer: "
                prompt = tokenizer.apply_chat_template(
                    [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image_url", "image_url": {"url": image_path}},
                                {"type": "text", "text": prompt},
                            ],
                        }
                    ],
                    add_generation_prompt=True,
                    tokenize=False,
                )
                prompt = f"<image>{image_path}</image>{prompt}"

                # Calculate token lengths
                # Note: This is approximate since we're not rendering the actual image tokens
                prompt_token_ids = tokenizer.encode(prompt)
                prompt_len = (
                    len(prompt_token_ids) + 512
                )  # Add estimate for image tokens

                output_len = fixed_output_len if fixed_output_len is not None else 256

                filtered_dataset.append(
                    DatasetRow(
                        prompt=prompt, prompt_len=prompt_len, output_len=output_len
                    )
                )

        except Exception as e:
            print(f"Error processing example {i}: {e}")

    print(f"\nCreated {len(filtered_dataset)} MMMU prompts")
    return filtered_dataset


def sample_sharegpt_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: PreTrainedTokenizerBase,
    fixed_output_len: Optional[int] = None,
    context_len: Optional[int] = None,
    prompt_suffix: Optional[str] = "",
    apply_chat_template=False,
) -> List[DatasetRow]:
    if fixed_output_len is not None and fixed_output_len < 4:
        raise ValueError("output_len too small")

    # Download sharegpt if necessary
    if not is_file_valid_json(dataset_path) and dataset_path == "":
        dataset_path = download_and_cache_file(SHAREGPT_URL)

    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)

    # Filter out the conversations with less than 2 turns.
    dataset = [
        data
        for data in dataset
        if len(data.get("conversations", data.get("conversation", []))) >= 2
    ]
    # Only keep the first two turns of each conversation.
    dataset = [
        (
            data.get("conversations", data.get("conversation", []))[0]["value"],
            data.get("conversations", data.get("conversation", []))[1]["value"],
        )
        for data in dataset
    ]

    # Shuffle the dataset.
    random.shuffle(dataset)

    # Filter out sequences that are too long or too short
    filtered_dataset: List[DatasetRow] = []
    for i in range(len(dataset)):
        if len(filtered_dataset) == num_requests:
            break

        # Tokenize the prompts and completions.
        prompt = dataset[i][0]
        if prompt_suffix:
            prompt = (
                remove_suffix(prompt, ASSISTANT_SUFFIX)
                + prompt_suffix
                + ASSISTANT_SUFFIX
            )

        if apply_chat_template:
            prompt = tokenizer.apply_chat_template(
                [{"role": "user", "content": prompt}],
                add_generation_prompt=True,
                tokenize=False,
            )
            prompt = prompt.replace(tokenizer.bos_token, "")

        prompt_token_ids = tokenizer.encode(prompt)
        completion = dataset[i][1]
        completion_token_ids = tokenizer.encode(completion)
        prompt_len = len(prompt_token_ids)
        output_len = (
            len(completion_token_ids) if fixed_output_len is None else fixed_output_len
        )

        if prompt_len < 2 or output_len < 2:
            # Prune too short sequences.
            continue

        if context_len and prompt_len + output_len > context_len:
            # Prune too long sequences.
            continue

        filtered_dataset.append(
            DatasetRow(prompt=prompt, prompt_len=prompt_len, output_len=output_len)
        )

    print(f"#Input tokens: {np.sum([x.prompt_len for x in filtered_dataset])}")
    print(f"#Output tokens: {np.sum([x.output_len for x in filtered_dataset])}")
    return filtered_dataset


def sample_random_requests(
    input_len: int,
    output_len: int,
    num_prompts: int,
    range_ratio: float,
    tokenizer: PreTrainedTokenizerBase,
    dataset_path: str,
    random_sample: bool = True,
    return_text: bool = True,
) -> List[DatasetRow]:
    input_lens = np.random.randint(
        max(int(input_len * range_ratio), 1),
        input_len + 1,
        size=num_prompts,
    )
    output_lens = np.random.randint(
        int(output_len * range_ratio),
        output_len + 1,
        size=num_prompts,
    )
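    # Lengths are drawn uniformly from [length * range_ratio, length], so
    # --random-range-ratio 1.0 gives fixed lengths and smaller ratios add
    # per-request variance.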

    if random_sample:
        # Sample token ids from ShareGPT and repeat/truncate them to satisfy the input_lens

        # Download sharegpt if necessary
        if not is_file_valid_json(dataset_path):
            dataset_path = download_and_cache_file(SHAREGPT_URL)

        # Load the dataset.
        with open(dataset_path) as f:
            dataset = json.load(f)
        # Filter out the conversations with less than 2 turns.
        dataset = [
            data
            for data in dataset
            if len(data.get("conversations", data.get("conversation", []))) >= 2
        ]
        # Only keep the first two turns of each conversation.
        dataset = [
            (
                data.get("conversations", data.get("conversation", []))[0]["value"],
                data.get("conversations", data.get("conversation", []))[1]["value"],
            )
            for data in dataset
        ]
        # Shuffle the dataset.
        random.shuffle(dataset)

        # Filter out sequences that are too long or too short
        input_requests: List[DatasetRow] = []
        for data in dataset:
            i = len(input_requests)
            if i == num_prompts:
                break

            # Tokenize the prompts and completions.
            prompt = data[0]
            prompt_token_ids = tokenizer.encode(prompt)
            prompt_len = len(prompt_token_ids)

            # Skip empty prompt
            if prompt_len == 0:
                continue

            if prompt_len > input_lens[i]:
                input_ids = prompt_token_ids[: input_lens[i]]
            else:
                ratio = (input_lens[i] + prompt_len - 1) // prompt_len
                input_ids = (prompt_token_ids * ratio)[: input_lens[i]]
            input_content = input_ids
            if return_text:
                input_content = tokenizer.decode(input_content)
            input_requests.append(
                DatasetRow(
                    prompt=input_content,
                    prompt_len=int(input_lens[i]),
                    output_len=int(output_lens[i]),
                )
            )
    else:
        # Sample token ids from random integers. This can cause some NaN issues.
        offsets = np.random.randint(0, tokenizer.vocab_size, size=num_prompts)
        input_requests = []
        for i in range(num_prompts):
            input_content = [
                (offsets[i] + i + j) % tokenizer.vocab_size
                for j in range(input_lens[i])
            ]
            if return_text:
                input_content = tokenizer.decode(input_content)
            input_requests.append(
                DatasetRow(
                    prompt=input_content,
                    prompt_len=int(input_lens[i]),
                    output_len=int(output_lens[i]),
                )
            )

    print(f"#Input tokens: {np.sum(input_lens)}")
    print(f"#Output tokens: {np.sum(output_lens)}")
    return input_requests


def gen_prompt(tokenizer, token_num):
    """Generate a random prompt of specified token length using tokenizer vocabulary."""
    all_available_tokens = list(tokenizer.get_vocab().values())
    selected_tokens = random.choices(all_available_tokens, k=token_num)
    return tokenizer.decode(selected_tokens)
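# Note: decode followed by re-encode does not always round-trip, so the text
# returned here may not re-tokenize to exactly token_num tokens; callers that
# need an exact count re-measure with tokenizer.encode.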


def get_gen_prefix_cache_path(args, tokenizer):
    """Create cache directory under ~/.cache/sglang/benchmark"""
    cache_dir = Path.home() / ".cache" / "sglang" / "benchmark"

    # Create a unique cache filename based on the generation parameters
    cache_key = (
        f"gen_shared_prefix_{args.gsp_num_groups}_{args.gsp_prompts_per_group}_"
        f"{args.gsp_system_prompt_len}_{args.gsp_question_len}_{args.gsp_output_len}_"
        f"{tokenizer.__class__.__name__}.pkl"
    )
    return cache_dir / cache_key


def sample_generated_shared_prefix_requests(
    num_groups: int,
    prompts_per_group: int,
    system_prompt_len: int,
    question_len: int,
    output_len: int,
    tokenizer: PreTrainedTokenizerBase,
    args: argparse.Namespace,
) -> List[DatasetRow]:
    """Generate benchmark requests with shared system prompts using random tokens and caching."""
    cache_path = get_gen_prefix_cache_path(args, tokenizer)

    # Try to load from cache first
    if cache_path.exists():
        print(f"\nLoading cached generated input data from {cache_path}")
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    print("\nGenerating new input data...")

    # Generate system prompts for each group
    system_prompts = []
    for _ in range(num_groups):
        system_prompt = gen_prompt(tokenizer, system_prompt_len)
        system_prompts.append(system_prompt)

    # Generate questions
    questions = []
    for _ in range(num_groups * prompts_per_group):
        question = gen_prompt(tokenizer, question_len)
        questions.append(question)

    # Combine system prompts with questions
    input_requests = []
    total_input_tokens = 0
    total_output_tokens = 0

    for group_idx in tqdm(range(num_groups), desc="Generating system prompt"):
        system_prompt = system_prompts[group_idx]
        for prompt_idx in tqdm(
            range(prompts_per_group), desc="Generating questions", leave=False
        ):
            question = questions[group_idx * prompts_per_group + prompt_idx]
            full_prompt = f"{system_prompt}\n\n{question}"
            prompt_len = len(tokenizer.encode(full_prompt))

            input_requests.append(
                DatasetRow(
                    prompt=full_prompt, prompt_len=prompt_len, output_len=output_len
                )
            )
            total_input_tokens += prompt_len
            total_output_tokens += output_len

    # Shuffle questions
    random.shuffle(input_requests)

    # Print statistics
    print(f"\nGenerated shared prefix dataset statistics:")
    print(f"Number of groups: {num_groups}")
    print(f"Prompts per group: {prompts_per_group}")
    print(f"Total prompts: {len(input_requests)}")
    print(f"Total input tokens: {total_input_tokens}")
    print(f"Total output tokens: {total_output_tokens}")
    print(
        f"Average system prompt length: {sum(len(tokenizer.encode(sp)) for sp in system_prompts) / len(system_prompts):.1f} tokens"
    )
    print(
        f"Average question length: {sum(len(tokenizer.encode(q)) for q in questions) / len(questions):.1f} tokens\n"
    )

    # Save to cache
    cache_path.parent.mkdir(parents=True, exist_ok=True)
    print(f"Caching generated input data to {cache_path}")
    with open(cache_path, "wb") as f:
        pickle.dump(input_requests, f)

    return input_requests


async def get_request(
    input_requests: List[DatasetRow],
    request_rate: float,
) -> AsyncGenerator[DatasetRow, None]:
    input_requests = iter(input_requests)
    for request in input_requests:
        yield request

        if request_rate == float("inf"):
            # If the request rate is infinity, then we don't need to wait.
            continue

        # Sample the request interval from the exponential distribution.
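        # Independent exponential gaps with mean 1/request_rate make the
        # arrival process Poisson with an average rate of request_rate req/s.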
        interval = np.random.exponential(1.0 / request_rate)
        # The next request will be sent after the interval.
        await asyncio.sleep(interval)


def calculate_metrics(
    input_requests: List[DatasetRow],
    outputs: List[RequestFuncOutput],
    dur_s: float,
    tokenizer: PreTrainedTokenizerBase,
    backend: str,
) -> Tuple[BenchmarkMetrics, List[int]]:
    output_lens: List[int] = []
    retokenized_output_lens: List[int] = []
    total_input = 0
    completed = 0
    itls: List[float] = []
    tpots: List[float] = []
    ttfts: List[float] = []
    e2e_latencies: List[float] = []
    for i in range(len(outputs)):
        if outputs[i].success:
            output_len = outputs[i].output_len
            output_lens.append(output_len)
            retokenized_output_len = len(
                tokenizer.encode(outputs[i].generated_text, add_special_tokens=False)
            )
            retokenized_output_lens.append(retokenized_output_len)
            total_input += input_requests[i].prompt_len
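            # TPOT excludes the first token: TTFT accounts for token 1, so the
            # remaining latency is averaged over the other output_len - 1 tokens.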
            if output_len > 1:
                tpots.append((outputs[i].latency - outputs[i].ttft) / (output_len - 1))
            itls += outputs[i].itl
            ttfts.append(outputs[i].ttft)

            e2e_latencies.append(outputs[i].latency)

            completed += 1
        else:
            output_lens.append(0)
            retokenized_output_lens.append(0)

    if completed == 0:
        warnings.warn(
            "All requests failed. This is likely due to a misconfiguration "
            "on the benchmark arguments.",
            stacklevel=2,
        )
    metrics = BenchmarkMetrics(
        completed=completed,
        total_input=total_input,
        total_output=sum(output_lens),
        total_output_retokenized=sum(retokenized_output_lens),
        request_throughput=completed / dur_s,
        input_throughput=total_input / dur_s,
        output_throughput=sum(output_lens) / dur_s,
        output_throughput_retokenized=sum(retokenized_output_lens) / dur_s,
        total_throughput=(total_input + sum(output_lens)) / dur_s,
        total_throughput_retokenized=(total_input + sum(retokenized_output_lens))
        / dur_s,
        mean_ttft_ms=np.mean(ttfts or 0)
        * 1000,  # ttfts is empty if streaming is not supported by backend
        median_ttft_ms=np.median(ttfts or 0) * 1000,
        std_ttft_ms=np.std(ttfts or 0) * 1000,
        p99_ttft_ms=np.percentile(ttfts or 0, 99) * 1000,
        mean_tpot_ms=np.mean(tpots or 0) * 1000,
        median_tpot_ms=np.median(tpots or 0) * 1000,
        std_tpot_ms=np.std(tpots or 0) * 1000,
        p99_tpot_ms=np.percentile(tpots or 0, 99) * 1000,
        mean_itl_ms=np.mean(itls or 0) * 1000,
        median_itl_ms=np.median(itls or 0) * 1000,
        std_itl_ms=np.std(itls or 0) * 1000,
        p95_itl_ms=np.percentile(itls or 0, 95) * 1000,
        p99_itl_ms=np.percentile(itls or 0, 99) * 1000,
        max_itl_ms=np.max(itls or 0) * 1000,
        mean_e2e_latency_ms=np.mean(e2e_latencies) * 1000,
        median_e2e_latency_ms=np.median(e2e_latencies) * 1000,
        std_e2e_latency_ms=np.std(e2e_latencies) * 1000,
        p99_e2e_latency_ms=np.percentile(e2e_latencies, 99) * 1000,
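        # Little's law: summed request latency divided by wall-clock time
        # estimates the average number of in-flight requests.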
        concurrency=np.sum(e2e_latencies) / dur_s,
    )

    return metrics, output_lens


async def benchmark(
    backend: str,
    api_url: str,
    base_url: str,
    model_id: str,
    tokenizer: PreTrainedTokenizerBase,
    input_requests: List[DatasetRow],
    request_rate: float,
    max_concurrency: Optional[int],
    disable_tqdm: bool,
    lora_names: List[str],
    extra_request_body: Dict[str, Any],
    profile: bool,
    pd_separated: bool = False,
    flush_cache: bool = False,
    warmup_requests: int = 1,
):
    if backend in ASYNC_REQUEST_FUNCS:
        request_func = ASYNC_REQUEST_FUNCS[backend]
    else:
        raise ValueError(f"Unknown backend: {backend}")

    # Limit concurrency
    # From https://github.com/vllm-project/vllm/pull/9390
    semaphore = asyncio.Semaphore(max_concurrency) if max_concurrency else None

    async def limited_request_func(request_func_input, pbar):
        if semaphore is None:
            return await request_func(request_func_input=request_func_input, pbar=pbar)
        async with semaphore:
            return await request_func(request_func_input=request_func_input, pbar=pbar)

    # Warmup
    print(f"Starting warmup with {warmup_requests} sequences...")

    # Use the first request for all warmup iterations
    test_request = input_requests[0]
    test_prompt, test_prompt_len, test_output_len = (
        test_request.prompt,
        test_request.prompt_len,
        test_request.output_len,
    )
    if lora_names is not None and len(lora_names) != 0:
        lora_name = lora_names[0]
    else:
        lora_name = None

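    # MMMU prompts embed the base64 image as
    # "<image>data:image/jpeg;base64,...</image><text>"; split the image back
    # out so it can be sent through the separate image_data field.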
    if "<image>" in test_prompt:
        import re

        image_match = re.search(r"<image>(.*?)</image>(.*)", test_prompt)
        image_data = image_match.group(1) if image_match else None
        test_prompt = image_match.group(2) if image_match else test_prompt
    else:
        image_data = None

    # Create the test input once
    test_input = RequestFuncInput(
        model=model_id,
        prompt=test_prompt,
        api_url=api_url,
        prompt_len=test_prompt_len,
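        # Cap warmup generation at 32 tokens so warmup stays cheap even when
        # the benchmark itself requests long outputs.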
        output_len=min(test_output_len, 32),
        lora_name=lora_name,
        image_data=image_data,
        extra_request_body=extra_request_body,
    )

    # Run warmup requests
    warmup_tasks = []
    for _ in range(warmup_requests):
        warmup_tasks.append(
            asyncio.create_task(request_func(request_func_input=test_input))
        )

    warmup_outputs = await asyncio.gather(*warmup_tasks)

    # Check if at least one warmup request succeeded
    if warmup_requests > 0 and not any(output.success for output in warmup_outputs):
        raise ValueError(
            "Warmup failed - Please make sure benchmark arguments "
            f"are correctly specified. Error: {warmup_outputs[0].error}"
        )
    else:
        print(
            f"Warmup completed with {warmup_requests} sequences. Starting main benchmark run..."
        )

    # Flush cache
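    # (clears the server-side prefix cache so earlier runs don't inflate
    # cache-hit rates in this measurement)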
    if ("sglang" in backend and _get_bool_env_var("SGLANG_IS_IN_CI")) or flush_cache:
        requests.post(base_url + "/flush_cache", headers=get_auth_headers())

    time.sleep(1.0)

    # Start profiler
    if profile:
        print("Starting profiler...")
        profile_output = await async_request_profile(
            api_url=base_url + "/start_profile"
        )
        if profile_output.success:
            print("Profiler started")

    pbar = None if disable_tqdm else tqdm(total=len(input_requests))

    # Run all requests
    benchmark_start_time = time.perf_counter()
    tasks: List[asyncio.Task] = []
    async for request in get_request(input_requests, request_rate):
        prompt, prompt_len, output_len = (
            request.prompt,
            request.prompt_len,
            request.output_len,
        )
        if lora_names is not None and len(lora_names) != 0:
            idx = random.randint(0, len(lora_names) - 1)
            lora_name = lora_names[idx]
        else:
            lora_name = None

        if "<image>" in prompt:
            import re

            image_match = re.search(r"<image>(.*?)</image>(.*)", prompt)
            image_data = image_match.group(1) if image_match else None
            prompt = image_match.group(2) if image_match else prompt
        else:
            image_data = None

        request_func_input = RequestFuncInput(
            model=model_id,
            prompt=prompt,
            api_url=api_url,
            prompt_len=prompt_len,
            output_len=output_len,
            lora_name=lora_name,
            image_data=image_data,
            extra_request_body=extra_request_body,
        )
        tasks.append(
            asyncio.create_task(
                limited_request_func(request_func_input=request_func_input, pbar=pbar)
            )
        )
    outputs: List[RequestFuncOutput] = await asyncio.gather(*tasks)

    # Stop profiler
    if profile:
        print("Stopping profiler...")
        profile_output = await async_request_profile(api_url=base_url + "/stop_profile")
        if profile_output.success:
            print("Profiler stopped")

    if pbar is not None:
        pbar.close()

    if "sglang" in backend:
        server_info = requests.get(base_url + "/get_server_info")
        if server_info.status_code == 200:
            server_info_json = server_info.json()
            if "decode" in server_info_json:
                server_info_json = server_info_json["decode"][0]
            accept_length = server_info_json["internal_states"][0].get(
                "avg_spec_accept_length", None
            )
        else:
            accept_length = None
    else:
        accept_length = None

    # Compute metrics and print results
    benchmark_duration = time.perf_counter() - benchmark_start_time
    metrics, output_lens = calculate_metrics(
        input_requests=input_requests,
        outputs=outputs,
        dur_s=benchmark_duration,
        tokenizer=tokenizer,
        backend=backend,
    )

    print("\n{s:{c}^{n}}".format(s=" Serving Benchmark Result ", n=50, c="="))
    print("{:<40} {:<10}".format("Backend:", backend))
    print("{:<40} {:<10}".format("Traffic request rate:", request_rate))
    print(
        "{:<40} {:<10}".format(
            "Max request concurrency:",
            max_concurrency if max_concurrency else "not set",
        )
    )
    print("{:<40} {:<10}".format("Successful requests:", metrics.completed))
    print("{:<40} {:<10.2f}".format("Benchmark duration (s):", benchmark_duration))
    print("{:<40} {:<10}".format("Total input tokens:", metrics.total_input))
    print("{:<40} {:<10}".format("Total generated tokens:", metrics.total_output))
    print(
        "{:<40} {:<10}".format(
            "Total generated tokens (retokenized):", metrics.total_output_retokenized
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Request throughput (req/s):", metrics.request_throughput
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Input token throughput (tok/s):", metrics.input_throughput
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Output token throughput (tok/s):", metrics.output_throughput
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Total token throughput (tok/s):", metrics.total_throughput
        )
    )
    print("{:<40} {:<10.2f}".format("Concurrency:", metrics.concurrency))
    if accept_length:
        print("{:<40} {:<10.2f}".format("Accept length:", accept_length))
    print("{s:{c}^{n}}".format(s="End-to-End Latency", n=50, c="-"))
    print(
        "{:<40} {:<10.2f}".format("Mean E2E Latency (ms):", metrics.mean_e2e_latency_ms)
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Median E2E Latency (ms):", metrics.median_e2e_latency_ms
        )
    )
    print("{s:{c}^{n}}".format(s="Time to First Token", n=50, c="-"))
    print("{:<40} {:<10.2f}".format("Mean TTFT (ms):", metrics.mean_ttft_ms))
    print("{:<40} {:<10.2f}".format("Median TTFT (ms):", metrics.median_ttft_ms))
    print("{:<40} {:<10.2f}".format("P99 TTFT (ms):", metrics.p99_ttft_ms))
    print("{s:{c}^{n}}".format(s="Inter-Token Latency", n=50, c="-"))
    print("{:<40} {:<10.2f}".format("Mean ITL (ms):", metrics.mean_itl_ms))
    print("{:<40} {:<10.2f}".format("Median ITL (ms):", metrics.median_itl_ms))
    print("{:<40} {:<10.2f}".format("P95 ITL (ms):", metrics.p95_itl_ms))
    print("{:<40} {:<10.2f}".format("P99 ITL (ms):", metrics.p99_itl_ms))
    print("{:<40} {:<10.2f}".format("Max ITL (ms):", metrics.max_itl_ms))
    print("=" * 50)

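    # Only dump results when the run produced valid metrics.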
    if (
        metrics.median_ttft_ms is not None
        and metrics.mean_itl_ms is not None
        and metrics.output_throughput is not None
    ):
        result = {
            # Arguments
            "backend": args.backend,
            "dataset_name": args.dataset_name,
            "request_rate": request_rate,
            "max_concurrency": max_concurrency,
            "sharegpt_output_len": args.sharegpt_output_len,
            "random_input_len": args.random_input_len,
            "random_output_len": args.random_output_len,
            "random_range_ratio": args.random_range_ratio,
            # Results
            "duration": benchmark_duration,
            "completed": metrics.completed,
            "total_input_tokens": metrics.total_input,
            "total_output_tokens": metrics.total_output,
            "total_output_tokens_retokenized": metrics.total_output_retokenized,
            "request_throughput": metrics.request_throughput,
            "input_throughput": metrics.input_throughput,
            "output_throughput": metrics.output_throughput,
            "mean_e2e_latency_ms": metrics.mean_e2e_latency_ms,
            "median_e2e_latency_ms": metrics.median_e2e_latency_ms,
            "std_e2e_latency_ms": metrics.std_e2e_latency_ms,
            "p99_e2e_latency_ms": metrics.p99_e2e_latency_ms,
            "mean_ttft_ms": metrics.mean_ttft_ms,
            "median_ttft_ms": metrics.median_ttft_ms,
            "std_ttft_ms": metrics.std_ttft_ms,
            "p99_ttft_ms": metrics.p99_ttft_ms,
            "mean_tpot_ms": metrics.mean_tpot_ms,
            "median_tpot_ms": metrics.median_tpot_ms,
            "std_tpot_ms": metrics.std_tpot_ms,
            "p99_tpot_ms": metrics.p99_tpot_ms,
            "mean_itl_ms": metrics.mean_itl_ms,
            "median_itl_ms": metrics.median_itl_ms,
            "std_itl_ms": metrics.std_itl_ms,
            "p95_itl_ms": metrics.p95_itl_ms,
            "p99_itl_ms": metrics.p99_itl_ms,
            "concurrency": metrics.concurrency,
            "accept_length": accept_length,
        }
    else:
        print(f"Error running benchmark for request rate: {request_rate}")
        print("-" * 30)

    # Determine output file name
    if args.output_file:
        output_file_name = args.output_file
    else:
        now = datetime.now().strftime("%m%d")
        if args.dataset_name.startswith("random"):
            output_file_name = f"{args.backend}_{now}_{args.num_prompts}_{args.random_input_len}_{args.random_output_len}.jsonl"
        else:
            output_file_name = f"{args.backend}_{now}_{args.num_prompts}_sharegpt.jsonl"

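    # Per-request details, appended to the JSONL output when --output-details is set.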
    result_details = {
        "input_lens": [output.prompt_len for output in outputs],
        "output_lens": output_lens,
        "ttfts": [output.ttft for output in outputs],
        "itls": [output.itl for output in outputs],
        "generated_texts": [output.generated_text for output in outputs],
        "errors": [output.error for output in outputs],
    }

    # Append results to a JSONL file
    with open(output_file_name, "a") as file:
        if args.output_details:
            result_for_dump = result | result_details
        else:
            result_for_dump = result
        file.write(json.dumps(result_for_dump) + "\n")

    return result | result_details


def check_chat_template(model_path):
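    """Return True if the tokenizer config defines a chat template, i.e. the model is likely a Chat/Instruct variant."""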
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        return "chat_template" in tokenizer.init_kwargs
    except Exception as e:
        print(f"Fail to load tokenizer config with error={e}")
        return False


def set_global_args(args_: argparse.Namespace):
    """Set the global args."""
    global args
    args = args_


def run_benchmark(args_: argparse.Namespace):
    global args
    args = args_

    # Set default value for max_concurrency if not present
    if not hasattr(args, "max_concurrency"):
        args.max_concurrency = None

    # Set default value for warmup_requests if not present
    if not hasattr(args, "warmup_requests"):
        args.warmup_requests = 1

    if not hasattr(args, "output_details"):
        args.output_details = False

    if not hasattr(args, "tokenize_prompt"):
        args.tokenize_prompt = False

    print(f"benchmark_args={args}")

    # Set global environments
    set_ulimit()
    random.seed(args.seed)
    np.random.seed(args.seed)

    extra_request_body = {}
    if args.extra_request_body:
        extra_request_body = json.loads(args.extra_request_body)

    if args.tokenize_prompt:
        assert (
            args.backend == "sglang"
        ), "`--tokenize-prompt` only compatible with `--backend sglang` currently"

    # Set url
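    # Fall back to the conventional default port of the chosen backend.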
    if args.port is None:
        args.port = {
            "sglang": 30000,
            "sglang-native": 30000,
            "sglang-oai": 30000,
            "lmdeploy": 23333,
            "vllm": 8000,
            "trt": 8000,
            "gserver": 9988,
            "truss": 8080,
        }.get(args.backend, 30000)

    model_url = (
        f"{args.base_url}/v1/models"
        if args.base_url
        else f"http://{args.host}:{args.port}/v1/models"
    )

    if args.backend in ["sglang", "sglang-native"]:
        api_url = (
            f"{args.base_url}/generate"
            if args.base_url
            else f"http://{args.host}:{args.port}/generate"
        )
    elif args.backend in ["sglang-oai", "vllm", "lmdeploy"]:
        api_url = (
            f"{args.base_url}/v1/completions"
            if args.base_url
            else f"http://{args.host}:{args.port}/v1/completions"
        )
    elif args.backend == "trt":
        api_url = (
            f"{args.base_url}/v2/models/ensemble/generate_stream"
            if args.base_url
            else f"http://{args.host}:{args.port}/v2/models/ensemble/generate_stream"
        )
        if args.model is None:
            print("Please provide a model using `--model` when using `trt` backend.")
            sys.exit(1)
    elif args.backend == "gserver":
        api_url = args.base_url if args.base_url else f"{args.host}:{args.port}"
        args.model = args.model or "default"
    elif args.backend == "truss":
        api_url = (
            f"{args.base_url}/v1/models/model:predict"
            if args.base_url
            else f"http://{args.host}:{args.port}/v1/models/model:predict"
        )
    base_url = (
        f"http://{args.host}:{args.port}" if args.base_url is None else args.base_url
    )

    # Get model name
    if args.model is None:
        if args.backend == "truss":
            print(
                "Please provide a model with `--model` when using truss backend. e.g. --model meta-llama/Llama-3.1-8B-Instruct"
            )
            sys.exit(1)
        try:
            response = requests.get(model_url, headers=get_auth_headers())
            model_list = response.json().get("data", [])
            args.model = model_list[0]["id"] if model_list else None
        except Exception as e:
            print(f"Failed to fetch model from {model_url}. Error: {e}")
            print(
                "Please specify the correct host and port using `--host` and `--port`."
            )
            sys.exit(1)

    if args.model is None:
        print("No model specified or found. Please provide a model using `--model`.")
        sys.exit(1)

    if not check_chat_template(args.model):
        print(
            "\nWARNING: It is recommended to use a `Chat` or `Instruct` model for benchmarking.\n"
            "Otherwise, the model may generate gibberish and the tokenizer may miscount the output tokens.\n"
        )

    print(f"{args}\n")

    # Read dataset
    backend = args.backend
    model_id = args.model
    tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model
    tokenizer = get_tokenizer(tokenizer_id)
    input_requests = get_dataset(args, tokenizer)

    # compatible with SimpleNamespace
    if not hasattr(args, "flush_cache"):
        args.flush_cache = False

    return asyncio.run(
        benchmark(
            backend=backend,
            api_url=api_url,
            base_url=base_url,
            model_id=model_id,
            tokenizer=tokenizer,
            input_requests=input_requests,
            request_rate=args.request_rate,
            max_concurrency=args.max_concurrency,
            disable_tqdm=args.disable_tqdm,
            lora_names=args.lora_name,
            extra_request_body=extra_request_body,
            profile=args.profile,
            pd_separated=args.pd_separated,
            flush_cache=args.flush_cache,
            warmup_requests=args.warmup_requests,
        )
    )


def set_ulimit(target_soft_limit=65535):
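    """Raise the soft limit on open file descriptors so that many concurrent connections can be benchmarked."""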
    resource_type = resource.RLIMIT_NOFILE
    current_soft, current_hard = resource.getrlimit(resource_type)

    if current_soft < target_soft_limit:
        try:
            resource.setrlimit(resource_type, (target_soft_limit, current_hard))
        except ValueError as e:
            print(f"Fail to set RLIMIT_NOFILE: {e}")


class LoRAPathAction(argparse.Action):
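    """Argparse action that collects the given LoRA adapter names into a list."""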
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, list(values))


if __name__ == "__main__":
    parser = ArgumentParser(description="Benchmark the online serving throughput.")
    parser.add_argument(
        "--backend",
        type=str,
        choices=list(ASYNC_REQUEST_FUNCS.keys()),
        default="sglang",
        help="Must specify a backend, depending on the LLM Inference Engine.",
    )
    parser.add_argument(
        "--base-url",
        type=str,
        default=None,
        help="Server or API base url if not using http host and port.",
    )
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="Default host is 0.0.0.0."
    )
    parser.add_argument(
        "--port",
        type=int,
        help="If not set, the default port is configured according to its default value for different LLM Inference Engines.",
    )
    parser.add_argument(
        "--dataset-name",
        type=str,
        default="sharegpt",
        choices=["sharegpt", "random", "random-ids", "generated-shared-prefix", "mmmu"],
        help="Name of the dataset to benchmark on.",
    )
    parser.add_argument(
        "--dataset-path", type=str, default="", help="Path to the dataset."
    )
    parser.add_argument(
        "--model",
        type=str,
        help="Name or path of the model. If not set, the default model will request /v1/models for conf.",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        help="Name or path of the tokenizer. If not set, using the model conf.",
    )
    parser.add_argument(
        "--num-prompts",
        type=int,
        default=1000,
        help="Number of prompts to process. Default is 1000.",
    )
    parser.add_argument(
        "--sharegpt-output-len",
        type=int,
        default=None,
        help="Output length for each request. Overrides the output length from the ShareGPT dataset.",
    )
    parser.add_argument(
        "--sharegpt-context-len",
        type=int,
        default=None,
        help="The context length of the model for the ShareGPT dataset. Requests longer than the context length will be dropped.",
    )
    parser.add_argument(
        "--random-input-len",
        type=int,
        default=1024,
        help="Number of input tokens per request, used only for random dataset.",
    )
    parser.add_argument(
        "--random-output-len",
        default=1024,
        type=int,
        help="Number of output tokens per request, used only for random dataset.",
    )
    parser.add_argument(
        "--random-range-ratio",
        type=float,
        default=0.0,
        help="Range of sampled ratio of input/output length, "
        "used only for random dataset.",
    )
    parser.add_argument(
        "--request-rate",
        type=float,
        default=float("inf"),
        help="Number of requests per second. If this is inf, then all the requests are sent at time 0. "
        "Otherwise, we use Poisson process to synthesize the request arrival times. Default is inf.",
    )
    parser.add_argument(
        "--max-concurrency",
        type=int,
        default=None,
        help="Maximum number of concurrent requests. This can be used "
        "to help simulate an environment where a higher level component "
        "is enforcing a maximum number of concurrent requests. While the "
        "--request-rate argument controls the rate at which requests are "
        "initiated, this argument will control how many are actually allowed "
        "to execute at a time. This means that when used in combination, the "
        "actual request rate may be lower than specified with --request-rate, "
        "if the server is not processing requests fast enough to keep up.",
    )
    parser.add_argument("--output-file", type=str, help="Output JSONL file name.")
    parser.add_argument(
        "--output-details", action="store_true", help="Output details of benchmarking."
    )
    parser.add_argument(
        "--disable-tqdm",
        action="store_true",
        help="Specify to disable tqdm progress bar.",
    )
    parser.add_argument(
        "--disable-stream",
        action="store_true",
        help="Disable streaming mode.",
    )
    parser.add_argument(
        "--return-logprob",
        action="store_true",
        help="Return logprob.",
    )
    parser.add_argument("--seed", type=int, default=1, help="The random seed.")
    parser.add_argument(
        "--disable-ignore-eos",
        action="store_true",
        help="Disable ignoring EOS, i.e. let generation stop at the EOS token.",
    )
    parser.add_argument(
        "--extra-request-body",
        metavar='{"key1": "value1", "key2": "value2"}',
        type=str,
        help="Append given JSON object to the request payload. You can use this to specify"
        "additional generate params like sampling params.",
    )
    parser.add_argument(
        "--apply-chat-template",
        action="store_true",
        help="Apply chat template",
    )
    parser.add_argument(
        "--profile",
        action="store_true",
        help="Use Torch Profiler. The endpoint must be launched with "
        "SGLANG_TORCH_PROFILER_DIR to enable profiler.",
    )
    parser.add_argument(
        "--lora-name",
        type=str,
        nargs="*",
        default=None,
        action=LoRAPathAction,
        help="The names of LoRA adapters. You can provide a list of names in the format {name} {name} {name}...",
    )
    parser.add_argument(
        "--prompt-suffix",
        type=str,
        default="",
        help="Suffix applied to the end of all user prompts, followed by assistant prompt suffix.",
    )
    parser.add_argument(
        "--pd-separated",
        action="store_true",
        help="Benchmark PD disaggregation server",
    )
    parser.add_argument(
        "--flush-cache",
        action="store_true",
        help="Flush the cache before running the benchmark",
    )
    parser.add_argument(
        "--warmup-requests",
        type=int,
        default=1,
        help="Number of warmup requests to run before the benchmark",
    )
    parser.add_argument(
        "--tokenize-prompt",
        action="store_true",
        help="Use integer ids instead of string for inputs. Useful to control prompt lengths accurately",
    )

    group = parser.add_argument_group("generated-shared-prefix dataset arguments")
    group.add_argument(
        "--gsp-num-groups",
        type=int,
        default=64,
        help="Number of system prompt groups for generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gsp-prompts-per-group",
        type=int,
        default=16,
        help="Number of prompts per system prompt group for generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gsp-system-prompt-len",
        type=int,
        default=2048,
        help="Target length in tokens for system prompts in generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gsp-question-len",
        type=int,
        default=128,
        help="Target length in tokens for questions in generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gsp-output-len",
        type=int,
        default=256,
        help="Target length in tokens for outputs in generated-shared-prefix dataset",
    )
    args = parser.parse_args()
    run_benchmark(args)