# Adapted from https://github.com/vllm-project/vllm/blob/6366efc67b0aedd2c1721c14385370e50b297fb3/benchmarks/backend_request_func.py
# Adapted from https://github.com/vllm-project/vllm/blob/6366efc67b0aedd2c1721c14385370e50b297fb3/benchmarks/benchmark_serving.py

"""
Benchmark online serving with dynamic requests.

Usage:
python3 -m sglang.bench_serving --backend sglang --num-prompts 10

python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompts 3000 --random-input 1024 --random-output 1024 --random-range-ratio 0.5
python3 -m sglang.bench_serving --backend sglang --dataset-name random --request-rate-range 1,2,4,8,16,32 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --multi
"""

import argparse
import asyncio
import json
import os
import pickle
import random
import resource
import sys
import time
import traceback
import warnings
from argparse import ArgumentParser
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union

import aiohttp
import numpy as np
import requests
from tqdm.asyncio import tqdm
from transformers import (
    AutoTokenizer,
    PreTrainedTokenizer,
    PreTrainedTokenizerBase,
    PreTrainedTokenizerFast,
)

AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60)

global args


@dataclass
class RequestFuncInput:
    prompt: str
    api_url: str
    prompt_len: int
    output_len: int
    model: str
    lora_name: str
    extra_request_body: Dict[str, Any]


@dataclass
class RequestFuncOutput:
    generated_text: str = ""
    success: bool = False
    latency: float = 0.0
    ttft: float = 0.0  # Time to first token
    itl: List[float] = field(default_factory=list)  # List of inter-token latencies
    prompt_len: int = 0
    error: str = ""
    output_len: int = 0


def remove_prefix(text: str, prefix: str) -> str:
    return text[len(prefix) :] if text.startswith(prefix) else text


# TensorRT-LLM does not support ignore_eos
# https://github.com/triton-inference-server/tensorrtllm_backend/issues/505
async def async_request_trt_llm(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
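    """Send one streaming request to a TensorRT-LLM (Triton) generate_stream
    endpoint and record TTFT, inter-token latencies, and total latency.
    """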
    api_url = request_func_input.api_url
    assert api_url.endswith("generate_stream")

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "accumulate_tokens": True,
            "text_input": request_func_input.prompt,
            "temperature": 0.000001,
            "top_p": 1.0,
            "max_tokens": request_func_input.output_len,
            "stream": True,
            "min_length": request_func_input.output_len,
            "end_id": 1048576,
            **request_func_input.extra_request_body,
        }
        if args.disable_ignore_eos:
            del payload["min_length"]
            del payload["end_id"]
        output = RequestFuncOutput()
        output.prompt_len = request_func_input.prompt_len

        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(url=api_url, json=payload) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data:")

                        data = json.loads(chunk)
                        output.generated_text += data["text_output"]
                        timestamp = time.perf_counter()
                        # First token
                        if ttft == 0.0:
                            ttft = time.perf_counter() - st
                            output.ttft = ttft

                        # Decoding phase
                        else:
                            output.itl.append(timestamp - most_recent_timestamp)

                        most_recent_timestamp = timestamp

                    output.latency = most_recent_timestamp - st
                    output.success = True
                    output.output_len = request_func_input.output_len

                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

        if pbar:
            pbar.update(1)
        return output


# Set ignore_eos to True by default
async def async_request_openai_completions(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
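    """Send one streaming request to an OpenAI-compatible /v1/completions
    endpoint (used by the sglang-oai, vllm, and lmdeploy backends).
    """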
    api_url = request_func_input.api_url
    assert api_url.endswith(
        "completions"
    ), "OpenAI Completions API URL must end with 'completions'."

    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "model": request_func_input.model,
            "prompt": prompt,
            "temperature": 0.0,
            "best_of": 1,
            "max_tokens": request_func_input.output_len,
            "stream": not args.disable_stream,
            "ignore_eos": not args.disable_ignore_eos,
            **request_func_input.extra_request_body,
        }
        headers = {"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}"}

        output = RequestFuncOutput()
        output.prompt_len = request_func_input.prompt_len

        generated_text = ""
        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(
                url=api_url, json=payload, headers=headers
            ) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data: ")
                        latency = time.perf_counter() - st
                        if chunk == "[DONE]":
                            pass
                        else:
                            data = json.loads(chunk)

                            # NOTE: Some completion API might have a last
                            # usage summary response without a token so we
                            # want to check a token was generated
                            if data["choices"][0]["text"]:
                                timestamp = time.perf_counter()
                                # First token
                                if ttft == 0.0:
                                    ttft = time.perf_counter() - st
                                    output.ttft = ttft

                                # Decoding phase
                                else:
                                    output.itl.append(timestamp - most_recent_timestamp)

                                most_recent_timestamp = timestamp
                                generated_text += data["choices"][0]["text"]

                    output.generated_text = generated_text
                    output.success = True
                    output.latency = latency
                    output.output_len = request_func_input.output_len
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    if pbar:
        pbar.update(1)
    return output


async def async_request_truss(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
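    """Send one streaming request to a Truss endpoint; the stream uses the
    chat-style choices[0]["delta"]["content"] format.
    """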
    api_url = request_func_input.api_url

    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "model": request_func_input.model,
            "prompt": prompt,
            "temperature": 0.0,
            "best_of": 1,
            "max_tokens": request_func_input.output_len,
            "stream": not args.disable_stream,
            "ignore_eos": not args.disable_ignore_eos,
            **request_func_input.extra_request_body,
        }
        headers = {"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}"}

        output = RequestFuncOutput()
        output.prompt_len = request_func_input.prompt_len

        generated_text = ""
        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(
                url=api_url, json=payload, headers=headers
            ) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data: ")
                        latency = time.perf_counter() - st
                        if chunk == "[DONE]":
                            pass
                        else:
                            data = json.loads(chunk)

                            # NOTE: Some completion API might have a last
                            # usage summary response without a token so we
                            # want to check a token was generated
                            if data["choices"][0]["delta"]["content"]:
                                timestamp = time.perf_counter()
                                # First token
                                if ttft == 0.0:
                                    ttft = time.perf_counter() - st
                                    output.ttft = ttft

                                # Decoding phase
                                else:
                                    output.itl.append(timestamp - most_recent_timestamp)

                                most_recent_timestamp = timestamp
                                generated_text += data["choices"][0]["delta"]["content"]

                    output.generated_text = generated_text
                    output.success = True
                    output.latency = latency
                    output.output_len = request_func_input.output_len
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    if pbar:
        pbar.update(1)
    return output


async def async_request_sglang_generate(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
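    """Send one streaming request to the SGLang native /generate endpoint and
    record TTFT, inter-token latencies, and total latency.
    """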
    api_url = request_func_input.api_url
    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "text": prompt,
            "sampling_params": {
                "temperature": 0.0,
                "max_new_tokens": request_func_input.output_len,
                "ignore_eos": not args.disable_ignore_eos,
            },
            "stream": not args.disable_stream,
            "lora_path": request_func_input.lora_name,
            "return_logprob": args.return_logprob,
            "logprob_start_len": -1,
            **request_func_input.extra_request_body,
        }
        headers = {}

        output = RequestFuncOutput()
        output.prompt_len = request_func_input.prompt_len

        generated_text = ""
        ttft = 0.0
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(
                url=api_url, json=payload, headers=headers
            ) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        chunk_bytes = chunk_bytes.strip()
                        if not chunk_bytes:
                            continue
                        # print(chunk_bytes)

                        chunk = remove_prefix(chunk_bytes.decode("utf-8"), "data: ")
                        latency = time.perf_counter() - st
                        if chunk == "[DONE]":
                            pass
                        else:
                            data = json.loads(chunk)

                            # NOTE: Some completion API might have a last
                            # usage summary response without a token so we
                            # want to check a token was generated
                            if data["text"]:
                                timestamp = time.perf_counter()
                                # First token
                                if ttft == 0.0:
                                    ttft = time.perf_counter() - st
                                    output.ttft = ttft

                                # Decoding phase
                                else:
                                    output.itl.append(timestamp - most_recent_timestamp)

                                most_recent_timestamp = timestamp
                                generated_text = data["text"]

                    output.generated_text = generated_text
                    output.success = True
                    output.latency = latency
                    output.output_len = request_func_input.output_len
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    if pbar:
        pbar.update(1)
    return output


async def async_request_gserver(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    raise NotImplementedError()


async def async_request_profile(api_url: str) -> RequestFuncOutput:
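    """POST to a profiler control endpoint (e.g. /start_profile) and report success."""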
    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        output = RequestFuncOutput()
        try:
            async with session.post(url=api_url) as response:
                if response.status == 200:
                    output.success = True
                else:
                    output.error = response.reason or ""
                    output.success = False
        except Exception:
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    return output


def get_model(pretrained_model_name_or_path: str) -> str:
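    """Resolve the model path, downloading non-weight files from ModelScope when
    SGLANG_USE_MODELSCOPE=true; otherwise return the name or path unchanged.
    """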
    if os.getenv("SGLANG_USE_MODELSCOPE", "false").lower() == "true":
        import huggingface_hub.constants
        from modelscope import snapshot_download

        model_path = snapshot_download(
            model_id=pretrained_model_name_or_path,
            local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
            ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"],
        )

        return model_path
    return pretrained_model_name_or_path


def get_tokenizer(
    pretrained_model_name_or_path: str,
) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
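    """Load a tokenizer from a local tokenizer file (.json/.model) via the SGLang
    helper, or from Hugging Face / ModelScope by name or path.
    """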
    if pretrained_model_name_or_path.endswith(
        ".json"
    ) or pretrained_model_name_or_path.endswith(".model"):
        from sglang.srt.hf_transformers_utils import get_tokenizer

        return get_tokenizer(pretrained_model_name_or_path)

    if pretrained_model_name_or_path is not None and not os.path.exists(
        pretrained_model_name_or_path
    ):
        pretrained_model_name_or_path = get_model(pretrained_model_name_or_path)
    return AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path, trust_remote_code=True
    )


def get_dataset(args, tokenizer):
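    """Build the list of (prompt, prompt_len, output_len) requests for the selected dataset."""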
    if args.dataset_name == "sharegpt":
        input_requests = sample_sharegpt_requests(
            dataset_path=args.dataset_path,
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            fixed_output_len=args.sharegpt_output_len,
        )
    elif args.dataset_name == "random":
        input_requests = sample_random_requests(
            input_len=args.random_input_len,
            output_len=args.random_output_len,
            num_prompts=args.num_prompts,
            range_ratio=args.random_range_ratio,
            tokenizer=tokenizer,
            dataset_path=args.dataset_path,
        )
    elif args.dataset_name == "generated-shared-prefix":
        input_requests = sample_generated_shared_prefix_requests(
            num_groups=args.gen_num_groups,
            prompts_per_group=args.gen_prompts_per_group,
            system_prompt_len=args.gen_system_prompt_len,
            question_len=args.gen_question_len,
            output_len=args.gen_output_len,
            tokenizer=tokenizer,
        )
    else:
        raise ValueError(f"Unknown dataset: {args.dataset_name}")
    return input_requests


ASYNC_REQUEST_FUNCS = {
    "sglang": async_request_sglang_generate,
    "sglang-native": async_request_sglang_generate,
    "sglang-oai": async_request_openai_completions,
    "vllm": async_request_openai_completions,
    "lmdeploy": async_request_openai_completions,
    "trt": async_request_trt_llm,
    "gserver": async_request_gserver,
    "truss": async_request_truss,
}


@dataclass
class BenchmarkMetrics:
    completed: int
    total_input: int
    total_output: int
    total_output_retokenized: int
    request_throughput: float
    input_throughput: float
    output_throughput: float
    output_throughput_retokenized: float
    total_throughput: float
    total_throughput_retokenized: float
    mean_ttft_ms: float
    median_ttft_ms: float
    std_ttft_ms: float
    p99_ttft_ms: float
    mean_tpot_ms: float
    median_tpot_ms: float
    std_tpot_ms: float
    p99_tpot_ms: float
    mean_itl_ms: float
    median_itl_ms: float
    std_itl_ms: float
    p99_itl_ms: float
    mean_e2e_latency_ms: float
    median_e2e_latency_ms: float


SHAREGPT_URL = "https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json"


def download_and_cache_file(url: str, filename: Optional[str] = None):
    """Read and cache a file from a url."""
    if filename is None:
        filename = os.path.join("/tmp", url.split("/")[-1])

    # Check if the cache file already exists
    if os.path.exists(filename):
        return filename

    print(f"Downloading from {url} to {filename}")

    # Stream the response to show the progress bar
    response = requests.get(url, stream=True)
    response.raise_for_status()  # Check for request errors

    # Total size of the file in bytes
    total_size = int(response.headers.get("content-length", 0))
    chunk_size = 1024  # Download in chunks of 1KB

    # Use tqdm to display the progress bar
    with open(filename, "wb") as f, tqdm(
        desc=filename,
        total=total_size,
        unit="B",
        unit_scale=True,
        unit_divisor=1024,
    ) as bar:
        for chunk in response.iter_content(chunk_size=chunk_size):
            f.write(chunk)
            bar.update(len(chunk))

    return filename


def sample_sharegpt_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: PreTrainedTokenizerBase,
    fixed_output_len: Optional[int] = None,
) -> List[Tuple[str, int, int]]:
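    """Sample (prompt, prompt_len, output_len) tuples from ShareGPT, downloading
    the dataset if necessary and pruning sequences that are too short or too long.
    """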
    if fixed_output_len is not None and fixed_output_len < 4:
        raise ValueError("output_len too small")

    # Download sharegpt if necessary
    if not os.path.isfile(dataset_path):
        dataset_path = download_and_cache_file(SHAREGPT_URL)

    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with less than 2 turns.
    dataset = [data for data in dataset if len(data["conversations"]) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [
        (data["conversations"][0]["value"], data["conversations"][1]["value"])
        for data in dataset
    ]

    # Shuffle the dataset.
    random.shuffle(dataset)

    # Filter out sequences that are too long or too short
    filtered_dataset: List[Tuple[str, int, int]] = []
    for i in range(len(dataset)):
        if len(filtered_dataset) == num_requests:
            break

        # Tokenize the prompts and completions.
        prompt = dataset[i][0]
        prompt_token_ids = tokenizer.encode(prompt)
        completion = dataset[i][1]
        completion_token_ids = tokenizer.encode(completion)
        prompt_len = len(prompt_token_ids)
        output_len = (
            len(completion_token_ids) if fixed_output_len is None else fixed_output_len
        )
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or (
            prompt_len + output_len > 2048 and fixed_output_len is None
        ):
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))

    print(f"#Input tokens: {np.sum([x[1] for x in filtered_dataset])}")
    print(f"#Output tokens: {np.sum([x[2] for x in filtered_dataset])}")
    return filtered_dataset


def sample_random_requests(
    input_len: int,
    output_len: int,
    num_prompts: int,
    range_ratio: float,
    tokenizer: PreTrainedTokenizerBase,
    dataset_path: str,
) -> List[Tuple[str, int, int]]:
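    """Sample requests whose input/output lengths are drawn uniformly from
    [len * range_ratio, len]; prompts are built by repeating or truncating
    ShareGPT token ids to match the sampled input lengths.
    """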

    input_lens = np.random.randint(
        max(int(input_len * range_ratio), 1),
        input_len + 1,
        size=num_prompts,
    )
    output_lens = np.random.randint(
        int(output_len * range_ratio),
        output_len + 1,
        size=num_prompts,
    )

    if True:
        # Sample token ids from ShareGPT and repeat/truncate them to satisfy the input_lens

        # Download sharegpt if necessary
        if not os.path.isfile(dataset_path):
            dataset_path = download_and_cache_file(SHAREGPT_URL)

        # Load the dataset.
        with open(dataset_path) as f:
            dataset = json.load(f)
        # Filter out the conversations with less than 2 turns.
        dataset = [data for data in dataset if len(data["conversations"]) >= 2]
        # Only keep the first two turns of each conversation.
        dataset = [
            (data["conversations"][0]["value"], data["conversations"][1]["value"])
            for data in dataset
        ]
        # Shuffle the dataset.
        random.shuffle(dataset)

        # Filter out sequences that are too long or too short
        input_requests: List[Tuple[str, int, int]] = []
        for data in dataset:
            i = len(input_requests)
            if i == num_prompts:
                break

            # Tokenize the prompts and completions.
            prompt = data[0]
            prompt_token_ids = tokenizer.encode(prompt)
            prompt_len = len(prompt_token_ids)

            # Skip empty prompt
            if prompt_len == 0:
                continue

            if prompt_len > input_lens[i]:
                input_ids = prompt_token_ids[: input_lens[i]]
            else:
                ratio = (input_lens[i] + prompt_len - 1) // prompt_len
                input_ids = (prompt_token_ids * ratio)[: input_lens[i]]
            prompt = tokenizer.decode(input_ids)
            input_requests.append((prompt, int(input_lens[i]), int(output_lens[i])))
    else:
        # Sample token ids from random integers. This can cause some NaN issues.
        offsets = np.random.randint(0, tokenizer.vocab_size, size=num_prompts)
        input_requests = []
        for i in range(num_prompts):
            prompt = tokenizer.decode(
                [
                    (offsets[i] + i + j) % tokenizer.vocab_size
                    for j in range(input_lens[i])
                ]
            )
            input_requests.append((prompt, int(input_lens[i]), int(output_lens[i])))

    print(f"#Input tokens: {np.sum(input_lens)}")
    print(f"#Output tokens: {np.sum(output_lens)}")
    return input_requests


def gen_prompt(tokenizer, token_num):
    """Generate a random prompt of specified token length using tokenizer vocabulary."""
    all_available_tokens = list(tokenizer.get_vocab().values())
    selected_tokens = random.choices(all_available_tokens, k=token_num)
    return tokenizer.decode(selected_tokens)


def get_gen_prefix_cache_path(args, tokenizer):
    """Create cache directory under ~/.cache/sglang/benchmark"""
    cache_dir = Path.home() / ".cache" / "sglang" / "benchmark"

    # Create a unique cache filename based on the generation parameters
    cache_key = (
        f"gen_prefix_{args.gen_num_groups}_{args.gen_prompts_per_group}_"
        f"{args.gen_system_prompt_len}_{args.gen_question_len}_{args.gen_output_len}_"
        f"{tokenizer.__class__.__name__}.pkl"
    )
    return cache_dir / cache_key


def sample_generated_shared_prefix_requests(
    num_groups: int,
    prompts_per_group: int,
    system_prompt_len: int,
    question_len: int,
    output_len: int,
    tokenizer: PreTrainedTokenizerBase,
) -> List[Tuple[str, int, int]]:
    """Generate benchmark requests with shared system prompts using random tokens and caching."""
    cache_path = get_gen_prefix_cache_path(args, tokenizer)

    # Try to load from cache first
    if cache_path.exists():
        print(f"\nLoading cached generated input data from {cache_path}")
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    print("\nGenerating new input data...")

    # Generate system prompts for each group
    system_prompts = []
    for _ in range(num_groups):
        system_prompt = gen_prompt(tokenizer, system_prompt_len)
        system_prompts.append(system_prompt)

    # Generate questions
    questions = []
    for _ in range(num_groups * prompts_per_group):
        question = gen_prompt(tokenizer, question_len)
        questions.append(question)

    # Combine system prompts with questions
    input_requests = []
    total_input_tokens = 0
    total_output_tokens = 0

    for group_idx in tqdm(range(num_groups), desc="Generating system prompt"):
        system_prompt = system_prompts[group_idx]
        for prompt_idx in tqdm(
            range(prompts_per_group), desc="Generating questions", leave=False
        ):
            question = questions[group_idx * prompts_per_group + prompt_idx]
            full_prompt = f"{system_prompt}\n\n{question}"
            prompt_len = len(tokenizer.encode(full_prompt))

            input_requests.append((full_prompt, prompt_len, output_len))
            total_input_tokens += prompt_len
            total_output_tokens += output_len

    # Shuffle questions
    random.shuffle(input_requests)

    # Print statistics
    print(f"\nGenerated shared prefix dataset statistics:")
    print(f"Number of groups: {num_groups}")
    print(f"Prompts per group: {prompts_per_group}")
    print(f"Total prompts: {len(input_requests)}")
    print(f"Total input tokens: {total_input_tokens}")
    print(f"Total output tokens: {total_output_tokens}")
    print(
        f"Average system prompt length: {sum(len(tokenizer.encode(sp)) for sp in system_prompts) / len(system_prompts):.1f} tokens"
    )
    print(
        f"Average question length: {sum(len(tokenizer.encode(q)) for q in questions) / len(questions):.1f} tokens\n"
    )

    # Save to cache
    cache_path.parent.mkdir(parents=True, exist_ok=True)
    print(f"Caching generated input data to {cache_path}")
    with open(cache_path, "wb") as f:
        pickle.dump(input_requests, f)

    return input_requests


async def get_request(
    input_requests: List[Tuple[str, int, int]],
    request_rate: float,
) -> AsyncGenerator[Tuple[str, int, int], None]:
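    """Yield requests one at a time, sleeping between yields with exponentially
    distributed intervals so that arrivals follow a Poisson process at
    request_rate requests per second (no waiting when the rate is inf).
    """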
    input_requests = iter(input_requests)
    for request in input_requests:
        yield request

        if request_rate == float("inf"):
            # If the request rate is infinity, then we don't need to wait.
            continue

        # Sample the request interval from the exponential distribution.
        interval = np.random.exponential(1.0 / request_rate)
        # The next request will be sent after the interval.
        await asyncio.sleep(interval)


def calculate_metrics(
    input_requests: List[Tuple[str, int, int]],
    outputs: List[RequestFuncOutput],
    dur_s: float,
    tokenizer: PreTrainedTokenizerBase,
    backend: str,
) -> Tuple[BenchmarkMetrics, List[int]]:
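    """Aggregate per-request outputs into BenchmarkMetrics: throughputs, TTFT,
    TPOT (per output token, excluding the first), ITL, and end-to-end latency.
    Generated text is retokenized as a sanity check on the reported output lengths.
    """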
    output_lens: List[int] = []
    retokenized_output_lens: List[int] = []
    total_input = 0
    completed = 0
    itls: List[float] = []
    tpots: List[float] = []
    ttfts: List[float] = []
    e2e_latencies: List[float] = []
    for i in range(len(outputs)):
        if outputs[i].success:
            output_len = outputs[i].output_len
            output_lens.append(output_len)
            retokenized_output_len = len(
                tokenizer.encode(outputs[i].generated_text, add_special_tokens=False)
            )
            retokenized_output_lens.append(retokenized_output_len)
            total_input += input_requests[i][1]
            if output_len > 1:
                tpots.append((outputs[i].latency - outputs[i].ttft) / (output_len - 1))
            itls += outputs[i].itl
            ttfts.append(outputs[i].ttft)

            e2e_latencies.append(outputs[i].latency)

            completed += 1
        else:
            output_lens.append(0)
            retokenized_output_lens.append(0)

    if completed == 0:
        warnings.warn(
            "All requests failed. This is likely due to a misconfiguration "
            "on the benchmark arguments.",
            stacklevel=2,
        )
    metrics = BenchmarkMetrics(
        completed=completed,
        total_input=total_input,
        total_output=sum(output_lens),
        total_output_retokenized=sum(retokenized_output_lens),
        request_throughput=completed / dur_s,
        input_throughput=total_input / dur_s,
        output_throughput=sum(output_lens) / dur_s,
        output_throughput_retokenized=sum(retokenized_output_lens) / dur_s,
        total_throughput=(total_input + sum(output_lens)) / dur_s,
        total_throughput_retokenized=(total_input + sum(retokenized_output_lens))
        / dur_s,
        mean_ttft_ms=np.mean(ttfts or 0)
        * 1000,  # ttfts is empty if streaming is not supported by backend
        median_ttft_ms=np.median(ttfts or 0) * 1000,
        std_ttft_ms=np.std(ttfts or 0) * 1000,
        p99_ttft_ms=np.percentile(ttfts or 0, 99) * 1000,
        mean_tpot_ms=np.mean(tpots or 0) * 1000,
        median_tpot_ms=np.median(tpots or 0) * 1000,
        std_tpot_ms=np.std(tpots or 0) * 1000,
        p99_tpot_ms=np.percentile(tpots or 0, 99) * 1000,
        mean_itl_ms=np.mean(itls or 0) * 1000,
        median_itl_ms=np.median(itls or 0) * 1000,
        std_itl_ms=np.std(itls or 0) * 1000,
        p99_itl_ms=np.percentile(itls or 0, 99) * 1000,
        mean_e2e_latency_ms=np.mean(e2e_latencies) * 1000,
        median_e2e_latency_ms=np.median(e2e_latencies) * 1000,
    )

    return metrics, output_lens


async def benchmark(
    backend: str,
    api_url: str,
    base_url: str,
    model_id: str,
    tokenizer: PreTrainedTokenizerBase,
    input_requests: List[Tuple[str, int, int]],
    request_rate: float,
    max_concurrency: Optional[int],
    disable_tqdm: bool,
    lora_name: str,
    extra_request_body: Dict[str, Any],
    profile: bool,
):
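    """Run the benchmark: warm up with a single request, optionally start the
    profiler, dispatch all requests at request_rate (bounded by max_concurrency),
    then compute, print, and return the metrics.
    """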
    if backend in ASYNC_REQUEST_FUNCS:
        request_func = ASYNC_REQUEST_FUNCS[backend]
    else:
        raise ValueError(f"Unknown backend: {backend}")

    # From https://github.com/vllm-project/vllm/pull/9390
    semaphore = asyncio.Semaphore(max_concurrency) if max_concurrency else None

    async def limited_request_func(request_func_input, pbar):
        if semaphore is None:
            return await request_func(request_func_input=request_func_input, pbar=pbar)
        async with semaphore:
            return await request_func(request_func_input=request_func_input, pbar=pbar)

    print("Starting initial single prompt test run...")
    test_prompt, test_prompt_len, test_output_len = input_requests[0]
    test_input = RequestFuncInput(
        model=model_id,
        prompt=test_prompt,
        api_url=api_url,
        prompt_len=test_prompt_len,
        output_len=min(test_output_len, 32),
        lora_name=lora_name,
        extra_request_body=extra_request_body,
    )
    test_output = await request_func(request_func_input=test_input)
    if not test_output.success:
        raise ValueError(
            "Initial test run failed - Please make sure benchmark arguments "
            f"are correctly specified. Error: {test_output.error}"
        )
    else:
        requests.post(base_url + "/flush_cache")
        print("Initial test run completed. Starting main benchmark run...")

    time.sleep(1.5)

    if profile:
        print("Starting profiler...")
        profile_output = await async_request_profile(
            api_url=base_url + "/start_profile"
        )
        if profile_output.success:
            print("Profiler started")

    pbar = None if disable_tqdm else tqdm(total=len(input_requests))

    benchmark_start_time = time.perf_counter()
    tasks: List[asyncio.Task] = []
    async for request in get_request(input_requests, request_rate):
        prompt, prompt_len, output_len = request
        request_func_input = RequestFuncInput(
            model=model_id,
            prompt=prompt,
            api_url=api_url,
            prompt_len=prompt_len,
            output_len=output_len,
            lora_name=lora_name,
            extra_request_body=extra_request_body,
        )
        tasks.append(
            asyncio.create_task(
                limited_request_func(request_func_input=request_func_input, pbar=pbar)
            )
        )
    outputs: List[RequestFuncOutput] = await asyncio.gather(*tasks)

    if profile:
        print("Stopping profiler...")
        profile_output = await async_request_profile(api_url=base_url + "/stop_profile")
        if profile_output.success:
            print("Profiler stopped")

    if pbar is not None:
        pbar.close()

    benchmark_duration = time.perf_counter() - benchmark_start_time

    metrics, output_lens = calculate_metrics(
        input_requests=input_requests,
        outputs=outputs,
        dur_s=benchmark_duration,
        tokenizer=tokenizer,
        backend=backend,
    )

    print("\n{s:{c}^{n}}".format(s=" Serving Benchmark Result ", n=50, c="="))
    print("{:<40} {:<10}".format("Backend:", backend))
    print("{:<40} {:<10}".format("Traffic request rate:", request_rate))
    print(
        "{:<40} {:<10}".format(
            "Max reqeuest concurrency:",
            max_concurrency if max_concurrency else "not set",
        )
    )
    print("{:<40} {:<10}".format("Successful requests:", metrics.completed))
    print("{:<40} {:<10.2f}".format("Benchmark duration (s):", benchmark_duration))
    print("{:<40} {:<10}".format("Total input tokens:", metrics.total_input))
    print("{:<40} {:<10}".format("Total generated tokens:", metrics.total_output))
    print(
        "{:<40} {:<10}".format(
            "Total generated tokens (retokenized):", metrics.total_output_retokenized
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Request throughput (req/s):", metrics.request_throughput
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Input token throughput (tok/s):", metrics.input_throughput
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Output token throughput (tok/s):", metrics.output_throughput
        )
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Total token throughput (tok/s):", metrics.total_throughput
        )
    )
    print("{s:{c}^{n}}".format(s="End-to-End Latency", n=50, c="-"))
    print(
        "{:<40} {:<10.2f}".format("Mean E2E Latency (ms):", metrics.mean_e2e_latency_ms)
    )
    print(
        "{:<40} {:<10.2f}".format(
            "Median E2E Latency (ms):", metrics.median_e2e_latency_ms
        )
    )
    print("{s:{c}^{n}}".format(s="Time to First Token", n=50, c="-"))
    print("{:<40} {:<10.2f}".format("Mean TTFT (ms):", metrics.mean_ttft_ms))
    print("{:<40} {:<10.2f}".format("Median TTFT (ms):", metrics.median_ttft_ms))
    print("{:<40} {:<10.2f}".format("P99 TTFT (ms):", metrics.p99_ttft_ms))
    print(
        "{s:{c}^{n}}".format(s="Time per Output Token (excl. 1st token)", n=50, c="-")
    )
    print("{:<40} {:<10.2f}".format("Mean TPOT (ms):", metrics.mean_tpot_ms))
    print("{:<40} {:<10.2f}".format("Median TPOT (ms):", metrics.median_tpot_ms))
    print("{:<40} {:<10.2f}".format("P99 TPOT (ms):", metrics.p99_tpot_ms))
    print("{s:{c}^{n}}".format(s="Inter-token Latency", n=50, c="-"))
    print("{:<40} {:<10.2f}".format("Mean ITL (ms):", metrics.mean_itl_ms))
    print("{:<40} {:<10.2f}".format("Median ITL (ms):", metrics.median_itl_ms))
    print("{:<40} {:<10.2f}".format("P99 ITL (ms):", metrics.p99_itl_ms))
    print("=" * 50)

    if (
        metrics.median_ttft_ms is not None
        and metrics.mean_itl_ms is not None
        and metrics.output_throughput is not None
    ):
        result = {
            "backend": args.backend,
            "dataset_name": args.dataset_name,
            "request_rate": request_rate,
            "max_concurrency": max_concurrency,
            "total_input_tokens": metrics.total_input,
            "total_output_tokens": metrics.total_output,
            "total_output_tokens_retokenized": metrics.total_output_retokenized,
            "mean_e2e_latency_ms": metrics.mean_e2e_latency_ms,
            "median_e2e_latency_ms": metrics.median_e2e_latency_ms,
            "median_ttft_ms": metrics.median_ttft_ms,
            "median_itl_ms": metrics.median_itl_ms,
            "output_throughput": metrics.output_throughput,
            "sharegpt_output_len": args.sharegpt_output_len,
            "random_input_len": args.random_input_len,
            "random_output_len": args.random_output_len,
            "random_range_ratio": args.random_range_ratio,
            "duration": benchmark_duration,
            "completed": metrics.completed,
        }
    else:
        print(f"Error running benchmark for request rate: {request_rate}")
        print("-" * 30)

    # Determine output file name
    if args.output_file:
        output_file_name = args.output_file
    else:
        now = datetime.now().strftime("%m%d")
        if args.dataset_name == "random":
            output_file_name = f"{args.backend}_{now}_{args.num_prompts}_{args.random_input_len}_{args.random_output_len}.jsonl"
        else:
            output_file_name = f"{args.backend}_{now}_{args.num_prompts}_sharegpt.jsonl"

    # Append results to a JSONL file
    with open(output_file_name, "a") as file:
        file.write(json.dumps(result) + "\n")

    result = {
        "duration": benchmark_duration,
        "completed": metrics.completed,
        "total_input_tokens": metrics.total_input,
        "total_output_tokens": metrics.total_output,
        "total_output_tokens_retokenized": metrics.total_output_retokenized,
        "request_throughput": metrics.request_throughput,
        "input_throughput": metrics.input_throughput,
        "output_throughput": metrics.output_throughput,
        "mean_ttft_ms": metrics.mean_ttft_ms,
        "median_ttft_ms": metrics.median_ttft_ms,
        "std_ttft_ms": metrics.std_ttft_ms,
        "p99_ttft_ms": metrics.p99_ttft_ms,
        "mean_tpot_ms": metrics.mean_tpot_ms,
        "median_tpot_ms": metrics.median_tpot_ms,
        "std_tpot_ms": metrics.std_tpot_ms,
        "p99_tpot_ms": metrics.p99_tpot_ms,
        "mean_itl_ms": metrics.mean_itl_ms,
        "median_itl_ms": metrics.median_itl_ms,
        "std_itl_ms": metrics.std_itl_ms,
        "p99_itl_ms": metrics.p99_itl_ms,
        "input_lens": [output.prompt_len for output in outputs],
        "output_lens": output_lens,
        "ttfts": [output.ttft for output in outputs],
        "itls": [output.itl for output in outputs],
        "generated_texts": [output.generated_text for output in outputs],
        "errors": [output.error for output in outputs],
        "mean_e2e_latency_ms": metrics.mean_e2e_latency_ms,
        "median_e2e_latency_ms": metrics.median_e2e_latency_ms,
    }
    return result


def parse_request_rate_range(request_rate_range):
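    """Parse a comma-separated request-rate spec: exactly three values are treated
    as start,stop,step for range() (e.g. "2,34,2" -> [2, 4, ..., 32]); any other
    count is an explicit list of rates (e.g. "1,2,4,8,16,32").
    """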
    if len(request_rate_range.split(",")) == 3:
        start, stop, step = map(int, request_rate_range.split(","))
        return list(range(start, stop, step))
    else:
        return list(map(int, request_rate_range.split(",")))


def check_chat_template(model_path):
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        return "chat_template" in tokenizer.init_kwargs
    except Exception as e:
        print(f"Fail to load tokenizer config with error={e}")
        return False


def run_benchmark(args_: argparse.Namespace):
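    """Entry point: resolve server URLs, model, and tokenizer, load the dataset,
    then run a single benchmark or sweep over request rates when --multi is set.
    """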
    global args
    args = args_

    # Set default value for max_concurrency if not present
    if not hasattr(args, "max_concurrency"):
        args.max_concurrency = None

    # Set global environments
    set_ulimit()
    random.seed(args.seed)
    np.random.seed(args.seed)

    extra_request_body = {}
    if args.extra_request_body:
        extra_request_body = json.loads(args.extra_request_body)

    # Set url
    if args.port is None:
        args.port = {
            "sglang": 30000,
            "sglang-native": 30000,
            "sglang-oai": 30000,
            "lmdeploy": 23333,
            "vllm": 8000,
            "trt": 8000,
            "gserver": 9988,
            "truss": 8080,
        }.get(args.backend, 30000)

    model_url = (
        f"{args.base_url}/v1/models"
        if args.base_url
        else f"http://{args.host}:{args.port}/v1/models"
    )

    if args.backend in ["sglang", "sglang-native"]:
        api_url = (
            f"{args.base_url}/generate"
            if args.base_url
            else f"http://{args.host}:{args.port}/generate"
        )
    elif args.backend in ["sglang-oai", "vllm", "lmdeploy"]:
        api_url = (
            f"{args.base_url}/v1/completions"
            if args.base_url
            else f"http://{args.host}:{args.port}/v1/completions"
        )
    elif args.backend == "trt":
        api_url = (
            f"{args.base_url}/v2/models/ensemble/generate_stream"
            if args.base_url
            else f"http://{args.host}:{args.port}/v2/models/ensemble/generate_stream"
        )
        if args.model is None:
            print("Please provide a model using `--model` when using `trt` backend.")
            sys.exit(1)
    elif args.backend == "gserver":
        api_url = args.base_url if args.base_url else f"{args.host}:{args.port}"
        args.model = args.model or "default"
    elif args.backend == "truss":
        api_url = (
            f"{args.base_url}/v1/models/model:predict"
            if args.base_url
            else f"http://{args.host}:{args.port}/v1/models/model:predict"
        )
    base_url = (
        f"http://{args.host}:{args.port}" if args.base_url is None else args.base_url
    )

    # Get model name
    if args.model is None:
        if args.backend == "truss":
            print(
                "Please provide a model with `--model` when using truss backend. e.g. --model meta-llama/Llama-3.1-8B-Instruct"
            )
            sys.exit(1)
        try:
            response = requests.get(model_url)
            model_list = response.json().get("data", [])
            args.model = model_list[0]["id"] if model_list else None
        except Exception as e:
            print(f"Failed to fetch model from {model_url}. Error: {e}")
            print(
                "Please specify the correct host and port using `--host` and `--port`."
            )
            sys.exit(1)

    if args.model is None:
        print("No model specified or found. Please provide a model using `--model`.")
        sys.exit(1)

    if not check_chat_template(args.model):
        print(
            "\nWARNING It is recommended to use the `Chat` or `Instruct` model for benchmarking.\n"
            "Because when the tokenizer counts the output tokens, if there is gibberish, it might count incorrectly.\n"
        )

    print(f"{args}\n")

    # Read dataset
    backend = args.backend
    model_id = args.model
    tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model

    tokenizer = get_tokenizer(tokenizer_id)

    input_requests = get_dataset(args, tokenizer)

    if not args.multi:
        return asyncio.run(
            benchmark(
                backend=backend,
                api_url=api_url,
                base_url=base_url,
                model_id=model_id,
                tokenizer=tokenizer,
                input_requests=input_requests,
                request_rate=args.request_rate,
                max_concurrency=args.max_concurrency,
                disable_tqdm=args.disable_tqdm,
                lora_name=args.lora_name,
                extra_request_body=extra_request_body,
                profile=args.profile,
            )
        )
    else:
        # Benchmark multiple request rates. TODO: use a fixed duration to compute num_prompts
        request_rates = parse_request_rate_range(args.request_rate_range)

        for rate in request_rates:
            asyncio.run(
                benchmark(
                    backend=backend,
                    api_url=api_url,
                    base_url=base_url,
                    model_id=model_id,
                    tokenizer=tokenizer,
                    input_requests=input_requests,
                    request_rate=rate,
                    max_concurrency=args.max_concurrency,
                    disable_tqdm=args.disable_tqdm,
                    lora_name=args.lora_name,
                    extra_request_body=extra_request_body,
                    profile=args.profile,
                )
            )


def set_ulimit(target_soft_limit=65535):
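    """Raise the soft limit on open file descriptors (RLIMIT_NOFILE) so the benchmark can keep many concurrent connections open."""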
    resource_type = resource.RLIMIT_NOFILE
    current_soft, current_hard = resource.getrlimit(resource_type)

    if current_soft < target_soft_limit:
        try:
            resource.setrlimit(resource_type, (target_soft_limit, current_hard))
        except ValueError as e:
            print(f"Fail to set RLIMIT_NOFILE: {e}")


if __name__ == "__main__":
    parser = ArgumentParser(description="Benchmark the online serving throughput.")
    parser.add_argument(
        "--backend",
        type=str,
        choices=list(ASYNC_REQUEST_FUNCS.keys()),
        default="sglang",
        help="Must specify a backend, depending on the LLM Inference Engine.",
    )
    parser.add_argument(
        "--base-url",
        type=str,
        default=None,
        help="Server or API base url if not using http host and port.",
    )
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="Default host is 0.0.0.0."
    )
    parser.add_argument(
        "--port",
        type=int,
        help="If not set, the default port is configured according to its default value for different LLM Inference Engines.",
    )
    parser.add_argument(
1321
1322
1323
        "--dataset-name",
        type=str,
        default="sharegpt",
        choices=["sharegpt", "random", "generated-shared-prefix"],
        help="Name of the dataset to benchmark on.",
    )
    parser.add_argument(
        "--dataset-path", type=str, default="", help="Path to the dataset."
    )
    parser.add_argument(
        "--model",
        type=str,
        help="Name or path of the model. If not set, the default model will request /v1/models for conf.",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        help="Name or path of the tokenizer. If not set, using the model conf.",
    )
    parser.add_argument(
        "--num-prompts",
        type=int,
        default=1000,
        help="Number of prompts to process. Default is 1000.",
    )
    parser.add_argument(
        "--sharegpt-output-len",
        type=int,
        default=None,
        help="Output length for each request. Overrides the output length from the ShareGPT dataset.",
    )
    parser.add_argument(
        "--random-input-len",
        type=int,
        default=1024,
        help="Number of input tokens per request, used only for random dataset.",
    )
    parser.add_argument(
        "--random-output-len",
        default=1024,
        type=int,
        help="Number of output tokens per request, used only for random dataset.",
    )
    parser.add_argument(
        "--random-range-ratio",
        type=float,
        default=0.0,
        help="Range of sampled ratio of input/output length, "
        "used only for random dataset.",
    )
    parser.add_argument(
        "--request-rate",
        type=float,
        default=float("inf"),
        help="Number of requests per second. If this is inf, then all the requests are sent at time 0. "
min-xu-et's avatar
min-xu-et committed
1376
        "Otherwise, we use Poisson process to synthesize the request arrival times. Default is inf.",
zhyncs's avatar
zhyncs committed
1377
    )
    parser.add_argument(
        "--max-concurrency",
        type=int,
        default=None,
        help="Maximum number of concurrent requests. This can be used "
        "to help simulate an environment where a higher level component "
        "is enforcing a maximum number of concurrent requests. While the "
        "--request-rate argument controls the rate at which requests are "
        "initiated, this argument will control how many are actually allowed "
        "to execute at a time. This means that when used in combination, the "
        "actual request rate may be lower than specified with --request-rate, "
        "if the server is not processing requests fast enough to keep up.",
    )
    parser.add_argument("--seed", type=int, default=1, help="The random seed.")
    parser.add_argument(
        "--multi",
        action="store_true",
        help="Use request rate range rather than single value.",
    )
    parser.add_argument(
        "--request-rate-range",
        type=str,
        default="2,34,2",
        help="Range of request rates in the format start,stop,step. Default is 2,34,2. It also supports a list of request rates, requiring the parameters to not equal three.",
    )
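    # e.g. "--multi --request-rate-range 2,34,2" sweeps rates like range(2, 34, 2) -> 2, 4, ..., 32
    # (assumed expansion; see parse_request_rate_range), while a list such as
    # "--multi --request-rate-range 1,2,4,8,16,32" is used as-is.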
    parser.add_argument("--output-file", type=str, help="Output JSONL file name.")
    parser.add_argument(
        "--disable-tqdm",
        action="store_true",
        help="Specify to disable tqdm progress bar.",
    )
    parser.add_argument(
        "--disable-stream",
        action="store_true",
        help="Disable streaming mode.",
    )
    parser.add_argument(
        "--disable-ignore-eos",
        action="store_true",
        help="Disable ignoring EOS.",
    )
    parser.add_argument(
        "--return-logprob",
        action="store_true",
        help="Return logprob.",
    )
    parser.add_argument(
        "--extra-request-body",
        metavar='{"key1": "value1", "key2": "value2"}',
        type=str,
        help="Append given JSON object to the request payload. You can use this to specify"
        "additional generate params like sampling params.",
    )
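    # Example (keys are backend-dependent sampling params, shown for illustration only):
    #   --extra-request-body '{"temperature": 0.0, "top_p": 1.0}'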

    group = parser.add_argument_group("generated-shared-prefix dataset arguments")
    group.add_argument(
        "--gen-num-groups",
        type=int,
        default=64,
        help="Number of system prompt groups for generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gen-prompts-per-group",
        type=int,
        default=16,
        help="Number of prompts per system prompt group for generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gen-system-prompt-len",
        type=int,
        default=2048,
        help="Target length in tokens for system prompts in generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gen-question-len",
        type=int,
        default=128,
        help="Target length in tokens for questions in generated-shared-prefix dataset",
    )
    group.add_argument(
        "--gen-output-len",
        type=int,
        default=256,
        help="Target length in tokens for outputs in generated-shared-prefix dataset",
    )
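    # Example invocation (values shown are the defaults above):
    #   --dataset-name generated-shared-prefix --gen-num-groups 64 --gen-prompts-per-group 16 \
    #   --gen-system-prompt-len 2048 --gen-question-len 128 --gen-output-len 256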
    parser.add_argument(
        "--profile",
        action="store_true",
        help="Use Torch Profiler. The endpoint must be launched with "
        "SGLANG_TORCH_PROFILER_DIR to enable profiler.",
    )
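    # Example server launch to enable profiling (launch flags are illustrative, adjust to your setup):
    #   SGLANG_TORCH_PROFILER_DIR=/tmp/profile python -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct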
    parser.add_argument(
        "--lora-name",
        type=str,
        default=None,
        help="The name of LoRA adapter",
    )
    args = parser.parse_args()
    run_benchmark(args)