# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Benchmark offline inference throughput."""

import argparse
import dataclasses
import json
import os
import random
import time
import warnings
from pathlib import Path
from typing import Any, Optional, Union

import numpy as np
import torch
import uvloop
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerBase

from benchmark_dataset import (
    AIMODataset,
    BurstGPTDataset,
    ConversationDataset,
    InstructCoderDataset,
    RandomDataset,
    SampleRequest,
    ShareGPTDataset,
    SonnetDataset,
    VisionArenaDataset,
)
from benchmark_utils import convert_to_pytorch_benchmark_format, write_to_json
from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
from vllm.entrypoints.openai.api_server import (
    build_async_engine_client_from_engine_args,
)
from vllm.inputs import PromptType, TextPrompt, TokensPrompt
from vllm.lora.request import LoRARequest
from vllm.outputs import RequestOutput
from vllm.sampling_params import BeamSearchParams
from vllm.utils import FlexibleArgumentParser, merge_async_iterators


def run_vllm(
    requests: list[SampleRequest],
    n: int,
    num_iters_warmup: int,
    engine_args: EngineArgs,
    disable_detokenize: bool = False,
) -> tuple[float, Optional[list[RequestOutput]]]:
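    """Benchmark throughput with the synchronous ``LLM`` API.

    Builds the engine from ``engine_args``, runs ``num_iters_warmup`` warmup
    iterations on a dummy prompt, optionally wraps generation in
    ``torch.profiler`` when the script-level ``--profile`` flag is set, and
    then generates all ``requests``. Returns the elapsed wall-clock time in
    seconds and the request outputs (``None`` on the beam-search path).
    """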
    from vllm import LLM, SamplingParams

    llm = LLM(**dataclasses.asdict(engine_args))
    assert all(
        llm.llm_engine.model_config.max_model_len
        >= (request.prompt_len + request.expected_output_len)
        for request in requests
    ), (
        "Please ensure that max_model_len is greater than the sum of"
        " prompt_len and expected_output_len for all requests."
    )
    # Add the requests to the engine.
    prompts: list[Union[TextPrompt, TokensPrompt]] = []
    sampling_params: list[SamplingParams] = []
    for request in requests:
        prompts.append(
            TokensPrompt(
                prompt_token_ids=request.prompt["prompt_token_ids"],
                multi_modal_data=request.multi_modal_data,
            )
            if "prompt_token_ids" in request.prompt
            else TextPrompt(
                prompt=request.prompt, multi_modal_data=request.multi_modal_data
            )
        )
        sampling_params.append(
            SamplingParams(
                n=n,
                temperature=1.0,
                top_p=1.0,
                ignore_eos=True,
                max_tokens=request.expected_output_len,
                detokenize=not disable_detokenize,
            )
        )
    lora_requests: Optional[list[LoRARequest]] = None
    if engine_args.enable_lora:
        lora_requests = [request.lora_request for request in requests]

    # warmup
    warmup_sampling_params = SamplingParams(
        n=n,
        temperature=1.0,
        top_p=1.0,
        ignore_eos=True,
        max_tokens=10,
    )
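    # Warm up on a single dummy prompt of 10 random token ids; the timed run
    # below uses the real requests.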
    dummy_prompt_token_ids = np.random.randint(10000, size=(1, 10))
    dummy_prompts: list[PromptType] = [
        {"prompt_token_ids": batch} for batch in dummy_prompt_token_ids.tolist()
    ]

    use_beam_search = False

    print("Warming up...")
    for _ in tqdm(range(num_iters_warmup), desc="Warmup iterations"):
        if not use_beam_search:
            llm.generate(
                dummy_prompts,
                sampling_params=warmup_sampling_params,
                use_tqdm=False,
            )
        else:
            # Keep the beam-search warmup as short as the sampling warmup above.
            llm.beam_search(
                dummy_prompts,
                BeamSearchParams(
                    beam_width=n,
                    max_tokens=10,
                    ignore_eos=True,
                ),
            )

    outputs = None
    if not use_beam_search:
        if args.profile:
            profile_dir = args.profile_result_dir
            if not profile_dir:
                profile_dir = (
                    Path(".")
                    / "vllm_benchmark_result"
                    / f"latency_result_{time.time()}"
                )
            print(f"Profiling (results will be saved to '{profile_dir}')...")
            with torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                    torch.profiler.ProfilerActivity.CUDA,
                ],
                record_shapes=True,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    str(profile_dir)
                ),
            ) as prof:
                start = time.perf_counter()
                outputs = llm.generate(
                    prompts, sampling_params, lora_request=lora_requests, use_tqdm=True
                )
                end = time.perf_counter()
            print("Profiler time report:")
            print(
                prof.key_averages(group_by_input_shape=True).table(
                    sort_by="self_cuda_time_total", row_limit=-1
                )
            )
        else:
            start = time.perf_counter()
            outputs = llm.generate(
                prompts, sampling_params, lora_request=lora_requests, use_tqdm=True
            )
            end = time.perf_counter()
    else:
        assert lora_requests is None, "BeamSearch API does not support LoRA"
        prompts = [request.prompt for request in requests]
        # output_len should be the same for all requests.
        output_len = requests[0].expected_output_len
        for request in requests:
            assert request.expected_output_len == output_len
        start = time.perf_counter()
        llm.beam_search(
            prompts,
            BeamSearchParams(
                beam_width=n,
                max_tokens=output_len,
                ignore_eos=True,
            ),
        )
        end = time.perf_counter()
    return end - start, outputs


def run_vllm_chat(
    requests: list[SampleRequest],
    n: int,
    engine_args: EngineArgs,
    disable_detokenize: bool = False,
) -> tuple[float, list[RequestOutput]]:
    """
    Run vLLM chat benchmark. This function is recommended ONLY for benchmarking
    multimodal models as it properly handles multimodal inputs and chat
    formatting. For non-multimodal models, use run_vllm() instead.
    """
    from vllm import LLM, SamplingParams

    llm = LLM(**dataclasses.asdict(engine_args))

    assert all(
        llm.llm_engine.model_config.max_model_len
        >= (request.prompt_len + request.expected_output_len)
        for request in requests
    ), (
        "Please ensure that max_model_len is greater than the sum of "
        "prompt_len and expected_output_len for all requests."
    )

    prompts = []
    sampling_params: list[SamplingParams] = []
    for request in requests:
        prompts.append(request.prompt)
        sampling_params.append(
            SamplingParams(
                n=n,
                temperature=1.0,
                top_p=1.0,
                ignore_eos=True,
                max_tokens=request.expected_output_len,
                detokenize=not disable_detokenize,
            )
        )
    start = time.perf_counter()
    outputs = llm.chat(prompts, sampling_params, use_tqdm=True)
    end = time.perf_counter()
    return end - start, outputs


async def run_vllm_async(
    requests: list[SampleRequest],
    n: int,
    engine_args: AsyncEngineArgs,
    disable_frontend_multiprocessing: bool = False,
    disable_detokenize: bool = False,
) -> float:
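    """Benchmark throughput with the vLLM async engine client.

    Submits all requests concurrently, drains the merged output streams, and
    returns the elapsed wall-clock time in seconds.
    """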
    from vllm import SamplingParams

    async with build_async_engine_client_from_engine_args(
        engine_args, disable_frontend_multiprocessing
    ) as llm:
        model_config = await llm.get_model_config()
        assert all(
            model_config.max_model_len
            >= (request.prompt_len + request.expected_output_len)
            for request in requests
        ), (
            "Please ensure that max_model_len is greater than the sum of"
            " prompt_len and expected_output_len for all requests."
        )

        # Add the requests to the engine.
        prompts: list[Union[TextPrompt, TokensPrompt]] = []
        sampling_params: list[SamplingParams] = []
        lora_requests: list[Optional[LoRARequest]] = []
        for request in requests:
            prompts.append(
                TokensPrompt(
                    prompt_token_ids=request.prompt["prompt_token_ids"],
                    multi_modal_data=request.multi_modal_data,
                )
                if "prompt_token_ids" in request.prompt
                else TextPrompt(
                    prompt=request.prompt, multi_modal_data=request.multi_modal_data
                )
            )
            sampling_params.append(
                SamplingParams(
                    n=n,
                    temperature=1.0,
                    top_p=1.0,
                    ignore_eos=True,
                    max_tokens=request.expected_output_len,
                    detokenize=not disable_detokenize,
                )
            )
            lora_requests.append(request.lora_request)

        generators = []
        start = time.perf_counter()
        for i, (prompt, sp, lr) in enumerate(
            zip(prompts, sampling_params, lora_requests)
        ):
            generator = llm.generate(prompt, sp, lora_request=lr, request_id=f"test{i}")
            generators.append(generator)
        all_gens = merge_async_iterators(*generators)
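        # Drain every output stream; only total wall-clock time is recorded.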
        async for i, res in all_gens:
            pass
        end = time.perf_counter()
        return end - start


def run_hf(
    requests: list[SampleRequest],
    model: str,
    tokenizer: PreTrainedTokenizerBase,
    n: int,
    max_batch_size: int,
    trust_remote_code: bool,
    disable_detokenize: bool = False,
) -> float:
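    """Benchmark throughput with a plain HuggingFace Transformers baseline.

    Packs requests into padded batches and generates with
    ``AutoModelForCausalLM.generate``; returns the elapsed wall-clock time in
    seconds.
    """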
    llm = AutoModelForCausalLM.from_pretrained(
        model, torch_dtype=torch.float16, trust_remote_code=trust_remote_code
    )
    if llm.config.model_type == "llama":
        # To enable padding in the HF backend.
        tokenizer.pad_token = tokenizer.eos_token
    llm = llm.cuda()

    pbar = tqdm(total=len(requests))
    start = time.perf_counter()
    batch: list[str] = []
    max_prompt_len = 0
    max_output_len = 0
    for i in range(len(requests)):
        prompt = requests[i].prompt
        prompt_len = requests[i].prompt_len
        output_len = requests[i].expected_output_len
        # Add the prompt to the batch.
        batch.append(prompt)
        max_prompt_len = max(max_prompt_len, prompt_len)
        max_output_len = max(max_output_len, output_len)
        if len(batch) < max_batch_size and i != len(requests) - 1:
            # Check if we can add more requests to the batch.
            next_prompt_len = requests[i + 1].prompt_len
            next_output_len = requests[i + 1].expected_output_len
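            # Only grow the batch if the padded batch (longest prompt plus
            # longest number of new tokens) stays within the 2048-token budget
            # used here.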
            if (
                max(max_prompt_len, next_prompt_len)
                + max(max_output_len, next_output_len)
            ) <= 2048:
                # We can add more requests to the batch.
                continue

        # Generate the sequences.
        input_ids = tokenizer(batch, return_tensors="pt", padding=True).input_ids
        llm_outputs = llm.generate(
            input_ids=input_ids.cuda(),
            do_sample=True,
            num_return_sequences=n,
            temperature=1.0,
            top_p=1.0,
            use_cache=True,
            max_new_tokens=max_output_len,
        )
        if not disable_detokenize:
            # Include the decoding time.
            tokenizer.batch_decode(llm_outputs, skip_special_tokens=True)
        pbar.update(len(batch))

        # Clear the batch.
        batch = []
        max_prompt_len = 0
        max_output_len = 0
    end = time.perf_counter()
    return end - start


def run_mii(
    requests: list[SampleRequest],
    model: str,
    tensor_parallel_size: int,
    output_len: int,
) -> float:
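    """Benchmark throughput with a DeepSpeed-MII server.

    Serves the model with MII, generates every prompt with a fixed
    ``output_len``, then shuts the server down and returns the elapsed
    generation time in seconds.
    """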
    from mii import client, serve

    llm = serve(model, tensor_parallel=tensor_parallel_size)
    prompts = [request.prompt for request in requests]

    start = time.perf_counter()
    llm.generate(prompts, max_new_tokens=output_len)
    end = time.perf_counter()
    client = client(model)
    client.terminate_server()
    return end - start


def save_to_pytorch_benchmark_format(
    args: argparse.Namespace, results: dict[str, Any]
) -> None:
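    """Write the results next to ``--output-json`` in PyTorch benchmark format."""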
    pt_records = convert_to_pytorch_benchmark_format(
        args=args,
        metrics={
            "requests_per_second": [results["requests_per_second"]],
            "tokens_per_second": [results["tokens_per_second"]],
        },
        extra_info={
            k: results[k] for k in ["elapsed_time", "num_requests", "total_num_tokens"]
        },
    )
    if pt_records:
        # Don't use json suffix here as we don't want CI to pick it up
        pt_file = f"{os.path.splitext(args.output_json)[0]}.pytorch.json"
        write_to_json(pt_file, pt_records)


def get_requests(args, tokenizer):
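    """Sample benchmark requests from the dataset selected via the CLI args."""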
    # Common parameters for all dataset types.
    common_kwargs = {
        "dataset_path": args.dataset_path,
        "random_seed": args.seed,
    }
    sample_kwargs = {
        "tokenizer": tokenizer,
        "lora_path": args.lora_path,
        "max_loras": args.max_loras,
        "num_requests": args.num_prompts,
        "input_len": args.input_len,
        "output_len": args.output_len,
    }

    if args.dataset_path is None or args.dataset_name == "random":
        sample_kwargs["range_ratio"] = args.random_range_ratio
        sample_kwargs["prefix_len"] = args.prefix_len
        dataset_cls = RandomDataset
    elif args.dataset_name == "sharegpt":
        dataset_cls = ShareGPTDataset
        if args.backend == "vllm-chat":
            sample_kwargs["enable_multimodal_chat"] = True
    elif args.dataset_name == "sonnet":
        assert tokenizer.chat_template or tokenizer.default_chat_template, (
            "Tokenizer/model must have chat template for sonnet dataset."
        )
        dataset_cls = SonnetDataset
        sample_kwargs["prefix_len"] = args.prefix_len
        sample_kwargs["return_prompt_formatted"] = True
    elif args.dataset_name == "burstgpt":
        dataset_cls = BurstGPTDataset
    elif args.dataset_name == "hf":
        if args.dataset_path in VisionArenaDataset.SUPPORTED_DATASET_PATHS:
            dataset_cls = VisionArenaDataset
            common_kwargs["dataset_subset"] = None
            common_kwargs["dataset_split"] = "train"
            sample_kwargs["enable_multimodal_chat"] = True
        elif args.dataset_path in InstructCoderDataset.SUPPORTED_DATASET_PATHS:
            dataset_cls = InstructCoderDataset
            common_kwargs["dataset_split"] = "train"
        elif args.dataset_path in ConversationDataset.SUPPORTED_DATASET_PATHS:
            dataset_cls = ConversationDataset
            common_kwargs["dataset_subset"] = args.hf_subset
            common_kwargs["dataset_split"] = args.hf_split
            sample_kwargs["enable_multimodal_chat"] = True
        elif args.dataset_path in AIMODataset.SUPPORTED_DATASET_PATHS:
            dataset_cls = AIMODataset
            common_kwargs["dataset_subset"] = None
            common_kwargs["dataset_split"] = "train"
    else:
        raise ValueError(f"Unknown dataset name: {args.dataset_name}")
    # Remove None values
    sample_kwargs = {k: v for k, v in sample_kwargs.items() if v is not None}
    return dataset_cls(**common_kwargs).sample(**sample_kwargs)


def main(args: argparse.Namespace):
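    """Run the selected backend over the sampled requests and report throughput."""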
    if args.seed is None:
        args.seed = 0
    print(args)
    random.seed(args.seed)
    # Sample the requests.
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer, trust_remote_code=args.trust_remote_code
    )
    requests = get_requests(args, tokenizer)
    is_multi_modal = any(request.multi_modal_data is not None for request in requests)
    request_outputs: Optional[list[RequestOutput]] = None
    if args.backend == "vllm":
        if args.async_engine:
            elapsed_time = uvloop.run(
                run_vllm_async(
                    requests,
                    args.n,
                    AsyncEngineArgs.from_cli_args(args),
                    args.disable_frontend_multiprocessing,
                    args.disable_detokenize,
                )
            )
        else:
            elapsed_time, request_outputs = run_vllm(
                requests,
                args.n,
                args.num_iters_warmup,
                EngineArgs.from_cli_args(args),
                args.disable_detokenize,
            )
    elif args.backend == "hf":
        assert args.tensor_parallel_size == 1
        elapsed_time = run_hf(
            requests,
            args.model,
            tokenizer,
            args.n,
            args.hf_max_batch_size,
            args.trust_remote_code,
            args.disable_detokenize,
        )
    elif args.backend == "mii":
        elapsed_time = run_mii(
            requests, args.model, args.tensor_parallel_size, args.output_len
        )
    elif args.backend == "vllm-chat":
        elapsed_time, request_outputs = run_vllm_chat(
            requests, args.n, EngineArgs.from_cli_args(args), args.disable_detokenize
        )
    else:
        raise ValueError(f"Unknown backend: {args.backend}")

    if request_outputs:
        # Note: with the vllm and vllm-chat backends,
        # we have request_outputs, which we use to count tokens.
        total_prompt_tokens = 0
        total_output_tokens = 0
        for ro in request_outputs:
            if not isinstance(ro, RequestOutput):
                continue
            total_prompt_tokens += (
                len(ro.prompt_token_ids) if ro.prompt_token_ids else 0
            )
            total_output_tokens += sum(len(o.token_ids) for o in ro.outputs if o)
        total_num_tokens = total_prompt_tokens + total_output_tokens
    else:
        total_num_tokens = sum(r.prompt_len + r.expected_output_len for r in requests)
        total_output_tokens = sum(r.expected_output_len for r in requests)
        total_prompt_tokens = total_num_tokens - total_output_tokens

    if is_multi_modal and args.backend != "vllm-chat":
        print(
            "\033[91mWARNING\033[0m: Multi-modal request with "
            f"{args.backend} backend detected. The "
            "following metrics are not accurate because image tokens are not"
            " counted. See vllm-project/vllm/issues/9778 for details."
        )
        # TODO(vllm-project/vllm/issues/9778): Count multi-modal token length.
        # vllm-chat backend counts the image tokens now

    print(f"Latency: {elapsed_time:.2f} s")
    print(
        f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
        f"{total_num_tokens / elapsed_time:.2f} total tokens/s, "
        f"{total_output_tokens / elapsed_time:.2f} output tokens/s"
    )
    print(f"Total num prompt tokens:  {total_prompt_tokens}")
    print(f"Total num output tokens:  {total_output_tokens}")

    # Output JSON results if specified
    if args.output_json:
        results = {
            "elapsed_time": elapsed_time,
            "num_requests": len(requests),
            "total_num_tokens": total_num_tokens,
            "requests_per_second": len(requests) / elapsed_time,
            "tokens_per_second": total_num_tokens / elapsed_time,
        }
        with open(args.output_json, "w") as f:
            json.dump(results, f, indent=4)
        save_to_pytorch_benchmark_format(args, results)


def validate_args(args):
    """
    Validate command-line arguments.
    """

    # === Deprecation and Defaulting ===
    if args.dataset is not None:
        warnings.warn(
            "The '--dataset' argument will be deprecated in the next release. "
            "Please use '--dataset-name' and '--dataset-path' instead.",
            stacklevel=2,
        )
        args.dataset_path = args.dataset

    if not getattr(args, "tokenizer", None):
        args.tokenizer = args.model

    # === Backend Validation ===
    valid_backends = {"vllm", "hf", "mii", "vllm-chat"}
    if args.backend not in valid_backends:
        raise ValueError(f"Unsupported backend: {args.backend}")

    # === Dataset Configuration ===
    if not args.dataset and not args.dataset_path:
        print("When dataset path is not set, it will default to random dataset")
        args.dataset_name = "random"
        if args.input_len is None:
            raise ValueError("input_len must be provided for a random dataset")

    # === Dataset Name Specific Checks ===
    # --hf-subset and --hf-split: only used
    # when dataset_name is 'hf'
    if args.dataset_name != "hf" and (
        getattr(args, "hf_subset", None) is not None
        or getattr(args, "hf_split", None) is not None
    ):
        warnings.warn(
            "--hf-subset and --hf-split will be ignored "
            "since --dataset-name is not 'hf'.",
            stacklevel=2,
        )
    elif args.dataset_name == "hf":
        if args.dataset_path in (
            VisionArenaDataset.SUPPORTED_DATASET_PATHS.keys()
            | ConversationDataset.SUPPORTED_DATASET_PATHS
        ):
            assert args.backend == "vllm-chat", (
                f"{args.dataset_path} needs to use vllm-chat as the backend."
            )  # noqa: E501
        elif args.dataset_path in (
            InstructCoderDataset.SUPPORTED_DATASET_PATHS
            | AIMODataset.SUPPORTED_DATASET_PATHS
        ):
            assert args.backend == "vllm", (
                f"{args.dataset_path} needs to use vllm as the backend."
            )  # noqa: E501
        else:
            raise ValueError(f"{args.dataset_path} is not supported by hf dataset.")

    # --random-range-ratio: only used when dataset_name is 'random'
    if args.dataset_name != "random" and args.random_range_ratio is not None:
        warnings.warn(
            "--random-range-ratio will be ignored since "
            "--dataset-name is not 'random'.",
            stacklevel=2,
        )

    # --prefix-len: only used when dataset_name is 'random', 'sonnet', or not
    # set.
    if (
        args.dataset_name not in {"random", "sonnet", None}
        and args.prefix_len is not None
    ):
        warnings.warn(
            "--prefix-len will be ignored since --dataset-name "
            "is not 'random', 'sonnet', or not set.",
            stacklevel=2,
        )

    # === LoRA Settings ===
    if getattr(args, "enable_lora", False) and args.backend != "vllm":
        raise ValueError("LoRA benchmarking is only supported for vLLM backend")
    if getattr(args, "enable_lora", False) and args.lora_path is None:
        raise ValueError("LoRA path must be provided when enable_lora is True")

    # === Backend-specific Validations ===
    if args.backend == "hf" and args.hf_max_batch_size is None:
        raise ValueError("HF max batch size is required for HF backend")
    if args.backend != "hf" and args.hf_max_batch_size is not None:
        raise ValueError("HF max batch size is only for HF backend.")

    if (
        args.backend in {"hf", "mii"}
        and getattr(args, "quantization", None) is not None
    ):
        raise ValueError("Quantization is only for vLLM backend.")

    if args.backend == "mii" and args.dtype != "auto":
        raise ValueError("dtype must be auto for MII backend.")
    if args.backend == "mii" and args.n != 1:
        raise ValueError("n must be 1 for MII backend.")
    if args.backend == "mii" and args.tokenizer != args.model:
        raise ValueError("Tokenizer must be the same as the model for MII backend.")

    # --data-parallel is not supported currently.
    # https://github.com/vllm-project/vllm/issues/16222
    if args.data_parallel_size > 1:
        raise ValueError(
            "Data parallel is not supported in offline benchmark, "
            "please use benchmark serving instead"
        )


def create_argument_parser():
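    """Build the CLI parser, including vLLM's engine arguments."""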
    parser = FlexibleArgumentParser(description="Benchmark the throughput.")
    parser.add_argument(
        "--backend",
        type=str,
        choices=["vllm", "hf", "mii", "vllm-chat"],
        default="vllm",
    )
    parser.add_argument(
        "--dataset-name",
        type=str,
        choices=["sharegpt", "random", "sonnet", "burstgpt", "hf"],
        help="Name of the dataset to benchmark on.",
        default="sharegpt",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        default=None,
        help="Path to the ShareGPT dataset, will be deprecated in "
        "the next release. The dataset is expected to "
        "be a json in form of list[dict[..., conversations: "
        "list[dict[..., value: <prompt_or_response>]]]]",
    )
    parser.add_argument(
        "--dataset-path", type=str, default=None, help="Path to the dataset"
    )
    parser.add_argument(
        "--input-len",
        type=int,
        default=None,
        help="Input prompt length for each request",
    )
    parser.add_argument(
        "--output-len",
        type=int,
        default=None,
        help="Output length for each request. Overrides the "
        "output length from the dataset.",
    )
    parser.add_argument(
        "--n", type=int, default=1, help="Number of generated sequences per prompt."
    )
    parser.add_argument(
        "--num-iters-warmup",
        type=int,
        default=1,
        help="Number of iterations to run for warmup.",
    )
    parser.add_argument(
        "--num-prompts", type=int, default=1000, help="Number of prompts to process."
    )
    parser.add_argument(
        "--profile",
        action="store_true",
        help="Profile the generation process of a single batch.",
    )
    parser.add_argument(
        "--profile-result-dir",
        type=str,
        default=None,
        help="Path to save the PyTorch profiler output. Can be visualized "
        "with ui.perfetto.dev or Tensorboard.",
    )
    parser.add_argument(
        "--hf-max-batch-size",
        type=int,
        default=None,
        help="Maximum batch size for HF backend.",
    )
    parser.add_argument(
        "--output-json",
        type=str,
        default=None,
        help="Path to save the throughput results in JSON format.",
    )
    parser.add_argument(
        "--async-engine",
        action="store_true",
        default=False,
        help="Use vLLM async engine rather than LLM class.",
    )
    parser.add_argument(
        "--disable-frontend-multiprocessing",
        action="store_true",
        default=False,
        help="Disable decoupled async engine frontend.",
    )
    parser.add_argument(
        "--disable-detokenize",
        action="store_true",
        help=(
            "Do not detokenize the response (i.e. do not include "
            "detokenization time in the measurement)"
        ),
    )
    # LoRA
    parser.add_argument(
        "--lora-path",
        type=str,
        default=None,
        help="Path to the LoRA adapters to use. This can be an absolute path, "
        "a relative path, or a Hugging Face model identifier.",
    )
    parser.add_argument(
        "--prefix-len",
        type=int,
        default=None,
        help=f"Number of prefix tokens to be used in RandomDataset "
        "and SonnetDataset. For RandomDataset, the total input "
        "length is the sum of prefix-len (default: "
        f"{RandomDataset.DEFAULT_PREFIX_LEN}) and a random context length "
        "sampled from [input_len * (1 - range_ratio), "
        "input_len * (1 + range_ratio)]. For SonnetDataset, "
        f"prefix_len (default: {SonnetDataset.DEFAULT_PREFIX_LEN}) "
        "controls how much of the input is fixed lines versus "
        "random lines, but the total input length remains approximately "
        "input_len tokens.",
    )
    # random dataset
    parser.add_argument(
        "--random-range-ratio",
        type=float,
        default=None,
        help=f"Range ratio (default : {RandomDataset.DEFAULT_RANGE_RATIO}) "
        "for sampling input/output length, "
        "used only for RandomDataset. Must be in the range [0, 1) to "
        "define a symmetric sampling range "
        "[length * (1 - range_ratio), length * (1 + range_ratio)].",
    )

    # hf dataset
    parser.add_argument(
        "--hf-subset", type=str, default=None, help="Subset of the HF dataset."
    )
    parser.add_argument(
        "--hf-split", type=str, default=None, help="Split of the HF dataset."
    )

    parser = AsyncEngineArgs.add_cli_args(parser)

    return parser


if __name__ == "__main__":
    parser = create_argument_parser()
    args = parser.parse_args()
    if args.tokenizer is None:
        args.tokenizer = args.model
    validate_args(args)
    main(args)