"""Benchmark the latency of processing a single batch of requests."""
import argparse
import time
from pathlib import Path
from typing import Optional

import numpy as np
import torch
from tqdm import tqdm

from vllm import LLM, SamplingParams


def main(args: argparse.Namespace):
    print(args)

    # NOTE(woosuk): If the request cannot be processed in a single batch,
    # the engine will automatically process the request in multiple batches.
    llm = LLM(
        model=args.model,
        tokenizer=args.tokenizer,
        quantization=args.quantization,
        tensor_parallel_size=args.tensor_parallel_size,
        trust_remote_code=args.trust_remote_code,
        dtype=args.dtype,
        enforce_eager=args.enforce_eager,
        kv_cache_dtype=args.kv_cache_dtype,
        device=args.device,
    )
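    # Constructing the LLM loads the model weights and builds the engine;
    # this one-time cost is not part of the latency measured below, which
    # covers only the generate() calls.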

    sampling_params = SamplingParams(
        n=args.n,
        temperature=0.0 if args.use_beam_search else 1.0,
        top_p=1.0,
        use_beam_search=args.use_beam_search,
        ignore_eos=True,
        max_tokens=args.output_len,
    )
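    # ignore_eos=True keeps decoding past any EOS token, so every sequence
    # generates exactly args.output_len tokens; temperature is set to 0.0
    # under beam search, since beam search does not sample.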
    print(sampling_params)
    dummy_prompt_token_ids = np.random.randint(10000,
                                               size=(args.batch_size,
                                                     args.input_len))
    dummy_prompt_token_ids = dummy_prompt_token_ids.tolist()
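    # The prompts are random token IDs in [0, 10000) with shape
    # (batch_size, input_len), so tokenization and prompt content do not
    # affect the measured latency.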

    def run_to_completion(profile_dir: Optional[str] = None):
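        # Two modes: when profile_dir is set, generation runs under
        # torch.profiler.profile and a trace is written for TensorBoard;
        # otherwise a single generate() call is timed with
        # time.perf_counter() and the latency is returned in seconds.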
        if profile_dir:
            with torch.profiler.profile(
                    activities=[
                        torch.profiler.ProfilerActivity.CPU,
                        torch.profiler.ProfilerActivity.CUDA,
                    ],
                    on_trace_ready=torch.profiler.tensorboard_trace_handler(
                        str(profile_dir))) as p:
                llm.generate(prompt_token_ids=dummy_prompt_token_ids,
                             sampling_params=sampling_params,
                             use_tqdm=False)
            print(p.key_averages())
        else:
            start_time = time.perf_counter()
            llm.generate(prompt_token_ids=dummy_prompt_token_ids,
                         sampling_params=sampling_params,
                         use_tqdm=False)
            end_time = time.perf_counter()
            latency = end_time - start_time
            return latency

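    # One untimed warmup run so that any remaining one-time initialization
    # cost is not counted in the timed iterations below.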
    print("Warming up...")
    run_to_completion(profile_dir=None)

    if args.profile:
        profile_dir = args.profile_result_dir
        if not profile_dir:
            profile_dir = Path(
                "."
            ) / "vllm_benchmark_result" / f"latency_result_{time.time()}"
        print(f"Profiling (results will be saved to '{profile_dir}')...")
        run_to_completion(profile_dir=profile_dir)
        return

    # Benchmark.
    latencies = []
    for _ in tqdm(range(args.num_iters), desc="Benchmark iterations"):
        latencies.append(run_to_completion(profile_dir=None))
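    # The reported figure is the mean wall-clock time, over args.num_iters
    # runs, of processing the full batch to completion.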
    print(f'Avg latency: {np.mean(latencies)} seconds')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Benchmark the latency of processing a single batch of '
        'requests until completion.')
    parser.add_argument('--model', type=str, default='facebook/opt-125m')
    parser.add_argument('--tokenizer', type=str, default=None)
    parser.add_argument('--quantization',
                        '-q',
                        choices=['awq', 'gptq', 'squeezellm', None],
                        default=None)
    parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)
    parser.add_argument('--input-len', type=int, default=32)
    parser.add_argument('--output-len', type=int, default=128)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--n',
                        type=int,
                        default=1,
                        help='Number of generated sequences per prompt.')
    parser.add_argument('--use-beam-search', action='store_true')
    parser.add_argument('--num-iters',
                        type=int,
                        default=3,
                        help='Number of iterations to run.')
    parser.add_argument('--trust-remote-code',
                        action='store_true',
                        help='Trust remote code from Hugging Face.')
    parser.add_argument(
        '--dtype',
        type=str,
        default='auto',
        choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
        help='data type for model weights and activations. '
        'The "auto" option will use FP16 precision '
        'for FP32 and FP16 models, and BF16 precision '
        'for BF16 models.')
    parser.add_argument('--enforce-eager',
                        action='store_true',
                        help='enforce eager mode and disable CUDA graph')
    parser.add_argument(
        "--kv-cache-dtype",
        type=str,
        choices=['auto', 'fp8_e5m2'],
        default='auto',
        help=
        'Data type for the KV cache. If "auto", the model data type is used.')
    parser.add_argument(
        '--profile',
        action='store_true',
        help='profile the generation process of a single batch')
    parser.add_argument(
        '--profile-result-dir',
        type=str,
        default=None,
        help=('Path to save the PyTorch profiler output. Can be visualized '
              'with ui.perfetto.dev or TensorBoard.'))
    parser.add_argument(
        "--device",
        type=str,
        default="cuda",
        choices=["cuda"],
        help='Device type for vLLM execution. '
        'Currently, only CUDA is supported.')
    args = parser.parse_args()
    main(args)