"""Benchmark online serving throughput.

On the server side, run one of the following commands:
    (vLLM backend)
    python -m vllm.entrypoints.api_server \
        --model <your_model> --swap-space 16 \
        --disable-log-requests

    (TGI backend)
    ./launch_hf_server.sh <your_model>
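
    (SGLang srt backend, the default here; assuming the standard launch command)
    python -m sglang.launch_server --model-path <your_model> --port 30000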

On the client side, run:
    python bench_throughput.py \
        --backend <backend> \
        --tokenizer <your_model> --dataset <target_dataset> \
        --request-rate <request_rate>
"""

import argparse
import asyncio
import json
import random
import time
from typing import AsyncGenerator, List, Tuple

import aiohttp
import numpy as np
from tqdm.asyncio import tqdm_asyncio
from transformers import AutoTokenizer

# (prompt len, output len, latency)
REQUEST_LATENCY: List[Tuple[int, int, float]] = []


def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: AutoTokenizer,
) -> List[Tuple[str, int, int]]:
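    # The dataset is expected to be ShareGPT-style JSON (an assumption based on
    # the fields used below), e.g.:
    #   [{"conversations": [{"value": "<prompt>"}, {"value": "<reply>"}, ...]}, ...]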
    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with less than 2 turns.
    dataset = [data for data in dataset if len(data["conversations"]) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [
        (data["conversations"][0]["value"], data["conversations"][1]["value"])
        for data in dataset
    ]

    # Tokenize the prompts and completions.
    prompts = [prompt for prompt, _ in dataset]
    prompt_token_ids = tokenizer(prompts).input_ids
    completions = [completion for _, completion in dataset]
    completion_token_ids = tokenizer(completions).input_ids
    tokenized_dataset = []
    for i in range(len(dataset)):
        output_len = len(completion_token_ids[i])
        tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))

    # Filter out too long sequences.
    filtered_dataset: List[Tuple[str, int, int]] = []
    for prompt, prompt_ids, output_len in tokenized_dataset:
        prompt_len = len(prompt_ids)
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            # This is because TGI causes errors when the input or output length
            # is too short.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))

    # Sample the requests.
    sampled_requests = random.sample(filtered_dataset, num_requests)
    return sampled_requests


async def get_request(
    input_requests: List[Tuple[str, int, int]],
    request_rate: float,
) -> AsyncGenerator[Tuple[str, int, int], None]:
    input_requests = iter(input_requests)
    for request in input_requests:
        yield request

        if request_rate == float("inf"):
            # If the request rate is infinity, then we don't need to wait.
            continue
        # Sample the request interval from the exponential distribution.
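        # For example, request_rate=4.0 yields a mean inter-arrival time of 0.25 s.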
        interval = np.random.exponential(1.0 / request_rate)
        # The next request will be sent after the interval.
        await asyncio.sleep(interval)


async def send_request(
    backend: str,
    api_url: str,
    prompt: str,
    prompt_len: int,
    output_len: int,
    best_of: int,
    use_beam_search: bool,
) -> None:
    request_start_time = time.perf_counter()

    headers = {"User-Agent": "Benchmark Client"}
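    # Build a backend-specific request payload. Where the backend supports it
    # (vllm, srt, lightllm), `ignore_eos` plus a hard token cap makes the server
    # generate exactly `output_len` tokens, keeping latencies comparable.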
    if backend == "vllm":
        pload = {
            "prompt": prompt,
            "n": 1,
            "best_of": best_of,
            "use_beam_search": use_beam_search,
            "temperature": 0.0 if use_beam_search else 1.0,
            "top_p": 1.0,
            "max_tokens": output_len,
            "ignore_eos": True,
            "stream": False,
        }
    elif backend == "tgi":
        assert not use_beam_search
        params = {
            "best_of": best_of,
            "max_new_tokens": output_len,
            "do_sample": True,
        }
        pload = {
            "inputs": prompt,
            "parameters": params,
        }
    elif backend == "srt":
        assert not use_beam_search
        params = {
            "ignore_eos": True,
            "max_new_tokens": output_len,
        }
        pload = {
            "text": prompt,
            "sampling_params": params,
        }
    elif backend == "lightllm":
        assert not use_beam_search
        params = {
            "ignore_eos": True,
            "max_new_tokens": output_len,
        }
        pload = {
            "inputs": prompt,
            "parameters": params,
        }
    elif backend == "ginfer":
153
        pass
    else:
        raise ValueError(f"Unknown backend: {backend}")

    if backend != "ginfer":
        timeout = aiohttp.ClientTimeout(total=3 * 3600)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            while True:
                async with session.post(api_url, headers=headers, json=pload) as response:
                    chunks = []
                    async for chunk, _ in response.content.iter_chunks():
                        chunks.append(chunk)
                output = b"".join(chunks).decode("utf-8")
                output = json.loads(output)

                # Re-send the request if it failed.
                if "error" not in output:
                    break
                else:
                    print(output)
    else:
        import grpc
        from ginfer import sampler_pb2, sampler_pb2_grpc

        api_url = api_url.replace("http://", "").replace("/generate", "")
        sampler_channel = grpc.aio.insecure_channel(api_url)
        sampler = sampler_pb2_grpc.SamplerStub(sampler_channel)

        # Restart the clock so gRPC channel setup is not counted in the latency.
        request_start_time = time.perf_counter()
        sample_request = sampler_pb2.SampleTextRequest(
            prompt=prompt,
            settings=sampler_pb2.SampleSettings(
                max_len=output_len,
                rng_seed=0,
                temperature=0,
                nucleus_p=1,
            ),
        )
        stream = sampler.SampleText(sample_request)
        response = "".join([x.text async for x in stream])

    request_end_time = time.perf_counter()
    request_latency = request_end_time - request_start_time
    REQUEST_LATENCY.append((prompt_len, output_len, request_latency))


async def benchmark(
    backend: str,
    api_url: str,
    input_requests: List[Tuple[str, int, int]],
    best_of: int,
    use_beam_search: bool,
    request_rate: float,
) -> None:
    tasks: List[asyncio.Task] = []
    async for request in get_request(input_requests, request_rate):
        prompt, prompt_len, output_len = request
        task = asyncio.create_task(
            send_request(
                backend,
                api_url,
                prompt,
                prompt_len,
                output_len,
                best_of,
                use_beam_search,
            )
        )
        tasks.append(task)
    await tqdm_asyncio.gather(*tasks)


def main(args: argparse.Namespace):
    print(args)
    random.seed(args.seed)
    np.random.seed(args.seed)

    api_url = f"http://{args.host}:{args.port}/generate"
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer, trust_remote_code=args.trust_remote_code
    )

    if args.dataset:
        input_requests = sample_requests(args.dataset, args.num_prompts, tokenizer)
    else:
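        # No dataset given: synthesize random prompts. Each prompt starts from a
        # different token-id offset, so the generated prompts are all distinct.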
        input_lens = np.random.randint(
            int(args.input_len * args.range_ratio),
            args.input_len + 1,
            size=args.num_prompts,
        )
        output_lens = np.random.randint(
            int(args.output_len * args.range_ratio),
            args.output_len + 1,
            size=args.num_prompts,
        )
        offsets = np.random.randint(0, tokenizer.vocab_size, size=args.num_prompts)
        input_requests = []
        for i in range(args.num_prompts):
            prompt = tokenizer.decode(
                [(offsets[i] + i + j) % tokenizer.vocab_size for j in range(input_lens[i])]
            )
            input_requests.append((prompt, int(input_lens[i]), int(output_lens[i])))

    benchmark_start_time = time.perf_counter()
    asyncio.run(
        benchmark(
            args.backend,
            api_url,
            input_requests,
            args.best_of,
            args.use_beam_search,
            args.request_rate,
        )
    )
    benchmark_end_time = time.perf_counter()
    benchmark_time = benchmark_end_time - benchmark_start_time
    print(f"Total time: {benchmark_time:.2f} s")
    print(f"Throughput: {args.num_prompts / benchmark_time:.2f} requests/s")

    # Compute the latency statistics.
    avg_latency = np.mean([latency for _, _, latency in REQUEST_LATENCY])
    print(f"Average latency: {avg_latency:.2f} s")
    avg_per_token_latency = np.mean(
        [
            latency / (prompt_len + output_len)
            for prompt_len, output_len, latency in REQUEST_LATENCY
        ]
    )
    print(f"Average latency per token: {avg_per_token_latency:.2f} s")
    avg_per_output_token_latency = np.mean(
        [latency / output_len for _, output_len, latency in REQUEST_LATENCY]
    )
    print("Average latency per output token: " f"{avg_per_output_token_latency:.2f} s")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Benchmark the online serving throughput."
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="srt",
        choices=["vllm", "tgi", "srt", "lightllm", "ginfer"],
    )
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=30000)
    parser.add_argument(
        "--dataset", type=str, help="Path to the dataset."
    )
    parser.add_argument("--input-len", type=str, default=2048)
    parser.add_argument("--output-len", type=str, default=256)
    parser.add_argument("--range-ratio", type=float, default=0.5)
    parser.add_argument(
        "--tokenizer", type=str,
        default="NousResearch/Meta-Llama-3-8B",
        help="Name or path of the tokenizer."
    )
    parser.add_argument(
        "--best-of",
        type=int,
        default=1,
        help="Generates `best_of` sequences per prompt and " "returns the best one.",
    )
    parser.add_argument("--use-beam-search", action="store_true")
    parser.add_argument(
        "--num-prompts", type=int, default=1000, help="Number of prompts to process."
    )
    parser.add_argument(
        "--request-rate",
        type=float,
        default=float("inf"),
        help="Number of requests per second. If this is inf, "
        "then all the requests are sent at time 0. "
        "Otherwise, we use Poisson process to synthesize "
        "the request arrival times.",
    )
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument(
        "--trust-remote-code",
        action="store_true",
        help="trust remote code from huggingface",
    )
    args = parser.parse_args()
    main(args)