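"""Benchmark the latency of processing a single batch of requests.

Minimal usage sketch (the model and sizes shown are just this script's
argument defaults):

    python benchmark_latency.py --model facebook/opt-125m \
        --input-len 32 --output-len 128 --batch-size 8
"""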
import argparse
import time

import numpy as np
import torch
from tqdm import tqdm

from cacheflow import LLM, SamplingParams


def main(args: argparse.Namespace):
    print(args)

    # Process all the requests in a single batch if possible.
    # NOTE(woosuk): If the request cannot be processed in a single batch,
    # the server will automatically process the request in multiple batches.
    llm = LLM(
        model=args.model,
        tensor_parallel_size=args.tensor_parallel_size,
        max_num_seqs=args.batch_size,
        max_num_batched_tokens=args.batch_size * args.input_len,
    )

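    # With beam search the benchmark uses deterministic scoring
    # (temperature 0); otherwise it samples at temperature 1.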
    sampling_params = SamplingParams(
        n=args.n,
        temperature=0.0 if args.use_beam_search else 1.0,
        top_p=1.0,
        use_beam_search=args.use_beam_search,
        ignore_eos=True,
        max_tokens=args.output_len,
    )
    print(sampling_params)
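
    # With ignore_eos=True and a fixed max_tokens, every sequence generates
    # exactly args.output_len tokens, so dummy prompts of all-zero token IDs
    # are sufficient for a latency measurement.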
    dummy_prompts = [""] * args.batch_size
    dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size

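    # llm.generate() blocks until the whole batch finishes, so the wall-clock
    # time around the call is the end-to-end batch latency.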
    def run_to_completion(profile: bool = False):
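        # cudaProfilerStart/Stop bracket the region that Nsight Systems
        # captures when the script is run under
        # `nsys profile --capture-range=cudaProfilerApi`.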
        if profile:
            torch.cuda.cudart().cudaProfilerStart()
        start_time = time.time()

        llm.generate(dummy_prompts, sampling_params, dummy_prompt_token_ids,
                     use_tqdm=False)

        end_time = time.time()
        latency = end_time - start_time
        if profile:
            torch.cuda.cudart().cudaProfilerStop()
        return latency

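    # One untimed warmup run amortizes one-time startup costs (CUDA context
    # creation, memory allocation) so they do not skew the measured latency.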
    print("Warming up...")
    run_to_completion(profile=False)

    # Benchmark.
    latencies = []
    for _ in tqdm(range(args.num_iters), desc="Profiling iterations"):
        latencies.append(run_to_completion(profile=False))
    print(f'Avg latency: {np.mean(latencies)} seconds')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Benchmark the latency of processing a single batch of '
                    'requests till completion.')
    parser.add_argument('--model', type=str, default='facebook/opt-125m')
    parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)
    parser.add_argument('--input-len', type=int, default=32)
    parser.add_argument('--output-len', type=int, default=128)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--n', type=int, default=1,
                        help='Number of generated sequences per prompt.')
    parser.add_argument('--use-beam-search', action='store_true')
    parser.add_argument('--num-iters', type=int, default=3,
                        help='Number of iterations to run.')
    args = parser.parse_args()
    main(args)