"mmdet3d/structures/bbox_3d/utils.py" did not exist on "cbc2491f556f8f92b141d7a81e08c79beef4515c"
profile_restful_api.py 7.2 KB
Newer Older
AllentDan's avatar
AllentDan committed
1
2
3
import json
import random
import time
from queue import Queue
from threading import Thread

import fire
import numpy as np

from lmdeploy.serve.openai.api_client import get_streaming_response
from lmdeploy.tokenizer import Tokenizer


def infer(server_addr: str, session_id: int, req_queue: Queue, res_que: Queue,
          stream_output: bool):
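    """Worker routine: pull requests off `req_queue`, stream each prompt to
    the server, and time the responses.

    Every stats row is [first_token_latency, completion_tokens,
    output_seqlen, total_tokens, token_latency]; all rows are put on
    `res_que` together with `session_id` once the queue is drained.
    """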
    stats = []
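    # `[None, None, None]` is the per-worker stop signal queued by `main`;
    # the tuple terminator from `read_dataset` does not match this list
    # sentinel, so it is caught by the `prompt is None` check instead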
    for prompt, input_seqlen, output_seqlen in iter(req_queue.get,
                                                    [None, None, None]):
        if prompt is None:
            break
        timestamps = []
        tokens = []
        timestamps.append(time.perf_counter())
        for res, token, status in get_streaming_response(
                prompt,
                server_addr,
                session_id,
                request_output_len=output_seqlen,
                interactive_mode=False,
                ignore_eos=True,
                stream=stream_output):
            timestamps.append(time.perf_counter())
            tokens.append(token)

        first_token_latency = np.round(timestamps[1] - timestamps[0], 3)
        token_latency = np.round(timestamps[-1] - timestamps[0], 3)
        completion_tokens = tokens[-1]
        total_tokens = tokens[-1] + input_seqlen
        stats.append([
            first_token_latency, completion_tokens, output_seqlen,
            total_tokens, token_latency
        ])
        print(f'session {session_id}: '
              f'input_seqlen {input_seqlen}, output_seqlen {output_seqlen}, '
              f'completion_tokens {completion_tokens}')
    res_que.put((session_id, stats))


def warmup(server_addr: str,
           concurrency: int,
           output_seqlen: int,
           warmup_round: int = 1,
           stream_output: bool = False):
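    """Fire `warmup_round` empty-prompt requests from each of `concurrency`
    threads so the server reaches a steady state before measuring."""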
    print('start to warmup ...')

    def _infer(server_addr, session_id):
        for _ in range(warmup_round):
            for _ in get_streaming_response('',
                                            server_addr,
                                            session_id,
                                            request_output_len=output_seqlen,
                                            interactive_mode=False,
                                            stream=stream_output,
                                            ignore_eos=True):
                continue

    _start = time.perf_counter()
    procs = []
    for i in range(concurrency):
        proc = Thread(target=_infer, args=(server_addr, i + 1))
        procs.append(proc)
        proc.start()
    for proc in procs:
        proc.join()
    _end = time.perf_counter()
    print(f'end warmup, elapsed time: {round(_end - _start, 2)} s')


def read_dataset(tokenizer_path: str, dataset_path: str, samples: int,
                 session_len: int):
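    """Load a ShareGPT-format JSON file, tokenize prompts and completions,
    drop conversations whose token budget exceeds `session_len`, and return
    a queue of [prompt, input_len, output_len] items plus the sample count.
    """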
    start = time.perf_counter()
    with open(dataset_path) as f:
        dataset = json.load(f)
        dataset = [data for data in dataset if len(data['conversations']) >= 2]
        # Only keep the first two turns of each conversation.
        dataset = [(data['conversations'][0]['value'],
                    data['conversations'][1]['value']) for data in dataset]
        prompts = [prompt for prompt, _ in dataset]
        completions = [completion for _, completion in dataset]
        print(f'elapsed time for read data: '
              f'{round(time.perf_counter() - start, 2)} s')

    print('start tokenization. This takes a while, please wait...')
    start = time.perf_counter()
    tokenizer = Tokenizer(tokenizer_path)
    prompts_token_lens = [len(tokenizer.encode(prompt)) for prompt in prompts]
    completions_token_lens = [
        len(tokenizer.encode(completion)) for completion in completions
    ]
    print(f'elapsed time for tokenization: '
          f'{round(time.perf_counter() - start, 2)} s')

    start = time.perf_counter()
    filtered_dataset = []
    for (prompt, _), input_len, output_len in zip(dataset, prompts_token_lens,
                                                  completions_token_lens):
        if input_len + output_len > session_len:
            # ignore too long conversation
            continue
        filtered_dataset.append([prompt, input_len, output_len])

    if samples > 0:
        filtered_dataset = random.sample(filtered_dataset, samples)

    que = Queue()
    for data in filtered_dataset:
        que.put(data)
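    # a single terminator; `infer` stops on it via its `prompt is None` check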
    que.put((None, None, None))
    print(f'elapsed time for filtering: '
          f'{round(time.perf_counter() - start, 2)} s')
    return que, len(filtered_dataset)


def main(server_addr: str,
         tokenizer_path: str,
         dataset_path: str,
         concurrency: int = 1,
         session_len: int = 2048,
         samples: int = 1000,
         stream_output: bool = False):
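    """Benchmark an lmdeploy RESTful API server end to end.

    Invoked through fire, e.g. (address and paths are placeholders):
        python profile_restful_api.py http://0.0.0.0:23333 \
            /path/to/tokenizer /path/to/dataset.json --concurrency 16
    """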
    api_url = server_addr + '/v1/chat/interactive'
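    # 4 rounds of max-length generations to bring the server to steady state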
    warmup(api_url, concurrency, session_len - 1, 4, stream_output)
    req_queue, n_req = read_dataset(tokenizer_path, dataset_path, samples,
                                    session_len)
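    # one stop signal per worker so every `infer` thread can exit its loop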
    for i in range(concurrency):
        req_queue.put([None, None, None])
    res_que = Queue()
    procs = []
    _start = time.perf_counter()
    for i in range(concurrency):
        proc = Thread(target=infer,
                      args=(api_url, i + 1, req_queue, res_que, stream_output))
        procs.append(proc)
        proc.start()
    for proc in procs:
        proc.join()
    _end = time.perf_counter()
    elapsed_time = _end - _start

    stats = []
    while not res_que.empty():
        session_id, _stats = res_que.get()
        print(f'\n{"-" * 50}\n'
              f'session {session_id} stats: \n{_stats}\n{"-" * 50}\n')
        stats.append(np.array(_stats))

    stats = np.concatenate(stats).reshape(-1, 5)

    first_token_latency_min = np.min(stats[:, 0], axis=0)
    first_token_latency_max = np.max(stats[:, 0], axis=0)
    first_token_latency_ave = np.mean(stats[:, 0], axis=0)
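    # aggregate token counts and convert them to throughput / request rates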
    completion_tokens = np.sum(stats[:, 1], axis=0)
    request_output_tokens = np.sum(stats[:, 2], axis=0)
    total_tokens = np.sum(stats[:, 3], axis=0)
    prompt_tokens = total_tokens - completion_tokens
    completion_token_throughput = completion_tokens / elapsed_time
    total_token_throughput = total_tokens / elapsed_time
    rqs = n_req / elapsed_time
    rqm = rqs * 60

    # sanity check: with `ignore_eos=True` every request should produce the
    # requested number of tokens, give or take one
    if not (np.abs(stats[:, 1] - stats[:, 2]) <= 1).all():
        print(f'Did not generate requested number of tokens. '
              f'Requested {request_output_tokens:.0f}, '
              f'but got {completion_tokens:.0f}')

    print(f'\n{"-" * 50}\nconcurrency: {concurrency}\n'
          f'elapsed_time: {elapsed_time:.3f}s\n')
    if stream_output:
        print(f'first_token latency(min, max, ave): '
              f'{first_token_latency_min:.3f}s, '
              f'{first_token_latency_max:.3f}s, '
              f'{first_token_latency_ave:.3f}s\n')
    print(
        f'number of prompt tokens: {prompt_tokens:.0f}\n'
        f'number of completion tokens: {completion_tokens:.0f}\n'
        f'token throughput (completion token): {completion_token_throughput:.3f} token/s\n'  # noqa
        f'token throughput (prompt + completion token): {total_token_throughput:.3f} token/s\n'  # noqa
        f'RPS (request per second): {rqs:.3f} req/s\n'
        f'RPM (request per minute): {rqm:.3f} req/min\n'
        f'{"-" * 50}\n')


if __name__ == '__main__':
    fire.Fire(main)