import json
import logging
import multiprocessing as mp
import random
import time

import fire
import numpy as np

from lmdeploy.serve.turbomind.chatbot import Chatbot
from lmdeploy.tokenizer import Tokenizer


def infer(chatbot, session_id: int, req_que: mp.Queue, res_que: mp.Queue):
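    """Worker loop for one benchmark session.

    Pulls (prompt, input_seqlen, output_seqlen) requests from `req_que`
    until the [None, None, None] sentinel arrives, streams each response,
    and reports per-request stats through `res_que`.
    """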
    stats = []
    for prompt, input_seqlen, output_seqlen in iter(req_que.get,
                                                    [None, None, None]):
        timestamps = []
        tokens = []
        timestamps.append(time.perf_counter())
        for status, res, token in chatbot.stream_infer(
                session_id,
                prompt,
                request_output_len=output_seqlen,
                sequence_start=True,
                sequence_end=True):
            timestamps.append(time.perf_counter())
            tokens.append(token)
        first_token_latency = np.round(timestamps[1] - timestamps[0], 3)
        token_latency = np.round(timestamps[-1] - timestamps[0], 3)
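        # stream_infer yields a cumulative generated-token count, so the
        # last value is the completion length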
        completion_tokens = tokens[-1]
        total_tokens = tokens[-1] + input_seqlen
        stats.append([
            first_token_latency, completion_tokens, output_seqlen,
            total_tokens, token_latency
        ])
        print(f'session {session_id}: '
              f'input_seqlen {input_seqlen}, output_seqlen {output_seqlen}, '
              f'completion_tokens {completion_tokens}')
    res_que.put((session_id, stats))


def warmup(tritonserver_addr: str,
           concurrency: int,
           output_seqlen: int,
           warmup_round: int = 1):
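    """Run `warmup_round` max-length generations on `concurrency` concurrent
    chatbots so that cold-start costs don't skew the measurement that
    follows."""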
    print('start to warmup ...')

    def _infer(_chatbot, session_id):
        for _ in range(warmup_round):
            for _, _, _ in _chatbot.stream_infer(
                    session_id,
                    prompt='',
                    request_output_len=output_seqlen,
                    sequence_start=True,
                    sequence_end=True):
                continue
            _chatbot.reset_session()

    _start = time.perf_counter()
    chatbots = [
        Chatbot(tritonserver_addr=tritonserver_addr,
                ignore_eos=True,
                log_level=logging.ERROR,
                profile_generation=True) for _ in range(concurrency)
    ]
    procs = []
    for i, chatbot in enumerate(chatbots):
        proc = mp.Process(target=_infer, args=(chatbot, i + 1))
        procs.append(proc)
        proc.start()
    for proc in procs:
        proc.join()
    _end = time.perf_counter()
    print(f'end warmup, elapsed time: {round(_end - _start, 2)} s')


def read_dataset(tokenizer_path: str, dataset_path: str, samples: int,
                 session_len: int, que: mp.Queue):
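    """Load a ShareGPT-style json dataset, tokenize prompts and reference
    completions to get their sequence lengths, drop conversations whose
    total length exceeds `session_len`, and feed the surviving requests
    into `que`. Returns the number of requests enqueued."""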
    start = time.perf_counter()
    with open(dataset_path) as f:
        dataset = json.load(f)
        dataset = [data for data in dataset if len(data['conversations']) >= 2]
        # Only keep the first two turns of each conversation.
        dataset = [(data['conversations'][0]['value'],
                    data['conversations'][1]['value']) for data in dataset]
        prompts = [prompt for prompt, _ in dataset]
        completions = [completion for _, completion in dataset]
        print(f'elapsed time for read data: '
              f'{round(time.perf_counter() - start, 2)} s')
    print('start tokenization. This takes a while, please wait...')

    start = time.perf_counter()
    tokenizer = Tokenizer(tokenizer_path)
    prompts_token_lens = [len(tokenizer.encode(prompt)) for prompt in prompts]
    completions_token_lens = [
        len(tokenizer.encode(completion)) for completion in completions
    ]
    print(f'elapsed time for tokenization: '
          f'{round(time.perf_counter() - start, 2)} s')

    start = time.perf_counter()
    filtered_dataset = []
    for (prompt, _), input_len, output_len in zip(dataset, prompts_token_lens,
                                                  completions_token_lens):
        if input_len + output_len > session_len:
            # ignore too long conversation
            continue
        filtered_dataset.append([prompt, input_len, output_len])

    if samples > 0:
        filtered_dataset = random.sample(filtered_dataset, samples)

    for data in filtered_dataset:
        que.put(data)
    print(f'elapsed time for filtering: '
          f'{round(time.perf_counter() - start, 2)} s')
    return len(filtered_dataset)


def main(tritonserver_addr: str,
         tokenizer_path: str,
         dataset_path: str,
         concurrency: int = 1,
         session_len: int = 2048,
         samples: int = 1000):
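    """Benchmark serving performance against a running triton inference
    server.

    Args:
        tritonserver_addr (str): address of the triton inference server.
        tokenizer_path (str): path of the tokenizer model.
        dataset_path (str): path of the ShareGPT-style dataset json file.
        concurrency (int): number of concurrent client sessions.
        session_len (int): max session length; longer conversations are
            filtered out of the dataset.
        samples (int): number of requests sampled from the dataset.
    """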
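    # warm up with max-length generations before measuring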
    warmup(tritonserver_addr, concurrency, session_len - 1)
    req_que = mp.Queue()
    res_que = mp.Queue()

    procs = []
    for i in range(concurrency):
        chatbot = Chatbot(tritonserver_addr=tritonserver_addr,
                          display=False,
                          profile_serving=True,
                          ignore_eos=True,
                          log_level=logging.ERROR)
        proc = mp.Process(target=infer,
                          args=(chatbot, i + 1, req_que, res_que))
        procs.append(proc)

    # read data and put it to queue
    n_req = read_dataset(tokenizer_path, dataset_path, samples, session_len,
                         req_que)
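    # one sentinel per worker terminates its infer loop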
    for i in range(concurrency):
        req_que.put([None, None, None])

    _start = time.perf_counter()
    for proc in procs:
        proc.start()

    stats = []
    for i in range(concurrency):
        session_id, _stats = res_que.get()
        print(f'\n{"-" * 50}\n'
              f'session {session_id}: processed reqs {len(_stats)}, '
              f'stats: \n{_stats}\n{"-" * 50}\n')
        stats.append(np.array(_stats))
    _end = time.perf_counter()

    elapsed_time = _end - _start

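    # each row: [first_token_latency, completion_tokens,
    #            request_output_len, total_tokens, token_latency]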
    stats = np.concatenate(stats).reshape(-1, 5)

    first_token_latency_min = np.min(stats[:, 0], axis=0)
    first_token_latency_max = np.max(stats[:, 0], axis=0)
    first_token_latency_ave = np.mean(stats[:, 0], axis=0)
    completion_tokens = np.sum(stats[:, 1], axis=0)
    request_output_tokens = np.sum(stats[:, 2], axis=0)
    total_tokens = np.sum(stats[:, 3], axis=0)
    prompt_tokens = total_tokens - completion_tokens
    completion_token_throughput = completion_tokens / elapsed_time
    total_token_throughput = total_tokens / elapsed_time
    rqs = n_req / elapsed_time
    rqm = rqs * 60

    if not (np.abs(stats[:, 1] - stats[:, 2]) <= 1).all():
        print(f'Did not generate the requested number of tokens. '
              f'Requested {request_output_tokens:.0f}, '
              f'but got {completion_tokens:.0f}')

    print(
        f'\n{"-" * 50}\nconcurrency: {concurrency}\n'
        f'elapsed_time: {elapsed_time:.3f}s\n'
        f'first_token latency(min, max, ave): '
        f'{first_token_latency_min:.3f}s, {first_token_latency_max:.3f}s, '
        f'{first_token_latency_ave:.3f}s\n'
        f'number of prompt tokens: {prompt_tokens:.0f}\n'
        f'number of completion tokens: {completion_tokens:.0f}\n'
        f'token throughput (completion token): {completion_token_throughput:.3f} token/s\n'  # noqa
        f'token throughput (prompt + completion token): {total_token_throughput:.3f} token/s\n'  # noqa
        f'RPS (request per second): {rqs:.3f} req/s\n'
        f'RPM (request per minute): {rqm:.3f} req/min\n'
        f'{"-" * 50}\n')
    for proc in procs:
        proc.join()


if __name__ == '__main__':
    fire.Fire(main)