# Copyright (c) OpenMMLab. All rights reserved.
import json
import logging
import queue
import random
import threading
from dataclasses import dataclass
from enum import Enum
from functools import partial
from typing import List, Union

import google.protobuf.json_format
import mmengine
import numpy as np
import tritonclient.grpc as grpcclient
from tritonclient.grpc.service_pb2 import ModelInferResponse

from lmdeploy.model import MODELS
from lmdeploy.serve.turbomind.utils import (Postprocessor, Preprocessor,
                                            prepare_tensor)


@dataclass
class Session:
    session_id: Union[int, str]
    request_id: str = ''
    histories: str = ''  # history conversations of the session
    sequence_length: int = 0  # the total generated token number in the session
    prompt: str = ''
    response: str = ''
    status: int = None  # status of the session


class StatusCode(Enum):
    TRITON_STREAM_END = 0  # end of streaming
    TRITON_STREAM_ING = 1  # response is in streaming
    TRITON_SESSION_READY = 2  # session is ready for inference
    TRITON_SERVER_ERR = -1  # triton server's error
    TRITON_SESSION_CLOSED = -2  # session has been closed
    TRITON_SESSION_OUT_OF_LIMIT = -3  # request length out of limit
    TRITON_SESSION_INVALID_ARG = -4  # invalid argument


def stream_callback(que, result, error):
    """callback function invoked by triton client."""
    if error:
        print(error)
        que.put(dict(errcode=StatusCode.TRITON_SERVER_ERR, errmsg=f'{error}'))
    else:
        que.put(result.get_response(as_json=True))


def get_logger(log_file=None, log_level=logging.INFO):
    """Return the logger."""
    from lmdeploy.utils import get_logger
    logger = get_logger('service.ft', log_file=log_file, log_level=log_level)
    return logger


class Chatbot:
    """Chatbot for LLaMA series models with turbomind as inference engine.

    Args:
        tritonserver_addr (str): communicating address '<ip>:<port>' of
            triton inference server
        model_name (str): name of the to-be-deployed model
        ignore_eos (bool): indicator for ignoring the eos token, i.e.
            banning it from being generated
        log_level (int): the level of the log
        display (bool): display the generated text on console or not
        profile_generation (bool): profile token generation or not
        profile_serving (bool): profile serving performance or not
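
    Examples:
        A minimal usage sketch, assuming a turbomind triton server is
        reachable at the illustrative address `0.0.0.0:33337`:

        >>> chatbot = Chatbot(tritonserver_addr='0.0.0.0:33337',
        ...                   display=True)
        >>> for status, text, n_token in chatbot.stream_infer(
        ...         session_id=1, prompt='hi there'):
        ...     continue
        >>> chatbot.end(session_id=1)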
    """

    def __init__(self,
                 tritonserver_addr: str,
                 model_name: str = '',
                 ignore_eos: bool = False,
                 log_level: int = logging.INFO,
                 display: bool = False,
                 profile_generation: bool = False,
                 profile_serving: bool = False,
                 **model_kwargs):
        self.tritonserver_addr = tritonserver_addr
        self.model_name = model_name
        if self.model_name == '':
            self.model_name = self._get_model_name()
        assert self.model_name in MODELS.module_dict.keys(), \
            f"'{self.model_name}' is not supported. " \
            f'The supported models are: {MODELS.module_dict.keys()}'
        self.model = MODELS.get(self.model_name)(**model_kwargs)
        self._session = None
        self.preprocess = Preprocessor(tritonserver_addr)
        self.postprocess = Postprocessor(tritonserver_addr)
        self.bos_id = self._get_bos()
        self.eos_id = self._get_eos()
        stop_words = self._stop_words(self.model.stop_words)
        bad_words = None
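        # `ignore_eos` disables the stop words and bans the eos token via
        # `bad_words`, so decoding never stops at eos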
        if ignore_eos:
            stop_words = None
            bad_words = np.array([[[self.eos_id], [1]]], dtype=np.int32)
        self.cfg = mmengine.Config(
            dict(session_len=self.model.session_len,
                 top_p=self.model.top_p,
                 top_k=self.model.top_k,
                 temperature=self.model.temperature,
                 repetition_penalty=self.model.repetition_penalty,
                 stop_words=stop_words,
                 bad_words=bad_words))
        self.log_level = log_level
        self.display = display
        self.profile_generation = profile_generation
        self.profile_serving = profile_serving

    def stream_infer(self,
                     session_id: int,
                     prompt: str,
                     request_id: str = '',
                     request_output_len: int = None,
                     sequence_start: bool = False,
                     sequence_end: bool = False,
                     *args,
                     **kwargs):
        """Start a new round conversion of a session.

        Args:
            session_id (int): the unique id of a session
            prompt (str): user's prompt in this round of conversation
            request_id (str): the unique id of this round of conversation
            request_output_len (int): the expected number of generated tokens
            sequence_start (bool): start flag of a session
            sequence_end (bool): end flag of a session
        Returns:
            iterator: The generated content by chatbot
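
        Example:
            A sketch of one streamed round; the extra kwargs are optional
            per-round overrides of the sampling config:

            >>> for status, text, n_token in chatbot.stream_infer(
            ...         1, 'hello', request_output_len=512,
            ...         temperature=0.8, top_p=0.95):
            ...     if status.value < 0:
            ...         break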
        """
        assert isinstance(session_id, int), \
            f'INT session id is required, but got {type(session_id)}'

        logger = get_logger(log_level=self.log_level)
        logger.info(f'session {session_id}, request_id {request_id}, '
                    f'request_output_len {request_output_len}')

        if self._session is None:
            sequence_start = True
            self._session = Session(session_id=session_id)
        elif self._session.status == 0:
            logger.error(f'session {session_id} has been ended. Please set '
                         f'`sequence_start` to True if you want to restart it')
            yield StatusCode.TRITON_SESSION_CLOSED, '', 0
            return

        self._session.status = 1
        self._session.request_id = request_id
        self._session.response = ''
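        # extra kwargs override the sampling config for this round, e.g.
        # `temperature` or `top_p`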
        self.cfg.update(**kwargs)

        self._session.prompt = self._get_prompt(prompt, sequence_start)
        for status, res, tokens in self._stream_infer(self._session,
                                                      self._session.prompt,
                                                      request_output_len,
                                                      sequence_start,
                                                      sequence_end):
            if status.value < 0:
                break
            else:
                yield status, res, tokens
        if status.value == 0:
            self._session.histories = \
                self._session.histories + self._session.prompt + \
                self._session.response
        else:
            yield status, res, tokens

    def end(self, session_id: int, *args, **kwargs):
        """end a session. Triton inference server will release the session's
        occupied resource when it is ended.

        Args:
            session_id (int): the unique id of a session

        Returns:
            StatusCode: the status of ending the session
        """
        assert isinstance(session_id, int), \
            f'INT session id is required, but got {type(session_id)}'

        logger = get_logger(log_level=self.log_level)
        logger.info(f'end session: {session_id}')

        if self._session is None:
            logger.error(
                f"session {session_id} doesn't exist. It cannot be ended")
            return StatusCode.TRITON_SESSION_INVALID_ARG
        if self._session.session_id != session_id:
            logger.error(f'you cannot end session {session_id}, because this '
                         f'session is {self._session.session_id}')
            return StatusCode.TRITON_SESSION_INVALID_ARG
        if self._session.status == 0:
            logger.warning(f'session {session_id} has already been ended')
            return StatusCode.TRITON_SESSION_CLOSED

        self._session.status = 0
        for status, _, _ in self._stream_infer(self._session,
                                               prompt='',
                                               request_output_len=0,
                                               sequence_start=False,
                                               sequence_end=True):
            if status.value < 0:
                break

        self.reset_session()
        return status

    def cancel(self, session_id: int, *args, **kwargs):
        """Cancel the session during generating tokens.

        Args:
            session_id (int): the unique id of a session

        Returns:
            StatusCode: the status of cancelling the session
        """
        assert isinstance(session_id, int), \
            f'INT session id is required, but got {type(session_id)}'
        logger = get_logger(log_level=self.log_level)
        logger.info(f'cancel session: {session_id}')

        if self._session is None:
            logger.error(
                f"session {session_id} doesn't exist. It cannot be cancelled")
            return StatusCode.TRITON_SESSION_INVALID_ARG
        if self._session.session_id != session_id:
            logger.error(
                f'you cannot cancel session {session_id}, because this '
                f'session is {self._session.session_id}')
            return StatusCode.TRITON_SESSION_INVALID_ARG
        if self._session.status == 0:
            logger.error(f'session {session_id} has already been ended. '
                         f'It cannot be cancelled')
            return StatusCode.TRITON_SESSION_CLOSED

        prev_session = self._session
        status, res = None, None
        for status, res, _ in self._stream_infer(self._session,
                                                 prompt='',
                                                 request_output_len=0,
                                                 sequence_start=False,
                                                 sequence_end=False,
                                                 cancel=True):
            if status.value < 0:
                break
        if status == StatusCode.TRITON_STREAM_END:
            logger.info(f'cancel session {session_id} successfully')
            if prev_session.histories:
                logger.warning(f'TODO: start to recover session {session_id}')
        else:
            logger.info(f'cancel session {session_id} failed: {res}')
        return status

    def resume(self, session_id: int, *args, **kwargs):
        """Resume a session by sending the history conversations to triton
        inference server. After resuming, users can continue chatting with
        chatbot.

        Args:
            session_id (int): the unique id of a session

        Returns:
            StatusCode: the status of resuming the session
        """
        assert isinstance(session_id, int), \
            f'INT session id is required, but got {type(session_id)}'

        logger = get_logger(log_level=self.log_level)
        logger.info(f'resume session: {session_id}')

        if self._session is None:
            logger.error(
                f"session {session_id} doesn't exist. It cannot be recovered")
            return StatusCode.TRITON_SESSION_INVALID_ARG
        if self._session.session_id != session_id:
            logger.error(
                f'you cannot resume session {session_id}, because this '
                f'session is {self._session.session_id}')
            return StatusCode.TRITON_SESSION_INVALID_ARG

        self._session.status = 1
        self._session.sequence_length = 0
        histories = self._session.histories
        for status, _, _ in self._stream_infer(self._session,
                                               prompt=histories,
                                               request_output_len=0,
                                               sequence_start=True,
                                               sequence_end=False):
            if status.value < 0:
                break

        self._session.histories = histories
        return status

    def infer(self,
              session_id: int,
              prompt: str,
              request_id: str = '',
              request_output_len: int = None,
              sequence_start: bool = False,
              sequence_end: bool = False,
              *args,
              **kwargs):
        """Start a new round conversion of a session. Return the chat
        completions in non-stream mode.

        Args:
            session_id (int): the unique id of a session
            prompt (str): user's prompt in this round of conversation
            request_id (str): the unique id of this round of conversation
            request_output_len (int): the expected number of generated tokens
            sequence_start (bool): start flag of a session
            sequence_end (bool): end flag of a session
        Returns:
            tuple(Status, str, int): status, text/chat completion,
            generated token number
        """
        assert isinstance(session_id, int), \
            f'INT session id is required, but got {type(session_id)}'

        logger = get_logger(log_level=self.log_level)
        logger.info(f'session {session_id}, request_id {request_id}, '
                    f'request_output_len {request_output_len}')

        if self._session is None:
            sequence_start = True
            self._session = Session(session_id=session_id)
        elif self._session.status == 0:
            logger.error(f'session {session_id} has been ended. Please set '
                         f'`sequence_start` to True if you want to restart it')
            return StatusCode.TRITON_SESSION_CLOSED, '', 0

        self._session.status = 1
        self._session.request_id = request_id
        self._session.response = ''

        self._session.prompt = self._get_prompt(prompt, sequence_start)
        status, res, tokens = None, '', 0
        for status, res, tokens in self._stream_infer(self._session,
                                                      self._session.prompt,
                                                      request_output_len,
                                                      sequence_start,
                                                      sequence_end):
            if status.value < 0:
                break
        if status.value == 0:
            self._session.histories = \
                self._session.histories + self._session.prompt + \
                self._session.response
        return status, res, tokens

    def reset_session(self):
        """reset session."""
        self._session = None

    @property
    def session(self):
        """get session."""
        return self._session

    @session.setter
    def session(self, value):
        """set session."""
        self._session = value

    def _get_model_name(self):
        with grpcclient.InferenceServerClient(
                self.tritonserver_addr) as client:
            model_config = client.get_model_config(model_name='turbomind',
                                                   as_json=True)
            return model_config['config']['parameters']['model_name'][
                'string_value']

    def _get_bos(self):
        """return bos token id."""
        token_ids, _ = self.preprocess('<BOS>')
        return token_ids[0][0]

    def _get_eos(self):
        """return eos token id."""
        token_ids, _ = self.preprocess('<EOS>')
        return token_ids[0][0]

    def _stop_words(self, stop_words: List[int]):
        """return stop-words' token ids."""
        if stop_words is None:
            return None
        assert isinstance(stop_words, List) and \
               all(isinstance(elem, int) for elem in stop_words), \
               f'stop_words must be a list of int, but got {stop_words}'
        # each id in stop_words represents a stop word
        # refer to https://github.com/fauxpilot/fauxpilot/discussions/165 for
        # detailed explanation about turbomind's stop_words
        stop_word_offsets = range(1, len(stop_words) + 1)
        stop_words = np.array([[stop_words,
                                stop_word_offsets]]).astype(np.int32)
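        # e.g. stop_words=[13, 2] becomes
        # array([[[13, 2], [1, 2]]], dtype=int32) of shape (1, 2, 2):
        # row 0 holds the token ids and row 1 their end offsets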
        return stop_words

    def _get_prompt(self, prompt: str, sequence_start: bool):
        """return the concatenated prompt according to the model's chat
        template."""
        if self.profile_generation or self.profile_serving:
            return prompt
        return self.model.get_prompt(prompt, sequence_start)

    def _stream_infer(self,
                      session: Session,
                      prompt: str,
                      request_output_len: int = 512,
                      sequence_start: bool = True,
                      sequence_end: bool = False,
                      cancel: bool = False):
        """communicate with inference server to chat, or cancel a session, or
        end a session.

        Args:
            session (Session): an instance of a session
            prompt (str): the concatenated prompt
            request_output_len (int): the max number of tokens to be generated
            sequence_start (bool): indicator for starting a sequence
            sequence_end (bool): indicator for ending a sequence
            cancel (bool): indicator for cancelling the session
        Yields:
            tuple: status, text, generated token number
        """
        logger = get_logger(log_level=self.log_level)
        logger.info(f'session {session.session_id}, '
                    f'request id {session.request_id}, '
                    f'request_output_len {request_output_len}, '
                    f'start {sequence_start}, '
                    f'end {sequence_end}, cancel {cancel}')

        assert request_output_len is None or \
               isinstance(request_output_len, int), \
               f'request_output_len is supposed to be None or int, ' \
               f'but got {type(request_output_len)}'

        if sequence_start:
            logger.info(f'session {session.session_id}, clear history since '
                        f'sequence_start is True')
            session.histories = ''
            session.sequence_length = 0

        input_ids, input_lengths = self.preprocess(prompt)
        input_tokens = input_lengths.squeeze()
        if self.profile_generation:
            yield StatusCode.TRITON_STREAM_ING, \
                  'ignore preprocessing during profiling generation', 0
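
        # by default, request the rest of the context budget, floored at
        # 128 tokens; the overflow check below rejects requests that exceed
        # the session length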
        if request_output_len is None:
            request_output_len = max(
                128,
                self.cfg.session_len - session.sequence_length - input_tokens)

        if input_tokens + request_output_len + \
                session.sequence_length > self.cfg.session_len:
            errmsg = f'session {session.session_id}, ' \
                     f'out of max sequence length {self.cfg.session_len}, ' \
                     f'#input tokens {input_tokens}, ' \
                     f'history tokens {session.sequence_length}, ' \
                     f'request length {request_output_len}'
            logger.warning(errmsg)
            yield StatusCode.TRITON_SESSION_OUT_OF_LIMIT, errmsg, 0
            return

        logger.info(f'session {session.session_id}, '
                    f'max length: {self.cfg.session_len}, '
                    f'input tokens: {input_tokens}, '
                    f'request tokens: {request_output_len}, '
                    f'history tokens: {session.sequence_length}')

        preseq_length = session.sequence_length
        session.response = ''
        session.status = StatusCode.TRITON_SESSION_READY

        que = queue.Queue()
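        # the producer thread streams the request to the triton server
        # while the current thread consumes responses from `que`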
        producer = threading.Thread(target=self._stream_producer,
                                    args=(self.tritonserver_addr, session, que,
                                          self.cfg, input_ids, input_lengths,
                                          request_output_len, sequence_start,
                                          sequence_end, preseq_length, cancel))
        producer.start()
        for status, res, n_token in self.stream_consumer(
                self.postprocess, que, session, input_tokens, preseq_length,
                cancel, logger, self.display, self.profile_generation,
                self.eos_id):
            yield status, res, n_token

        producer.join()
        self._session = que.get()
        curseq_length = self._session.sequence_length
        logger.info(f'session {session.session_id}, pre seq_len '
                    f'{preseq_length}, cur seq_len {curseq_length}, '
                    f'diff {curseq_length - preseq_length}')

    @staticmethod
    def _stream_producer(tritonserver_addr, session, que, cfg, input_ids,
                         input_lengths, request_output_len, sequence_start,
                         sequence_end, preseq_length, cancel):
        """Send a request to the triton inference server.

        Args:
            tritonserver_addr (str): the communication address of the inference
                server
            session (Session): an instance of a session
            que (queue.Queue): response queue
            cfg (dict): parameters for sampling
            input_ids (numpy.ndarray): token ids of input prompt
            input_lengths (numpy.ndarray): length of input_ids
            request_output_len (int): the max number of tokens to be generated
            sequence_start (bool): indicator for starting a sequence
            sequence_end (bool): indicator for ending a sequence
            preseq_length (int): the history sequence length
            cancel (bool): indicator for cancelling the session
        """
        request_output_len = np.full(input_lengths.shape,
                                     request_output_len).astype(np.uint32)

        callback = partial(stream_callback, que)
        with grpcclient.InferenceServerClient(tritonserver_addr) as client:
            inputs = [
                prepare_tensor('input_ids', input_ids),
                prepare_tensor('input_lengths', input_lengths),
                prepare_tensor('request_output_len', request_output_len),
                prepare_tensor('runtime_top_p',
                               cfg.top_p * np.ones((1, 1), dtype=np.float32)),
                prepare_tensor(
                    'temperature',
                    cfg.temperature * np.ones((1, 1), dtype=np.float32)),
                prepare_tensor(
                    'repetition_penalty',
                    cfg.repetition_penalty * np.ones(
                        (1, 1), dtype=np.float32)),
                prepare_tensor('step',
                               preseq_length * np.ones((1, 1), dtype=np.int32))
            ]
            if cfg.top_k is not None:
                inputs += [
                    prepare_tensor(
                        'runtime_top_k',
                        cfg.top_k * np.ones((1, 1), dtype=np.uint32))
                ]
            if cfg.stop_words is not None:
                inputs += [prepare_tensor('stop_words_list', cfg.stop_words)]
            if cfg.bad_words is not None:
                inputs += [prepare_tensor('bad_words_list', cfg.bad_words)]

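            # turbomind's control tensors: START/END mark the sequence
            # boundaries, STOP cancels the running sequence and CORRID
            # routes the request to its server-side sequence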
            inputs += [
                prepare_tensor(
                    'session_len',
                    cfg.session_len *
                    np.ones([input_ids.shape[0], 1], dtype=np.uint32)),
                prepare_tensor('START', (1 if sequence_start else 0) * np.ones(
                    (1, 1), dtype=np.int32)),
                prepare_tensor('END', (1 if sequence_end else 0) * np.ones(
                    (1, 1), dtype=np.int32)),
                prepare_tensor(
                    'CORRID',
                    session.session_id * np.ones((1, 1), dtype=np.uint64)),
                prepare_tensor('STOP', (1 if cancel else 0) * np.ones(
                    (1, 1), dtype=np.int32))
            ]
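            # a fresh 64-bit seed initializes the server-side sampling for
            # the new sequence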
            if sequence_start:
                random_seed = random.getrandbits(64)
                inputs += [
                    prepare_tensor(
                        'random_seed',
                        random_seed * np.ones((1, 1), dtype=np.uint64))
                ]
            client.start_stream(callback)
            client.async_stream_infer('turbomind',
                                      inputs,
                                      sequence_id=session.session_id,
                                      request_id=session.request_id,
                                      sequence_start=sequence_start,
                                      sequence_end=sequence_end)
        que.put(None)

    @staticmethod
    def stream_consumer(postprocess, res_queue, session, n_input_token,
                        preseq_length, cancel, logger, display,
                        profile_generation, eos_id):
        """Consume the response from the triton inference server.

        Args:
            postprocess (callable): postprocess function for
                the generated tokens
            res_queue (queue.Queue): response queue
            session (Session): an instance of a session
            n_input_token (int): token number of input prompt
            preseq_length (int): the history sequence length
            cancel (bool): indicator for cancelling the session
            logger (logging.Logger): the logger used by the chatbot
            display (bool): display the text in the console or not
            profile_generation (bool): indicator for profiling token generation
            eos_id (int): eos token id

        Yields:
            tuple: status, text, generated token number
        """
        status, res, n_token = None, '', 0
        while True:
            result = res_queue.get()
            if result is None:
                status = StatusCode.TRITON_STREAM_END
                res = session.response
                session.status = StatusCode.TRITON_STREAM_END
                break
            if 'errcode' in result:
                logger.error(f'got error from turbomind, code '
                             f"{result['errcode']}, {result['errmsg']}, "
                             f'token {session.sequence_length}')
                session.sequence_length = preseq_length
                session.response = ''
                status = StatusCode.TRITON_SERVER_ERR
                res = f"{result['errcode']}, {result['errmsg']}"
                n_token = 0
                break
            if cancel:
                continue
            try:
                message = ModelInferResponse()
                google.protobuf.json_format.Parse(json.dumps(result), message)
                result = grpcclient.InferResult(message)
                sequence_length = result.as_numpy('sequence_length')
                output_ids = result.as_numpy('output_ids')

                session.sequence_length = sequence_length.squeeze()
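                # keep only this round's newly generated tokens: skip the
                # prompt (n_input_token) and the history (preseq_length)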
                output_ids = output_ids.reshape((1, 1, output_ids.shape[-1]))
                output_ids = output_ids[:, :, n_input_token +
                                        preseq_length:sequence_length.squeeze(
                                        )]
                last_token_id = None if output_ids.shape[
                    -1] == 0 else output_ids[-1, -1, -1]
                if last_token_id == eos_id:
                    session.sequence_length = session.sequence_length - 1
                    output_ids = output_ids[:, :, :-1]

                if profile_generation:
                    yield (StatusCode.TRITON_STREAM_ING,
                           'postprocessing is ignored during profiling '
                           'token generation', output_ids.shape[-1])
                    continue
                output_str = postprocess(
                    output_ids, np.array([[n_token]], dtype=np.uint32))
                n_token = output_ids.shape[-1]
                text = output_str[0].decode()
                if display:
                    print(text, end='', flush=True)
                session.response += text
                yield (StatusCode.TRITON_STREAM_ING, session.response,
                       output_ids.shape[-1])
            except Exception as e:
                logger.error(f'catch exception: {e}')
                logger.error(
                    f'session {session.session_id}: prompt: {session.prompt}')

        # put the session back to the queue so that `_stream_infer` can
        # retrieve it and update `self._session`
        while not res_queue.empty():
            res_queue.get()
        res_queue.put(session)
        if display:
            print('\n')
        yield status, res, n_token