# Copyright (c) OpenMMLab. All rights reserved.
import os

import fire

from lmdeploy.serve.turbomind.chatbot import Chatbot


def input_prompt(model_name):
    """Input a prompt in the consolo interface."""
    if model_name == 'codellama':
        print('\nenter !! to end the input >>>\n', end='')
        sentinel = '!!'
    else:
        print('\ndouble enter to end input >>> ', end='')
        sentinel = ''  # ends when this string is seen
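    # iter(callable, sentinel) calls input() repeatedly until the sentinel
    # line is entered; the collected lines are then joined into one prompt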
    return '\n'.join(iter(input, sentinel))


def main(tritonserver_addr: str,
         session_id: int = 1,
         cap: str = 'chat',
         stream_output: bool = True,
         **kwargs):
    """An example to communicate with inference server through the command line
    interface.

    Args:
        tritonserver_addr (str): the address in format "ip:port" of
          triton inference server
        session_id (int): the identical id of a session
        cap (str): the capability of a model. For example, codellama supports
            the capabilities ['completion', 'infill', 'instruct', 'python']
        stream_output (bool): whether to stream the output
        **kwargs (dict): other arguments for initializing the model's chat
            template
    """
    log_level = os.environ.get('SERVICE_LOG_LEVEL', 'WARNING')
    kwargs.update(capability=cap)
    chatbot = Chatbot(tritonserver_addr,
                      log_level=log_level,
                      display=stream_output,
                      **kwargs)
    nth_round = 1
    while True:
        prompt = input_prompt(chatbot.model_name)
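        # 'exit' quits the client, 'end' finishes the current session on the
        # server; any other input is sent to the model as a prompt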
        if prompt == 'exit':
            exit(0)
        elif prompt == 'end':
            chatbot.end(session_id)
        else:
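            # a new request id per round keeps requests within the same
            # session distinguishable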
            request_id = f'{session_id}-{nth_round}'
            if stream_output:
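                # with display=stream_output the chatbot prints tokens as
                # they arrive, so this loop only drains the generator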
                for status, res, n_token in chatbot.stream_infer(
                        session_id,
                        prompt,
                        request_id=request_id,
                        request_output_len=512):
                    continue
            else:
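                # blocking call: print the full response once it is ready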
                status, res, n_token = chatbot.infer(session_id,
                                                     prompt,
                                                     request_id=request_id,
                                                     request_output_len=512)
                print(res)
        nth_round += 1


if __name__ == '__main__':
    fire.Fire(main)