import argparse

from cacheflow import ServerArgs, LLMServer, SamplingParams


def main(args: argparse.Namespace):
    # Parse the CLI arguments and initialize the server.
    server_args = ServerArgs.from_cli_args(args)
    server = LLMServer.from_server_args(server_args)
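    # The server holds the model and scheduler state; requests added below
    # are batched together at each call to server.step().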

    # Test the following prompts.
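    # These cover several decoding configurations: default sampling, top-k
    # sampling with a presence penalty, parallel sampling (best_of > n),
    # and beam search.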
    test_prompts = [
        ("A robot may not injure a human being", SamplingParams()),
        ("To be or not to be,",
         SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2)),
        ("What is the meaning of life?",
         SamplingParams(n=2, best_of=5, temperature=0.8, top_p=0.95,
                        frequency_penalty=0.1)),
        ("It is only with the heart that one can see rightly",
         SamplingParams(n=3, best_of=3, use_beam_search=True, temperature=0.0)),
    ]

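    # Give each request a unique string id; a simple counter is enough here.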
    request_id = 0

    # Run the server.
    while True:
        # To test iteration-level scheduling, we add one request at each step.
        if test_prompts:
            prompt, sampling_params = test_prompts.pop(0)
            server.add_request(str(request_id), prompt, sampling_params)
            request_id += 1

        request_outputs = server.step()
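        # step() runs one scheduling/decoding iteration and returns the
        # current outputs of the requests processed in that iteration.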
        for request_output in request_outputs:
            if request_output.finished():
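                # finished() is True once all sequences for the request have
                # completed, so each request is printed exactly once.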
                print(request_output)

        if not (server.has_unfinished_requests() or test_prompts):
            break


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Simple CacheFlow server.')
    parser = ServerArgs.add_cli_args(parser)
    args = parser.parse_args()
    main(args)
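

# Example invocation (the exact flags come from ServerArgs.add_cli_args;
# the --model flag shown here is an assumption for illustration):
#   python simple_server.py --model facebook/opt-125m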