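"""CacheFlow server entry point.

Builds the serving pipeline (memory analyzer -> per-node controllers ->
frontend -> scheduler) from command-line arguments, then runs a small set
of test queries until they complete.
"""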
import argparse
from typing import List

from cacheflow.master.frontend import Frontend
from cacheflow.master.scheduler import Scheduler
from cacheflow.models import get_memory_analyzer
from cacheflow.worker.controller import Controller

parser = argparse.ArgumentParser(description='CacheFlow server')
parser.add_argument('--model', type=str, default='facebook/opt-125m', help='model name')
parser.add_argument('--num-nodes', type=int, default=1, help='number of nodes')
parser.add_argument('--num-workers', type=int, default=1, help='number of workers per node')
parser.add_argument('--block-size', type=int, default=8, choices=[8, 16], help='token block size')
# NOTE(woosuk): If FlashAttention is used, the float data type is not supported.
parser.add_argument('--dtype', type=str, default='half', choices=['half', 'float'], help='data type')
# TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--swap-space', type=int, default=20, help='CPU swap space size (GiB) per GPU')
parser.add_argument('--max-batch-size', type=int, default=2560, help='maximum number of batched tokens')
args = parser.parse_args()
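
# Example invocation (all flags are optional; defaults are shown above):
#   python server.py --model facebook/opt-125m --block-size 8 --dtype half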


def main():
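    # Profile the model to size the paged KV cache: given the model, block
    # size, and dtype, compute how many cache blocks fit in GPU memory and
    # how many fit in the CPU swap space.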
    memory_analyzer = get_memory_analyzer(
        model_name=args.model,
        block_size=args.block_size,
        dtype=args.dtype,
    )
    num_gpu_blocks = memory_analyzer.get_max_num_gpu_blocks(
        max_num_batched_tokens=args.max_batch_size)
    num_cpu_blocks = memory_analyzer.get_max_num_cpu_blocks(
        swap_space=args.swap_space)
    print(f'# GPU blocks: {num_gpu_blocks}, # CPU blocks: {num_cpu_blocks}')

    # Create a controller for each node.
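    # Each controller manages the `--num-workers` workers on its node; every
    # node receives the same model and cache configuration.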
    controllers: List[Controller] = []
    for i in range(args.num_nodes):
        controller = Controller(
            node_id=i,
            num_workers=args.num_workers,
            model_name=args.model,
            block_size=args.block_size,
            num_gpu_blocks=num_gpu_blocks,
            num_cpu_blocks=num_cpu_blocks,
            dtype=args.dtype,
            seed=args.seed,
        )
        controllers.append(controller)

    # Create a frontend.
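    # The frontend turns raw text queries into tokenized sequences for the
    # scheduler (see `frontend.query(...)` below).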
    frontend = Frontend(
        model_name=args.model,
        block_size=args.block_size,
    )

    # Create a scheduler.
    scheduler = Scheduler(
        frontend=frontend,
        controllers=controllers,
        block_size=args.block_size,
        num_gpu_blocks=num_gpu_blocks,
        num_cpu_blocks=num_cpu_blocks,
        max_num_batched_tokens=args.max_batch_size,
    )
    # Connect the controllers.
    for i in range(len(controllers) - 1):
        controllers[i].set_next(controllers[i + 1])
    controllers[-1].set_next(scheduler)
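    # With `set_next`, outputs flow from node 0 through the remaining nodes
    # and finally back to the scheduler, closing the execution loop.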

    # Test the following inputs.
    test_inputs = [
        ('Ion Stoica is a', {'n': 4, 'use_beam_search': True, 'temperature': 0.0}),
        ('UC Berkeley is', {'n': 3, 'temperature': 0.8, 'top_p': 0.99}),
        ('The future of cloud computing is', {}),   # Use default parameters.
    ]
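    # Drive the system to completion: enqueue one test input per iteration
    # and step the scheduler until no work is pending or running.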
    while True:
        if test_inputs:
            text, sampling_params = test_inputs.pop(0)
            frontend.query(text, **sampling_params)
        scheduler.step()
        if not (scheduler.pending or scheduler.running or test_inputs):
            break


if __name__ == '__main__':
    main()