import asyncio
import time
from typing import AsyncIterator, Dict, List, Optional

from cacheflow.logger import init_logger
from cacheflow.outputs import RequestOutput
from cacheflow.sampling_params import SamplingParams
from cacheflow.server.arg_utils import AsyncServerArgs
from cacheflow.server.llm_server import LLMServer
from cacheflow.server.ray_utils import ray, initialize_cluster

logger = init_logger(__name__)

TIMEOUT_TO_PREVENT_DEADLOCK = 1  # seconds


class AsyncLLMServer:
    """An asynchronous wrapper for LLMServer.

    This class wraps the LLMServer class to make it asynchronous. It uses
    asyncio so that incoming requests are processed concurrently: the
    LLMServer is kicked by the generate method whenever there are requests
    in the waiting queue, and the generate method streams the outputs from
    the LLMServer back to the caller.

    NOTE: For the comprehensive list of arguments, see `LLMServer`.

    Args:
        worker_use_ray: Whether to use Ray for model workers. Required for
            distributed execution. Should be the same as
            `parallel_config.worker_use_ray`.
        server_use_ray: Whether to make LLMServer a Ray actor. If so, the
            async frontend will be executed in a separate process from the
            model workers.
        *args, **kwargs: Arguments for LLMServer.
    """
    def __init__(self, worker_use_ray: bool, server_use_ray: bool,
                 *args, **kwargs) -> None:
        self.worker_use_ray = worker_use_ray
        self.server_use_ray = server_use_ray
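        # Select how to construct the underlying LLMServer: run it in this
        # process, or wrap it in a Ray actor. When the model workers are
        # themselves Ray actors, the server actor reserves no GPU of its own;
        # otherwise the actor reserves one GPU, since the worker runs in the
        # same process as the server.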
        if not self.server_use_ray:
            server_class = LLMServer
        elif self.worker_use_ray:
            server_class = ray.remote(num_cpus=0)(LLMServer).remote
        else:
            server_class = ray.remote(num_gpus=1)(LLMServer).remote
        self.server = server_class(*args, **kwargs)
        # Request id -> request output.
        self.request_outputs: Dict[str, RequestOutput] = {}
        # Request id -> event to notify that there is new output.
        self.request_events: Dict[str, asyncio.Event] = {}
        self.is_server_running = False
        self.kicking_request_id: Optional[str] = None

    async def server_step(self, kicking_request_id: Optional[str] = None):
        """Kick the server to process the waiting requests."""
        self.is_server_running = True
        self.kicking_request_id = kicking_request_id
        if self.server_use_ray:
            request_outputs = await self.server.step.remote()
        else:
            # Yield to the event loop to allow other coroutines to run
            # while is_server_running is True. This lets the server add new
            # requests into the queue.
            await asyncio.sleep(0)
            request_outputs = self.server.step()
        self.is_server_running = False
        self.kicking_request_id = None

        # Notify the waiting coroutines that there are new outputs ready.
        for request_output in request_outputs:
            request_id = request_output.request_id
            self.request_outputs[request_id] = request_output
            self.request_events[request_id].set()

    async def generate(
        self,
        prompt: Optional[str],
        sampling_params: SamplingParams,
        request_id: str,
        prompt_token_ids: Optional[List[int]] = None
    ) -> AsyncIterator[RequestOutput]:
        """Generate outputs for a request.

        This method is an async generator. It adds the request into the
        waiting queue of the LLMServer and streams the outputs from the
        LLMServer to the caller.

        Args:
            prompt: The prompt string. Can be None if prompt_token_ids is
                provided.
            sampling_params: The sampling parameters of the request.
            request_id: The unique id of the request.
            prompt_token_ids: The token IDs of the prompt. If None, we
                use the tokenizer to convert the prompt to token IDs.

        Yields:
            The output `RequestOutput` objects from the LLMServer for the
            request.
        """
        # Preprocess the request.
        arrival_time = time.time()

        # Create an event to notify us that there is new output from the
        # cacheflow server.
        request_event = asyncio.Event()
        self.request_events[request_id] = request_event

        logger.info(f"Received request {request_id}: "
                    f"prompt: {prompt!r}, "
                    f"sampling params: {sampling_params}, "
                    f"prompt token ids: {prompt_token_ids}.")

        # Add the request into the cacheflow server's waiting queue.
        if self.server_use_ray:
            await self.server.add_request.remote(
                request_id, prompt, sampling_params,
                prompt_token_ids=prompt_token_ids,
                arrival_time=arrival_time)
        else:
            self.server.add_request(
                request_id, prompt, sampling_params,
                prompt_token_ids=prompt_token_ids,
                arrival_time=arrival_time)

        # The cacheflow server does not have a background loop that keeps
        # processing incoming requests. Therefore, we need to keep kicking
        # the server to process the requests.
        while True:
            if request_id not in self.request_events:
                # The request has been aborted.
                return

            # Kick the server if the server is not running.
            if not self.is_server_running:
                await self.server_step(request_id)

            # Wait for new output. The request_event will be set in server_step
            # when there is new output available for the request. A timeout is
            # used to prevent deadlock.
            try:
                await asyncio.wait_for(request_event.wait(),
                                       timeout=TIMEOUT_TO_PREVENT_DEADLOCK)
            except asyncio.TimeoutError:
                continue
            # Reset the event to wait for the next output.
            request_event.clear()

            # Yield the newest output for this request to the caller.
            request_output = self.request_outputs[request_id]
            yield request_output

            # Once finished, release the resources of the request.
            if request_output.finished():
                logger.info(f"Finished request {request_id}.")

                del self.request_outputs[request_id]
                del self.request_events[request_id]
                # Kick the server if the server is not running. This handles
                # the case where there are still requests in the server's
                # waiting queue to be executed.
                if not self.is_server_running:
                    await self.server_step()
                break

    async def abort(self, request_id: str) -> None:
        """Abort a request.

        Abort a submitted request. If the request is finished or not found,
        this method will be a no-op.

        Args:
            request_id: The unique id of the request.
        """
        if request_id not in self.request_events:
            # The request has already finished or been aborted.
            return

        logger.info(f"Aborted request {request_id}.")

        if self.server_use_ray:
            await self.server.abort_request.remote(request_id)
        else:
            self.server.abort_request(request_id)

        if request_id in self.request_events:
            del self.request_events[request_id]
        if request_id in self.request_outputs:
            del self.request_outputs[request_id]

        # Reset the kicking state to prevent a deadlock when the request that
        # kicked the server is aborted while the server is running.
        if self.kicking_request_id == request_id:
            self.is_server_running = False
            self.kicking_request_id = None

    @classmethod
    def from_server_args(cls, server_args: AsyncServerArgs) -> "AsyncLLMServer":
        """Creates an async LLM server from the server arguments."""
        # Create the server configs.
        server_configs = server_args.create_server_configs()
        parallel_config = server_configs[2]
        # Initialize the cluster.
        distributed_init_method, devices = initialize_cluster(
            parallel_config, server_args.server_use_ray)
        # Create the LLM server.
        server = cls(server_args.worker_use_ray,
                     server_args.server_use_ray,
                     *server_configs,
                     distributed_init_method, devices,
                     log_stats=not server_args.disable_log_stats)
        return server
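

# Usage sketch (illustrative only, not part of the server): one way a frontend
# could drive AsyncLLMServer. The AsyncServerArgs and SamplingParams
# constructor arguments shown here ("model", "temperature") are assumptions
# and may not match the actual fields defined elsewhere in this repository.
#
#     server = AsyncLLMServer.from_server_args(
#         AsyncServerArgs(model="facebook/opt-125m"))
#
#     async def main() -> None:
#         sampling_params = SamplingParams(temperature=0.8)
#         async for request_output in server.generate(
#                 "Hello, my name is", sampling_params, request_id="0"):
#             print(request_output)
#             if request_output.finished():
#                 break
#
#     asyncio.run(main())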