"vllm/engine/async_llm_engine.py" did not exist on "e86717833da1216222cf0d490c2e3ba198610b13"
async_llm_engine.py 16.1 KB
Newer Older
1
2
import asyncio
import time
from functools import partial
from typing import (Any, AsyncIterator, Dict, Iterable, List, Optional, Set,
                    Tuple, Type, Union)

from vllm.config import ModelConfig
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.llm_engine import LLMEngine
from vllm.engine.ray_utils import initialize_cluster, ray
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams

logger = init_logger(__name__)


class AsyncEngineDeadError(RuntimeError):
    pass


def _raise_exception_on_finish(task: asyncio.Task,
                               request_tracker: "RequestTracker") -> None:
    msg = ("Task finished unexpectedly. This should never happen! "
           "Please open an issue on Github.")
    try:
        try:
            task.result()
        except asyncio.CancelledError:
            return
        except Exception as exc:
            raise AsyncEngineDeadError(
                msg + " See stack trace above for the actual cause.") from exc
        raise AsyncEngineDeadError(msg)
    except Exception as exc:
        request_tracker.propagate_exception(exc)
        raise exc


class AsyncStream:
    """A stream of RequestOutputs for a request that can be
    iterated over asynchronously."""

    def __init__(self, request_id: str) -> None:
        self.request_id = request_id
        self._queue = asyncio.Queue()
        self._finished = False

    def put(self, item: RequestOutput) -> None:
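        # Silently drop outputs that arrive after the stream has finished.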
        if self._finished:
            return
        self._queue.put_nowait(item)

    def finish(self) -> None:
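        # StopIteration is used as an in-band sentinel; __anext__ translates
        # it into StopAsyncIteration for async-for consumers.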
        self._queue.put_nowait(StopIteration)
        self._finished = True

    @property
    def finished(self) -> bool:
        return self._finished

    def __aiter__(self):
        return self

    async def __anext__(self) -> RequestOutput:
        result = await self._queue.get()
        if result is StopIteration:
            raise StopAsyncIteration
        elif isinstance(result, Exception):
            raise result
        return result
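
# Consumption sketch (illustrative, not from the original module): the
# background loop is the producer, calling stream.put(...) after each engine
# step and stream.finish() when the request completes; consumers iterate:
#
#     async def consume(stream: AsyncStream) -> None:
#         async for request_output in stream:
#             ...  # handle each partial RequestOutput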


class RequestTracker:
    """Synchronous abstraction for tracking requests."""

    def __init__(self) -> None:
        self._request_streams: Dict[str, AsyncStream] = {}
        self._finished_requests: asyncio.Queue[str] = asyncio.Queue()
        self._new_requests: asyncio.Queue[Tuple[AsyncStream,
                                                dict]] = asyncio.Queue()

    def __contains__(self, item):
        return item in self._request_streams

    def propagate_exception(self, exc: Exception) -> None:
        """Propagate an exception to all request streams."""
        for stream in self._request_streams.values():
            stream.put(exc)

    def process_request_output(self,
                               request_output: RequestOutput,
                               *,
                               verbose: bool = False) -> None:
        """Process a request output from the engine."""
        request_id = request_output.request_id

        self._request_streams[request_id].put(request_output)
        if request_output.finished:
            if verbose:
                logger.info(f"Finished request {request_id}.")
            self.abort_request(request_id)

    def add_request(self, request_id: str,
                    **engine_add_request_kwargs) -> AsyncStream:
        """Add a request to be sent to the engine on the next background
        loop iteration."""
        if request_id in self._request_streams:
            raise KeyError(f"Request {request_id} already exists.")

        stream = AsyncStream(request_id)
        self._new_requests.put_nowait((stream, {
            "request_id": request_id,
            **engine_add_request_kwargs
        }))
        return stream

    def abort_request(self, request_id: str, *, verbose: bool = False) -> None:
        """Abort a request during next background loop iteration."""
        if verbose:
            logger.info(f"Aborted request {request_id}.")

        self._finished_requests.put_nowait(request_id)

        if request_id not in self._request_streams or self._request_streams[
                request_id].finished:
            # The request has already finished or been aborted.
            return

        self._request_streams[request_id].finish()

    def get_new_and_finished_requests(self) -> Tuple[List[dict], Set[str]]:
        """Get the new requests and finished requests to be
        sent to the engine."""
        new_requests: List[dict] = []
        finished_requests: Set[str] = set()

        while not self._finished_requests.empty():
            request_id = self._finished_requests.get_nowait()
            finished_requests.add(request_id)
            self._request_streams.pop(request_id, None)

        while not self._new_requests.empty():
            stream, new_request = self._new_requests.get_nowait()
            if stream.request_id in finished_requests:
                # The request has already been aborted.
                stream.finish()
                continue
            self._request_streams[stream.request_id] = stream
            new_requests.append(new_request)

        return new_requests, finished_requests
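
# Handoff sketch (illustrative, not from the original module): request
# handlers enqueue work via add_request()/abort_request(), and each
# background-loop iteration drains both queues in one non-blocking call:
#
#     new_requests, finished_requests = tracker.get_new_and_finished_requests()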


class _AsyncLLMEngine(LLMEngine):
    """Extension of LLMEngine to add async methods."""

    async def step_async(self) -> List[RequestOutput]:
        """Performs one decoding iteration and returns newly generated results.
        The workers are ran asynchronously if possible.

        This function performs one decoding iteration of the engine. It first
        schedules the sequences to be executed in the next iteration and the
        token blocks to be swapped in/out/copy. Then, it executes the model
        and updates the scheduler with the model outputs. Finally, it decodes
        the sequences and returns the newly generated results.
        """
        (seq_group_metadata_list, scheduler_outputs,
         early_return) = self._schedule()
        if early_return is not None:
            return early_return

        # Execute the model.
        output = await self._run_workers_async(
            "execute_model",
            seq_group_metadata_list=seq_group_metadata_list,
            blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,
            blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,
            blocks_to_copy=scheduler_outputs.blocks_to_copy,
        )

        return self._process_model_outputs(output, scheduler_outputs)

    async def _run_workers_async(
        self,
        method: str,
        *args,
        get_all_outputs: bool = False,
        **kwargs,
    ) -> Any:
        """Runs the given method on all workers."""
        all_outputs = []
        for worker in self.workers:
            if self.parallel_config.worker_use_ray:
                executor = partial(worker.execute_method.remote, method)
            else:
                executor = getattr(worker, method)

            output = executor(*args, **kwargs)
            all_outputs.append(output)

        if self.parallel_config.worker_use_ray:
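            # With Ray, each executor(...) call above returned an ObjectRef;
            # await them all concurrently.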
            all_outputs = await asyncio.gather(*all_outputs)

        if get_all_outputs:
            return all_outputs

        # Make sure all workers have the same results.
        output = all_outputs[0]
        for other_output in all_outputs[1:]:
            assert output == other_output
        return output


class AsyncLLMEngine:
    """An asynchronous wrapper for LLMEngine.
216

217
    This class is used to wrap the LLMEngine class to make it asynchronous. It
218
    uses asyncio to create a background loop that keeps processing incoming
219
    requests. The LLMEngine is kicked by the generate method when there
220
    are requests in the waiting queue. The generate method yields the outputs
221
    from the LLMEngine to the caller.
222

223
    NOTE: For the comprehensive list of arguments, see `LLMEngine`.
224
225
226
227
228

    Args:
        worker_use_ray: Whether to use Ray for model workers. Required for
            distributed execution. Should be the same as
            `parallel_config.worker_use_ray`.
        engine_use_ray: Whether to make LLMEngine a Ray actor. If so, the
            async frontend will be executed in a separate process from the
            model workers.
        log_requests: Whether to log the requests.
        start_engine_loop: Whether to start the background loop on
            initialization.
        *args, **kwargs: Arguments for LLMEngine.
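
    Example (illustrative sketch, not from the original module; the model
    name, prompt, and request id are placeholders):
        >>> async def main() -> None:
        ...     engine_args = AsyncEngineArgs(model="facebook/opt-125m")
        ...     engine = AsyncLLMEngine.from_engine_args(
        ...         engine_args, start_engine_loop=True)
        ...     params = SamplingParams(temperature=0.8)
        ...     async for output in engine.generate("Hello", params, "req-0"):
        ...         print(output.outputs[0].text)
        >>> asyncio.run(main())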
    """

    _engine_class: Type[_AsyncLLMEngine] = _AsyncLLMEngine

    def __init__(self,
                 worker_use_ray: bool,
                 engine_use_ray: bool,
                 *args,
                 log_requests: bool = True,
                 start_engine_loop: bool = False,
                 **kwargs) -> None:
        self.worker_use_ray = worker_use_ray
        self.engine_use_ray = engine_use_ray
        self.log_requests = log_requests
        self.engine = self._init_engine(*args, **kwargs)

        self.request_tracker: RequestTracker = RequestTracker()
        self.background_loop = None
        if start_engine_loop:
            self.start_background_loop()

    @property
    def is_running(self) -> bool:
        return (self.background_loop is not None
                and not self.background_loop.done())

    def start_background_loop(self) -> None:
        """Start the background loop."""
        if self.is_running:
            raise RuntimeError("Background loop is already running.")
        self.background_loop = asyncio.get_event_loop().create_task(
            self.run_engine_loop())
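        # If the loop task ever exits, propagate the failure to all open
        # request streams via the done callback below.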
        self.background_loop.add_done_callback(
            partial(_raise_exception_on_finish,
                    request_tracker=self.request_tracker))

    def _init_engine(self, *args,
                     **kwargs) -> Union[_AsyncLLMEngine, "ray.ObjectRef"]:
        if not self.engine_use_ray:
            engine_class = self._engine_class
        elif self.worker_use_ray:
            engine_class = ray.remote(num_cpus=0)(self._engine_class).remote
        else:
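            # Without Ray workers, the model runs inside the engine actor's
            # process, so the actor itself must reserve the GPU.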
            engine_class = ray.remote(num_gpus=1)(self._engine_class).remote
        return engine_class(*args, **kwargs)

    async def engine_step(self):
        """Kick the engine to process the waiting requests."""

        new_requests, finished_requests = (
            self.request_tracker.get_new_and_finished_requests())

        for new_request in new_requests:
            # Add the request into the vLLM engine's waiting queue.
            # TODO: Maybe add add_request_batch to reduce Ray overhead
            if self.engine_use_ray:
                await self.engine.add_request.remote(**new_request)
            else:
                self.engine.add_request(**new_request)

        if finished_requests:
            await self._engine_abort(finished_requests)

        if self.engine_use_ray:
            request_outputs = await self.engine.step.remote()
        else:
            request_outputs = await self.engine.step_async()

        # Put the outputs into the corresponding streams.
        for request_output in request_outputs:
            self.request_tracker.process_request_output(
                request_output, verbose=self.log_requests)

    async def _engine_abort(self, request_ids: Iterable[str]):
        if self.engine_use_ray:
            await self.engine.abort_request.remote(request_ids)
        else:
            self.engine.abort_request(request_ids)

    async def run_engine_loop(self):
        while True:
            await self.engine_step()
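            # Yield control so other coroutines (e.g. generate() callers
            # draining their streams) can run between engine iterations.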
            await asyncio.sleep(0)

    async def add_request(
        self,
        request_id: str,
        prompt: Optional[str],
        sampling_params: SamplingParams,
        prompt_token_ids: Optional[List[int]] = None,
        arrival_time: Optional[float] = None,
    ) -> AsyncStream:
        if self.log_requests:
            logger.info(f"Received request {request_id}: "
                        f"prompt: {prompt!r}, "
                        f"sampling params: {sampling_params}, "
                        f"prompt token ids: {prompt_token_ids}.")

        if not self.is_running:
            raise AsyncEngineDeadError(
                "Background loop is not running. If it was running, "
                "inspect the output to find the stacktrace of the "
                "error that caused the background loop to stop "
                "(AsyncEngineDeadError).")

        stream = self.request_tracker.add_request(
            request_id,
            prompt=prompt,
            sampling_params=sampling_params,
            prompt_token_ids=prompt_token_ids,
            arrival_time=arrival_time)

        return stream

    async def generate(
            self,
            prompt: Optional[str],
            sampling_params: SamplingParams,
            request_id: str,
            prompt_token_ids: Optional[List[int]] = None,
    ) -> AsyncIterator[RequestOutput]:
        """Generate outputs for a request.

        Generate outputs for a request. This method is a coroutine. It adds the
        request into the waiting queue of the LLMEngine and streams the outputs
        from the LLMEngine to the caller.

        Args:
            prompt: The prompt string. Can be None if prompt_token_ids is
                provided.
            sampling_params: The sampling parameters of the request.
            request_id: The unique id of the request.
            prompt_token_ids: The token IDs of the prompt. If None, we
                use the tokenizer to convert the prompts to token IDs.

        Yields:
            The output `RequestOutput` objects from the LLMEngine for the
            request.
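
        Example (minimal sketch; assumes a running AsyncLLMEngine bound to
        `engine`, with placeholder prompt, params, and request id):
            >>> params = SamplingParams(max_tokens=16)
            >>> async for output in engine.generate("Hi", params, "req-42"):
            ...     last = output  # each item is the request's state so far
            >>> print(last.outputs[0].text)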
        """
        # Preprocess the request.
        arrival_time = time.time()

        try:
            stream = await self.add_request(request_id,
                                            prompt,
                                            sampling_params,
                                            prompt_token_ids=prompt_token_ids,
                                            arrival_time=arrival_time)

            async for request_output in stream:
                yield request_output
        except Exception as e:
            # If there is an exception, abort the request.
            self._abort(request_id)
            raise e

    async def abort(self, request_id: str) -> None:
        """Abort a request.

        Abort a submitted request. If the request is finished or not found,
        this method will be a no-op.

        Args:
            request_id: The unique id of the request.
        """
        if not self.is_running:
            raise AsyncEngineDeadError(
                "Background loop is not running. If it was running, "
                "inspect the output to find the stacktrace of the "
                "error that caused the background loop to stop "
                "(AsyncEngineDeadError).")

        return self._abort(request_id)

    def _abort(self, request_id: str) -> None:
        """Abort a request.

        Abort a submitted request. If the request is finished or not found,
        this method will be a no-op.

        Args:
            request_id: The unique id of the request.
        """
        self.request_tracker.abort_request(request_id,
                                           verbose=self.log_requests)

    async def get_model_config(self) -> ModelConfig:
        """Get the model configuration of the vLLM engine."""
        if self.engine_use_ray:
            return await self.engine.get_model_config.remote()
        else:
            return self.engine.get_model_config()

    @classmethod
    def from_engine_args(cls,
                         engine_args: AsyncEngineArgs,
                         start_engine_loop: bool = False) -> "AsyncLLMEngine":
        """Creates an async LLM engine from the engine arguments."""
        # Create the engine configs.
        engine_configs = engine_args.create_engine_configs()
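        # create_engine_configs() yields (model, cache, parallel, scheduler)
        # configs; index 2 is the ParallelConfig needed for cluster setup.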
        parallel_config = engine_configs[2]
        # Initialize the cluster.
        distributed_init_method, placement_group = initialize_cluster(
            parallel_config, engine_args.engine_use_ray)
        # Create the async LLM engine.
        engine = cls(engine_args.worker_use_ray,
                     engine_args.engine_use_ray,
                     *engine_configs,
                     distributed_init_method,
                     placement_group,
                     log_requests=not engine_args.disable_log_requests,
                     log_stats=not engine_args.disable_log_stats,
                     start_engine_loop=start_engine_loop)
Zhuohan Li's avatar
Zhuohan Li committed
446
        return engine
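

if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module: drive the
    # engine end to end. The model name, prompt, and request id below are
    # placeholders.
    async def _demo() -> None:
        engine_args = AsyncEngineArgs(model="facebook/opt-125m")
        engine = AsyncLLMEngine.from_engine_args(engine_args,
                                                 start_engine_loop=True)
        params = SamplingParams(max_tokens=16)
        async for output in engine.generate("Hello, my name is", params,
                                            "demo-0"):
            print(output.outputs[0].text)

    asyncio.run(_demo())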