api_server.py

import argparse
import json
from typing import AsyncGenerator

from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
import uvicorn

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds.
TIMEOUT_TO_PREVENT_DEADLOCK = 1  # seconds.
app = FastAPI()


@app.post("/generate")
async def generate(request: Request) -> Response:
    """Generate completion for the request.

    The request should be a JSON object with the following fields:
    - prompt: the prompt to use for the generation.
    - stream: whether to stream the results or not.
    - other fields: the sampling parameters (See `SamplingParams` for details).
    """
    request_dict = await request.json()
    prompt = request_dict.pop("prompt")
    stream = request_dict.pop("stream", False)
    sampling_params = SamplingParams(**request_dict)
    request_id = random_uuid()

    # The engine is created with start_engine_loop=False, so start its
    # background loop lazily on the first request.
    if not engine.is_running:
        engine.start_background_loop()

    results_generator = engine.generate(prompt, sampling_params, request_id)

    # Streaming case
    async def stream_results() -> AsyncGenerator[bytes, None]:
        async for request_output in results_generator:
            prompt = request_output.prompt
            text_outputs = [
                prompt + output.text for output in request_output.outputs
            ]
            ret = {"text": text_outputs}
            # Delimit each streamed JSON chunk with a NUL byte so clients can
            # split the byte stream back into individual JSON objects.
            yield (json.dumps(ret) + "\0").encode("utf-8")

    async def abort_request() -> None:
        await engine.abort(request_id)

    if stream:
        background_tasks = BackgroundTasks()
        # Abort the request if the client disconnects.
        background_tasks.add_task(abort_request)
        return StreamingResponse(stream_results(), background=background_tasks)

    # Non-streaming case
    final_output = None
    async for request_output in results_generator:
        if await request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            # 499 is the de facto "client closed request" status code.
            return Response(status_code=499)
        final_output = request_output

    assert final_output is not None
    prompt = final_output.prompt
    text_outputs = [prompt + output.text for output in final_output.outputs]
    ret = {"text": text_outputs}
    return JSONResponse(ret)
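
# Example request (an illustrative sketch; it assumes the server is reachable
# on localhost:8000 and that "max_tokens" is an accepted SamplingParams field):
#
#   curl http://localhost:8000/generate \
#       -H "Content-Type: application/json" \
#       -d '{"prompt": "San Francisco is a", "max_tokens": 16, "stream": false}'
#
# With "stream": true, the response body is a sequence of JSON objects
# separated by NUL bytes, each containing the text generated so far.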


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8000)
    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()

    engine_args = AsyncEngineArgs.from_cli_args(args)
    # Create the engine without starting its background loop; generate()
    # starts it on the first request.
    engine = AsyncLLMEngine.from_engine_args(engine_args,
                                             start_engine_loop=False)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="debug",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
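
# Example launch (an illustrative sketch; "--model" and the other engine flags
# come from AsyncEngineArgs.add_cli_args, and the model name below is only a
# placeholder):
#
#   python api_server.py --model facebook/opt-125m --host 0.0.0.0 --port 8000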