# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/serve/openai_api_server.py

import argparse
import asyncio
import json
import time
from http import HTTPStatus
from typing import AsyncGenerator, Dict, List, Optional, Tuple, Union

import fastapi
import uvicorn
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse, Response
from packaging import version

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.entrypoints.openai.protocol import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

try:
    import fastchat
    from fastchat.conversation import Conversation, SeparatorStyle
    from fastchat.model.model_adapter import get_conversation_template
    _fastchat_available = True
except ImportError:
    _fastchat_available = False

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
app = fastapi.FastAPI()
engine = None


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def get_gen_prompt(request) -> str:
    if not _fastchat_available:
        raise ModuleNotFoundError(
            "fastchat is not installed. Please install fastchat to use "
            "the chat completion and conversation APIs: `$ pip install fschat`"
        )
    if version.parse(fastchat.__version__) < version.parse("0.2.23"):
        raise ImportError(
            "fastchat version is too low. Current version: "
            f"{fastchat.__version__}. Please upgrade fastchat: "
            "`$ pip install -U fschat`")

    conv = get_conversation_template(request.model)
    conv = Conversation(
        name=conv.name,
        system_template=conv.system_template,
        system_message=conv.system_message,
        roles=conv.roles,
        messages=list(conv.messages),  # prevent in-place modification
        offset=conv.offset,
        sep_style=SeparatorStyle(conv.sep_style),
        sep=conv.sep,
        sep2=conv.sep2,
        stop_str=conv.stop_str,
        stop_token_ids=conv.stop_token_ids,
    )

    if isinstance(request.messages, str):
        prompt = request.messages
    else:
        for message in request.messages:
            msg_role = message["role"]
            if msg_role == "system":
                conv.system_message = message["content"]
            elif msg_role == "user":
                conv.append_message(conv.roles[0], message["content"])
            elif msg_role == "assistant":
                conv.append_message(conv.roles[1], message["content"])
            else:
                raise ValueError(f"Unknown role: {msg_role}")

        # Add a blank message for the assistant.
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

    return prompt
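
# Illustration only (the exact text depends on the model's fastchat
# template): with a Vicuna-style template, messages such as
#     [{"role": "user", "content": "Hello!"}]
# render roughly as "<system prompt> USER: Hello! ASSISTANT:", where the
# blank assistant turn appended above marks where generation begins.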


async def check_length(
    request: Union[ChatCompletionRequest, CompletionRequest],
    prompt: Optional[str] = None,
    prompt_ids: Optional[List[int]] = None
) -> Tuple[List[int], Optional[JSONResponse]]:
    assert (prompt is None) != (prompt_ids is None), (
        "Exactly one of prompt or prompt_ids should be provided.")
    input_ids = prompt_ids if prompt_ids is not None else tokenizer(
        prompt).input_ids
    token_num = len(input_ids)

    if request.max_tokens is None:
        request.max_tokens = max_model_len - token_num
    if token_num + request.max_tokens > max_model_len:
        return input_ids, create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {max_model_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return input_ids, None
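
# Worked example (illustrative numbers): with max_model_len = 2048 and a
# 1000-token prompt, an omitted max_tokens defaults to 2048 - 1000 = 1048;
# asking for max_tokens = 1500 would need 1000 + 1500 = 2500 tokens and
# triggers the HTTP 400 error above.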


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)
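
# Example (assuming the server was started with --served-model-name my-model):
#     $ curl http://localhost:8000/v1/models
# returns a ModelList containing a single ModelCard whose id is "my-model".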


def create_logprobs(
    token_ids: List[int],
    top_logprobs: Optional[List[Optional[Dict[int, float]]]] = None,
    num_output_top_logprobs: Optional[int] = None,
    initial_text_offset: int = 0,
) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
    if num_output_top_logprobs:
        logprobs.top_logprobs = []
    for i, token_id in enumerate(token_ids):
        step_top_logprobs = top_logprobs[i]
        if step_top_logprobs is not None:
            token_logprob = step_top_logprobs[token_id]
        else:
            token_logprob = None
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(token_logprob)
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        if num_output_top_logprobs:
            logprobs.top_logprobs.append({
                tokenizer.convert_ids_to_tokens(i): p
                for i, p in step_top_logprobs.items()
            } if step_top_logprobs else None)
    return logprobs
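
# The returned object mirrors OpenAI's logprobs layout; illustrative values:
#     tokens=["Hello", ","], token_logprobs=[-0.1, -0.5], text_offset=[0, 5],
#     top_logprobs=[{"Hello": -0.1, "Hi": -2.3}, {",": -0.5, "!": -1.7}]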


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    """Completion API similar to OpenAI's API.

    See  https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    logger.info(f"Received chat completion request: {request}")

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    prompt = await get_gen_prompt(request)
    token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id,
                                       token_ids)

    def create_stream_response_json(
        index: int,
        text: str,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = ChatCompletionResponseStreamChoice(
            index=index,
            delta=DeltaMessage(content=text),
            finish_reason=finish_reason,
        )
        response = ChatCompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        # Exclude unset fields to keep each SSE event small.
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # First chunk with role
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i,
                delta=DeltaMessage(role="assistant"),
                finish_reason=None,
            )
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                previous_texts[i] = output.text
                completion_tokens = len(output.token_ids)
                previous_num_tokens[i] = completion_tokens
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    prompt_tokens = len(res.prompt_token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if request.stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: Optional[RequestOutput] = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    for output in final_res.outputs:
        choice_data = ChatCompletionResponseChoice(
            index=output.index,
            message=ChatMessage(role="assistant", content=output.text),
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = ChatCompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When user requests streaming but we don't stream, we still need to
        # return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response
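
# Example request (hypothetical model name and prompt; any OpenAI-compatible
# client works the same way):
#     $ curl http://localhost:8000/v1/chat/completions \
#         -H "Content-Type: application/json" \
#         -d '{"model": "my-model",
#              "messages": [{"role": "user", "content": "Hello!"}]}'
# Adding '"stream": true' yields incremental `data: ...` SSE chunks
# terminated by `data: [DONE]`.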


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """
    logger.info(f"Received completion request: {request}")

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    # OpenAI API supports echoing the prompt when max_tokens is 0.
    echo_without_generation = request.echo and request.max_tokens == 0

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    use_token_ids = False
    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        first_element = request.prompt[0]
        if isinstance(first_element, int):
            use_token_ids = True
            prompt = request.prompt
        elif isinstance(first_element, (str, list)):
            # TODO: handle the multiple-prompt case for List[List[int]] inputs
            if len(request.prompt) > 1:
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "multiple prompts in a batch is not currently supported")
            use_token_ids = not isinstance(first_element, str)
            prompt = request.prompt[0]
    else:
        prompt = request.prompt

    if use_token_ids:
        _, error_check_ret = await check_length(request, prompt_ids=prompt)
    else:
        token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    created_time = int(time.time())
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens
            if not echo_without_generation else 1,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
            prompt_logprobs=request.logprobs if request.echo else None,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    if use_token_ids:
        result_generator = engine.generate(None,
                                           sampling_params,
                                           request_id,
                                           prompt_token_ids=prompt)
    else:
        result_generator = engine.generate(prompt, sampling_params, request_id,
                                           token_ids)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. In addition, we do not stream the results when beam search
    # is used.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        has_echoed = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                token_ids = output.token_ids[previous_num_tokens[i]:]
                top_logprobs = output.logprobs[previous_num_tokens[i]:]
                offsets = len(previous_texts[i])
                if request.echo and not has_echoed[i]:
                    if not echo_without_generation:
                        delta_text = res.prompt + delta_text
                        token_ids = res.prompt_token_ids + token_ids
                        top_logprobs = res.prompt_logprobs + top_logprobs
                    else:
                        delta_text = res.prompt
                        token_ids = res.prompt_token_ids
                        top_logprobs = res.prompt_logprobs
                    has_echoed[i] = True
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        token_ids=token_ids,
                        top_logprobs=top_logprobs,
                        num_output_top_logprobs=request.logprobs,
                        initial_text_offset=offsets,
                    )
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                finish_reason = output.finish_reason
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                    finish_reason=finish_reason,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    prompt_tokens = len(res.prompt_token_ids)
                    completion_tokens = len(output.token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: Optional[RequestOutput] = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    prompt_token_ids = final_res.prompt_token_ids
    prompt_logprobs = final_res.prompt_logprobs
    prompt_text = final_res.prompt
    for output in final_res.outputs:
        if request.logprobs is not None:
            if not echo_without_generation:
                token_ids = output.token_ids
                top_logprobs = output.logprobs
                if request.echo:
                    token_ids = prompt_token_ids + token_ids
                    top_logprobs = prompt_logprobs + top_logprobs
            else:
                token_ids = prompt_token_ids
                top_logprobs = prompt_logprobs
            logprobs = create_logprobs(
                token_ids=token_ids,
                top_logprobs=top_logprobs,
                num_output_top_logprobs=request.logprobs,
            )
        else:
            logprobs = None
        if not echo_without_generation:
            output_text = output.text
            if request.echo:
                output_text = prompt_text + output_text
        else:
            output_text = prompt_text
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output_text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When user requests streaming but we don't stream, we still need to
        # return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response
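
# Example request (hypothetical model name and prompt):
#     $ curl http://localhost:8000/v1/completions \
#         -H "Content-Type: application/json" \
#         -d '{"model": "my-model", "prompt": "Once upon a time",
#              "max_tokens": 32}'
# With '"echo": true' the prompt (and, when "logprobs" is set, its prompt
# logprobs) is prepended to the returned text.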


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")

    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())
    max_model_len = engine_model_config.max_model_len

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(
        engine_model_config.tokenizer,
        tokenizer_mode=engine_model_config.tokenizer_mode,
        trust_remote_code=engine_model_config.trust_remote_code)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
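
# Typical launch (illustrative flags; the model may be a local path or a
# Hugging Face Hub id):
#     $ python -m vllm.entrypoints.openai.api_server \
#         --model facebook/opt-125m --host 0.0.0.0 --port 8000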