# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/serve/openai_api_server.py

import argparse
import asyncio
import codecs
import json
import time
from http import HTTPStatus
from typing import AsyncGenerator, Dict, List, Optional, Tuple, Union

from aioprometheus import MetricsMiddleware
from aioprometheus.asgi.starlette import metrics
import fastapi
import uvicorn
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse, Response

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.engine.metrics import add_global_metrics_labels
from vllm.entrypoints.openai.protocol import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
app = fastapi.FastAPI()
engine = None
response_role = None


def parse_args():
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser.add_argument("--chat-template",
                        type=str,
                        default=None,
                        help="The file path to the chat template, "
                        "or the template in single-line form "
                        "for the specified model")
    parser.add_argument("--response-role",
                        type=str,
                        default="assistant",
                        help="The role name to return if "
                        "`request.add_generation_prompt=true`.")

    parser = AsyncEngineArgs.add_cli_args(parser)
    return parser.parse_args()
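
# Example launch (illustrative; "facebook/opt-125m" is a placeholder model and
# the engine flags come from AsyncEngineArgs, so adapt to your setup):
#
#   python -m vllm.entrypoints.openai.api_server \
#       --model facebook/opt-125m \
#       --host 0.0.0.0 --port 8000 \
#       --served-model-name opt-125m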


app.add_middleware(MetricsMiddleware)  # Trace HTTP server metrics
app.add_route("/metrics", metrics)  # Exposes HTTP metrics


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


def load_chat_template(args, tokenizer):
    if args.chat_template is not None:
        try:
            with open(args.chat_template, "r") as f:
                chat_template = f.read()
        except OSError:
            # If opening the file fails, treat args.chat_template as the
            # template string itself and decode it so escape sequences are
            # interpreted correctly.
            chat_template = codecs.decode(args.chat_template, "unicode_escape")

        tokenizer.chat_template = chat_template
        logger.info(
            f"Using supplied chat template:\n{tokenizer.chat_template}")
    elif tokenizer.chat_template is not None:
        logger.info(f"Using default chat template:\n{tokenizer.chat_template}")
    else:
        logger.warning("No chat template provided. Chat API will not work.")
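
# Illustrative --chat-template values (the actual template is model-specific,
# so treat these as sketches): a file path such as
#   --chat-template ./chatml.jinja
# or an inline single-line Jinja string whose escapes are decoded above, e.g.
#   --chat-template "{% for m in messages %}{{ m['role'] }}: {{ m['content'] }}\n{% endfor %}"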


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def check_length(
    request: Union[ChatCompletionRequest, CompletionRequest],
    prompt: Optional[str] = None,
    prompt_ids: Optional[List[int]] = None
) -> Tuple[List[int], Optional[JSONResponse]]:
    assert (not (prompt is None and prompt_ids is None)
            and not (prompt is not None and prompt_ids is not None)
            ), "Exactly one of prompt or prompt_ids should be provided."
    input_ids = prompt_ids if prompt_ids is not None else tokenizer(
        prompt).input_ids
    token_num = len(input_ids)

    if request.max_tokens is None:
        request.max_tokens = max_model_len - token_num
    if token_num + request.max_tokens > max_model_len:
        return input_ids, create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {max_model_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return input_ids, None
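
# For example (illustrative numbers): with max_model_len = 4096 and a
# 1000-token prompt, an unset request.max_tokens defaults to 3096, while a
# request for 3500 completion tokens would exceed 4096 and be rejected above.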


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)


def create_logprobs(
    token_ids: List[int],
    top_logprobs: Optional[List[Optional[Dict[int, float]]]] = None,
    num_output_top_logprobs: Optional[int] = None,
    initial_text_offset: int = 0,
) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
    if num_output_top_logprobs:
        logprobs.top_logprobs = []
    for i, token_id in enumerate(token_ids):
        step_top_logprobs = top_logprobs[i]
        if step_top_logprobs is not None:
            token_logprob = step_top_logprobs[token_id]
        else:
            token_logprob = None
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(token_logprob)
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        if num_output_top_logprobs:
            logprobs.top_logprobs.append({
                tokenizer.convert_ids_to_tokens(i): p
                for i, p in step_top_logprobs.items()
            } if step_top_logprobs else None)
    return logprobs


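# Illustrative request to the endpoint below (model name and content are
# placeholders; the payload follows the OpenAI ChatCompletion schema):
#
#   curl http://localhost:8000/v1/chat/completions \
#       -H "Content-Type: application/json" \
#       -d '{"model": "opt-125m",
#            "messages": [{"role": "user", "content": "Hello!"}],
#            "max_tokens": 32, "stream": false}'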
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    """ChatCompletion API similar to OpenAI's API.

    See  https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    try:
        prompt = tokenizer.apply_chat_template(
            conversation=request.messages,
            tokenize=False,
            add_generation_prompt=request.add_generation_prompt)
    except Exception as e:
        logger.error(f"Error in applying chat template from request: {str(e)}")
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())  # Unix timestamp for the "created" field
    chunk_object_type = "chat.completion.chunk"
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            repetition_penalty=request.repetition_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            min_p=request.min_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id,
                                       token_ids)

    def get_role() -> str:
        if request.add_generation_prompt:
            return response_role
        else:
            return request.messages[-1]["role"]

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # Send first response for each request.n (index) with the role
        role = get_role()
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i, delta=DeltaMessage(role=role), finish_reason=None)
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 object=chunk_object_type,
                                                 created=created_time,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        # Send response to echo the input portion of the last message
        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]
            if last_msg_content:
                for i in range(request.n):
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=last_msg_content),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"

        # Send response for each token for each request.n (index)
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        finish_reason_sent = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index

                if finish_reason_sent[i]:
                    continue

                if output.finish_reason is None:
                    # Send token-by-token response for each request.n
                    delta_text = output.text[len(previous_texts[i]):]
                    previous_texts[i] = output.text
                    previous_num_tokens[i] = len(output.token_ids)
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=delta_text),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"
                else:
                    # Send the finish response for each request.n only once
                    prompt_tokens = len(res.prompt_token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=previous_num_tokens[i],
                        total_tokens=prompt_tokens + previous_num_tokens[i],
                    )
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i, delta=[], finish_reason=output.finish_reason)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    if final_usage is not None:
                        chunk.usage = final_usage
                    data = chunk.json(exclude_unset=True,
                                      exclude_none=True,
                                      ensure_ascii=False)
                    yield f"data: {data}\n\n"
                    finish_reason_sent[i] = True
        # Send the final done message after all request.n choices are finished
        yield "data: [DONE]\n\n"

    async def completion_full_generator():
        final_res: RequestOutput = None
        async for res in result_generator:
            if await raw_request.is_disconnected():
                # Abort the request if the client disconnects.
                await engine.abort(request_id)
                return create_error_response(HTTPStatus.BAD_REQUEST,
                                             "Client disconnected")
            final_res = res
        assert final_res is not None

        choices = []
        role = get_role()
        for output in final_res.outputs:
            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=ChatMessage(role=role, content=output.text),
                finish_reason=output.finish_reason,
            )
            choices.append(choice_data)

        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]

            for choice in choices:
                full_message = last_msg_content + choice.message.content
                choice.message.content = full_message

        num_prompt_tokens = len(final_res.prompt_token_ids)
        num_generated_tokens = sum(
            len(output.token_ids) for output in final_res.outputs)
        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_generated_tokens,
            total_tokens=num_prompt_tokens + num_generated_tokens,
        )
        response = ChatCompletionResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
        )

        return response

    # Streaming response
    if request.stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")
    else:
        return await completion_full_generator()


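# Illustrative request to the endpoint below (model name and prompt are
# placeholders):
#
#   curl http://localhost:8000/v1/completions \
#       -H "Content-Type: application/json" \
#       -d '{"model": "opt-125m", "prompt": "San Francisco is a",
#            "max_tokens": 16, "logprobs": 1, "echo": false}'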
@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    # OpenAI API supports echoing the prompt when max_tokens is 0.
    echo_without_generation = request.echo and request.max_tokens == 0

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    use_token_ids = False
    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        first_element = request.prompt[0]
        if isinstance(first_element, int):
            use_token_ids = True
            prompt = request.prompt
        elif isinstance(first_element, (str, list)):
            # TODO: handle the multiple-prompt case for List[List[int]]
            if len(request.prompt) > 1:
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "multiple prompts in a batch is not currently supported")
            use_token_ids = not isinstance(first_element, str)
            prompt = request.prompt[0]
    else:
        prompt = request.prompt

    if use_token_ids:
        _, error_check_ret = await check_length(request, prompt_ids=prompt)
    else:
        token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    created_time = int(time.time())  # Unix timestamp for the "created" field
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            repetition_penalty=request.repetition_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            min_p=request.min_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens
            if not echo_without_generation else 1,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
            prompt_logprobs=request.logprobs if request.echo else None,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    if use_token_ids:
        result_generator = engine.generate(None,
                                           sampling_params,
                                           request_id,
                                           prompt_token_ids=prompt)
    else:
        result_generator = engine.generate(prompt, sampling_params, request_id,
                                           token_ids)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. In addition, we do not stream the results when beam search
    # is used.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        has_echoed = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                token_ids = output.token_ids[previous_num_tokens[i]:]
                if request.logprobs is not None:
                    top_logprobs = output.logprobs[previous_num_tokens[i]:]
                else:
                    top_logprobs = None
                offsets = len(previous_texts[i])
                if request.echo and not has_echoed[i]:
                    if not echo_without_generation:
                        delta_text = res.prompt + delta_text
                        token_ids = res.prompt_token_ids + token_ids
                        if top_logprobs:
                            top_logprobs = res.prompt_logprobs + top_logprobs
                    else:  # just echo the prompt
                        delta_text = res.prompt
                        token_ids = res.prompt_token_ids
                        if top_logprobs:
                            top_logprobs = res.prompt_logprobs
                    has_echoed[i] = True
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        token_ids=token_ids,
                        top_logprobs=top_logprobs,
                        num_output_top_logprobs=request.logprobs,
                        initial_text_offset=offsets,
                    )
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                finish_reason = output.finish_reason
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                    finish_reason=finish_reason,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    prompt_tokens = len(res.prompt_token_ids)
                    completion_tokens = len(output.token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: RequestOutput = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    prompt_token_ids = final_res.prompt_token_ids
    prompt_logprobs = final_res.prompt_logprobs
    prompt_text = final_res.prompt
    for output in final_res.outputs:
        if request.logprobs is not None:
            if not echo_without_generation:
                token_ids = output.token_ids
                top_logprobs = output.logprobs
                if request.echo:
                    token_ids = prompt_token_ids + token_ids
                    top_logprobs = prompt_logprobs + top_logprobs
            else:
                token_ids = prompt_token_ids
                top_logprobs = prompt_logprobs
            logprobs = create_logprobs(
                token_ids=token_ids,
                top_logprobs=top_logprobs,
                num_output_top_logprobs=request.logprobs,
            )
        else:
            logprobs = None
        if not echo_without_generation:
            output_text = output.text
            if request.echo:
                output_text = prompt_text + output_text
        else:
            output_text = prompt_text
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output_text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When user requests streaming but we don't stream, we still need to
        # return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


if __name__ == "__main__":
    args = parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    response_role = args.response_role

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())
    max_model_len = engine_model_config.max_model_len

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(
        engine_model_config.tokenizer,
        tokenizer_mode=engine_model_config.tokenizer_mode,
        trust_remote_code=engine_model_config.trust_remote_code)
    load_chat_template(args, tokenizer)

    # Register labels for metrics
    add_global_metrics_labels(model_name=engine_args.model)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
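
# Once the server is up, the auxiliary endpoints can be checked with plain GETs
# (host/port are illustrative):
#
#   curl http://localhost:8000/health      # liveness check, returns 200
#   curl http://localhost:8000/v1/models   # lists the single served model
#   curl http://localhost:8000/metrics     # Prometheus metrics via aioprometheus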