# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/serve/openai_api_server.py

import argparse
import asyncio
import codecs
import json
import time
from http import HTTPStatus
from typing import AsyncGenerator, Dict, List, Optional, Tuple, Union

from aioprometheus import MetricsMiddleware
from aioprometheus.asgi.starlette import metrics
import fastapi
import uvicorn
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse, Response

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.engine.metrics import add_global_metrics_labels
from vllm.entrypoints.openai.protocol import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
app = fastapi.FastAPI()
engine = None
response_role = None


def parse_args():
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser.add_argument("--chat-template",
                        type=str,
                        default=None,
                        help="The file path to the chat template, "
                        "or the template in single-line form "
                        "for the specified model")
    parser.add_argument("--response-role",
                        type=str,
                        default="assistant",
                        help="The role name to return if "
                        "`request.add_generation_prompt=true`.")
    parser.add_argument("--ssl-keyfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL key file")
    parser.add_argument("--ssl-certfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL cert file")

    parser = AsyncEngineArgs.add_cli_args(parser)
    return parser.parse_args()
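
# Example launch command (the model name here is illustrative; --model and
# the other engine flags come from AsyncEngineArgs.add_cli_args above):
#
#   python -m vllm.entrypoints.openai.api_server \
#       --model facebook/opt-125m --port 8000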


app.add_middleware(MetricsMiddleware)  # Trace HTTP server metrics
app.add_route("/metrics", metrics)  # Exposes HTTP metrics


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


def load_chat_template(args, tokenizer):
    if args.chat_template is not None:
        try:
            with open(args.chat_template, "r") as f:
                chat_template = f.read()
        except OSError:
            # If opening the file fails, treat the argument itself as the
            # template string; decode it so escape sequences such as "\n"
            # are interpreted correctly.
            chat_template = codecs.decode(args.chat_template, "unicode_escape")

        tokenizer.chat_template = chat_template
        logger.info(
            f"Using supplied chat template:\n{tokenizer.chat_template}")
    elif tokenizer.chat_template is not None:
        logger.info(f"Using default chat template:\n{tokenizer.chat_template}")
    else:
        logger.warning("No chat template provided. Chat API will not work.")
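
# Illustrative --chat-template value (a Jinja2 template, shown only as an
# example of the expected format; it is not a template shipped with vLLM):
#   {% for m in messages %}{{ m['role'] }}: {{ m['content'] }}\n{% endfor %}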


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def check_length(
    request: Union[ChatCompletionRequest, CompletionRequest],
    prompt: Optional[str] = None,
    prompt_ids: Optional[List[int]] = None
) -> Tuple[List[int], Optional[JSONResponse]]:
    assert (not (prompt is None and prompt_ids is None)
            and not (prompt is not None and prompt_ids is not None)
            ), "Exactly one of prompt or prompt_ids should be provided."
    input_ids = prompt_ids if prompt_ids is not None else tokenizer(
        prompt).input_ids
    token_num = len(input_ids)

    if request.max_tokens is None:
        request.max_tokens = max_model_len - token_num
    if token_num + request.max_tokens > max_model_len:
        return input_ids, create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {max_model_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return input_ids, None
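
# Worked example of the arithmetic above: with max_model_len = 4096 and a
# 4000-token prompt, an omitted max_tokens defaults to 4096 - 4000 = 96,
# while an explicit max_tokens of 200 is rejected (4000 + 200 > 4096).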


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)
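
# e.g. `curl -i http://localhost:8000/health` should return an empty 200
# response (assuming the default host and port).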


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)
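
# Hedged query sketch (assuming the default host/port):
#   curl http://localhost:8000/v1/models
# returns a ModelList whose single ModelCard id equals `served_model`.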


def create_logprobs(
    token_ids: List[int],
    top_logprobs: Optional[List[Optional[Dict[int, float]]]] = None,
    num_output_top_logprobs: Optional[int] = None,
    initial_text_offset: int = 0,
) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
    if num_output_top_logprobs:
        logprobs.top_logprobs = []
    for i, token_id in enumerate(token_ids):
        step_top_logprobs = top_logprobs[i]
        if step_top_logprobs is not None:
            token_logprob = step_top_logprobs[token_id]
        else:
            token_logprob = None
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(token_logprob)
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        if num_output_top_logprobs:
            logprobs.top_logprobs.append({
                tokenizer.convert_ids_to_tokens(i): p
                for i, p in step_top_logprobs.items()
            } if step_top_logprobs else None)
    return logprobs
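
# Illustrative result shape, mirroring OpenAI's completions `logprobs` object
# (all values made up):
#   LogProbs(tokens=["Hello", ","], token_logprobs=[-0.1, -2.3],
#            text_offset=[0, 5], top_logprobs=[{"Hello": -0.1}, {",": -2.3}])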


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    """Completion API similar to OpenAI's API.

    See  https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    try:
        prompt = tokenizer.apply_chat_template(
            conversation=request.messages,
            tokenize=False,
            add_generation_prompt=request.add_generation_prompt)
    except Exception as e:
        logger.error(f"Error in applying chat template from request: {str(e)}")
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())  # "created" must be a Unix timestamp
    chunk_object_type = "chat.completion.chunk"
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            repetition_penalty=request.repetition_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            min_p=request.min_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id,
                                       token_ids)

    def get_role() -> str:
        if request.add_generation_prompt:
            return response_role
        else:
            return request.messages[-1]["role"]

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # Send first response for each request.n (index) with the role
        role = get_role()
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i, delta=DeltaMessage(role=role), finish_reason=None)
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 object=chunk_object_type,
                                                 created=created_time,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        # Send response to echo the input portion of the last message
        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]
            if last_msg_content:
                for i in range(request.n):
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=last_msg_content),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"

        # Send response for each token for each request.n (index)
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        finish_reason_sent = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index

                if finish_reason_sent[i]:
                    continue

                if output.finish_reason is None:
                    # Send token-by-token response for each request.n
                    delta_text = output.text[len(previous_texts[i]):]
                    previous_texts[i] = output.text
                    previous_num_tokens[i] = len(output.token_ids)
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=delta_text),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"
                else:
                    # Send the finish response for each request.n only once
                    prompt_tokens = len(res.prompt_token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=previous_num_tokens[i],
                        total_tokens=prompt_tokens + previous_num_tokens[i],
                    )
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(),  # empty delta in the final chunk
                        finish_reason=output.finish_reason)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    if final_usage is not None:
                        chunk.usage = final_usage
                    data = chunk.json(exclude_unset=True,
                                      exclude_none=True,
                                      ensure_ascii=False)
                    yield f"data: {data}\n\n"
                    finish_reason_sent[i] = True
        # Send the final [DONE] message after all request.n choices finish
        yield "data: [DONE]\n\n"
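        # Each yield above is one Server-Sent Events frame; a client sees,
        # e.g. (payloads illustrative):
        #   data: {"id": "cmpl-...", "object": "chat.completion.chunk", ...}
        #   data: [DONE]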

    async def completion_full_generator():
        final_res: RequestOutput = None
        async for res in result_generator:
            if await raw_request.is_disconnected():
                # Abort the request if the client disconnects.
                await engine.abort(request_id)
                return create_error_response(HTTPStatus.BAD_REQUEST,
                                             "Client disconnected")
            final_res = res
        assert final_res is not None

        choices = []
        role = get_role()
        for output in final_res.outputs:
            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=ChatMessage(role=role, content=output.text),
                finish_reason=output.finish_reason,
            )
            choices.append(choice_data)

        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]

            for choice in choices:
                full_message = last_msg_content + choice.message.content
                choice.message.content = full_message

        num_prompt_tokens = len(final_res.prompt_token_ids)
        num_generated_tokens = sum(
            len(output.token_ids) for output in final_res.outputs)
        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_generated_tokens,
            total_tokens=num_prompt_tokens + num_generated_tokens,
        )
        response = ChatCompletionResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
        )

        return response

    # Streaming response
    if request.stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")
    else:
        return await completion_full_generator()


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """
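    # Hedged request sketch (model name illustrative; assumes the default
    # host/port):
    #   curl http://localhost:8000/v1/completions \
    #     -H "Content-Type: application/json" \
    #     -d '{"model": "my-model", "prompt": "Hello", "max_tokens": 16}'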

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    # OpenAI API supports echoing the prompt when max_tokens is 0.
    echo_without_generation = request.echo and request.max_tokens == 0

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    use_token_ids = False
    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        first_element = request.prompt[0]
        if isinstance(first_element, int):
            use_token_ids = True
            prompt = request.prompt
        elif isinstance(first_element, (str, list)):
            # TODO: handle the multiple-prompt case for list[list[int]]
            if len(request.prompt) > 1:
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "multiple prompts in a batch is not currently supported")
            use_token_ids = not isinstance(first_element, str)
            prompt = request.prompt[0]
    else:
        prompt = request.prompt

    if use_token_ids:
        _, error_check_ret = await check_length(request, prompt_ids=prompt)
    else:
        token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    created_time = int(time.time())  # "created" must be a Unix timestamp
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            repetition_penalty=request.repetition_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            min_p=request.min_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens
            if not echo_without_generation else 1,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
            prompt_logprobs=request.logprobs if request.echo else None,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    if use_token_ids:
        result_generator = engine.generate(None,
                                           sampling_params,
                                           request_id,
                                           prompt_token_ids=prompt)
    else:
        result_generator = engine.generate(prompt, sampling_params, request_id,
                                           token_ids)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. In addition, we do not stream the results when using beam
    # search.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)
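    # e.g. a request with n=2 and best_of=4 is served without streaming even
    # if request.stream is set, since the best-of candidates are only chosen
    # once generation finishes.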

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        has_echoed = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                token_ids = output.token_ids[previous_num_tokens[i]:]
                if request.logprobs is not None:
                    top_logprobs = output.logprobs[previous_num_tokens[i]:]
                else:
                    top_logprobs = None
                offsets = len(previous_texts[i])
                if request.echo and not has_echoed[i]:
                    if not echo_without_generation:
                        delta_text = res.prompt + delta_text
                        token_ids = res.prompt_token_ids + token_ids
                        if top_logprobs:
                            top_logprobs = res.prompt_logprobs + top_logprobs
                    else:  # just return the prompt
                        delta_text = res.prompt
                        token_ids = res.prompt_token_ids
                        if top_logprobs:
                            top_logprobs = res.prompt_logprobs
                    has_echoed[i] = True
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        token_ids=token_ids,
                        top_logprobs=top_logprobs,
                        num_output_top_logprobs=request.logprobs,
                        initial_text_offset=offsets,
                    )
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                finish_reason = output.finish_reason
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                    finish_reason=finish_reason,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    prompt_tokens = len(res.prompt_token_ids)
                    completion_tokens = len(output.token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: RequestOutput = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    prompt_token_ids = final_res.prompt_token_ids
    prompt_logprobs = final_res.prompt_logprobs
    prompt_text = final_res.prompt
    for output in final_res.outputs:
        if request.logprobs is not None:
            if not echo_without_generation:
                token_ids = output.token_ids
                top_logprobs = output.logprobs
                if request.echo:
                    token_ids = prompt_token_ids + token_ids
                    top_logprobs = prompt_logprobs + top_logprobs
            else:
                token_ids = prompt_token_ids
                top_logprobs = prompt_logprobs
            logprobs = create_logprobs(
                token_ids=token_ids,
                top_logprobs=top_logprobs,
                num_output_top_logprobs=request.logprobs,
            )
        else:
            logprobs = None
        if not echo_without_generation:
            output_text = output.text
            if request.echo:
                output_text = prompt_text + output_text
        else:
            output_text = prompt_text
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output_text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When the user requests streaming but we do not stream, we still
        # return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


if __name__ == "__main__":
    args = parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    response_role = args.response_role

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())
    max_model_len = engine_model_config.max_model_len

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(
        engine_model_config.tokenizer,
        tokenizer_mode=engine_model_config.tokenizer_mode,
        trust_remote_code=engine_model_config.trust_remote_code)
    load_chat_template(args, tokenizer)

    # Register labels for metrics
    add_global_metrics_labels(model_name=engine_args.model)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
                ssl_keyfile=args.ssl_keyfile,
                ssl_certfile=args.ssl_certfile)