# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/serve/openai_api_server.py
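#
# Example launch (illustrative; the model name is a placeholder, and any
# AsyncEngineArgs flag is also accepted on this command line):
#   python -m vllm.entrypoints.openai.api_server \
#       --model <model-name> --port 8000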

import argparse
import asyncio
import codecs
import json
import time
from contextlib import asynccontextmanager
from http import HTTPStatus
from typing import AsyncGenerator, Dict, List, Optional, Tuple, Union

from aioprometheus import MetricsMiddleware
from aioprometheus.asgi.starlette import metrics
import fastapi
import uvicorn
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse, Response

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.engine.metrics import add_global_metrics_labels
from vllm.entrypoints.openai.protocol import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
engine_args = None
engine = None
response_role = None


@asynccontextmanager
async def lifespan(app: fastapi.FastAPI):
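    # Start a background task that periodically (every 10s) forces the engine
    # to log its stats, unless stats logging is disabled via engine args.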

    async def _force_log():
        while True:
            await asyncio.sleep(10)
            await engine.do_log_stats()

    if not engine_args.disable_log_stats:
        asyncio.create_task(_force_log())

    yield


app = fastapi.FastAPI(lifespan=lifespan)


def parse_args():
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser.add_argument("--chat-template",
                        type=str,
                        default=None,
                        help="The file path to the chat template, "
                        "or the template in single-line form "
                        "for the specified model")
    parser.add_argument("--response-role",
                        type=str,
                        default="assistant",
                        help="The role name to return if "
                        "`request.add_generation_prompt=true`.")
    parser.add_argument("--ssl-keyfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL key file")
    parser.add_argument("--ssl-certfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL cert file")
    parser.add_argument(
        "--root-path",
        type=str,
        default=None,
        help="FastAPI root_path when app is behind a path based routing proxy")

    parser = AsyncEngineArgs.add_cli_args(parser)
    return parser.parse_args()


app.add_middleware(MetricsMiddleware)  # Trace HTTP server metrics
app.add_route("/metrics", metrics)  # Exposes HTTP metrics


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


def load_chat_template(args, tokenizer):
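    # Note: --chat-template may point at a template file or carry the template
    # inline as a single escaped string; both forms are handled below.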
    if args.chat_template is not None:
        try:
            with open(args.chat_template, "r") as f:
                chat_template = f.read()
        except OSError:
            # If opening the file fails, fall back to treating the argument as
            # an inline template and decode it so escape sequences (e.g. \n)
            # are interpreted correctly.
            chat_template = codecs.decode(args.chat_template, "unicode_escape")

        tokenizer.chat_template = chat_template
        logger.info(
            f"Using supplied chat template:\n{tokenizer.chat_template}")
    elif tokenizer.chat_template is not None:
        logger.info(f"Using default chat template:\n{tokenizer.chat_template}")
    else:
        logger.warning("No chat template provided. Chat API will not work.")


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def check_length(
    request: Union[ChatCompletionRequest, CompletionRequest],
    prompt: Optional[str] = None,
    prompt_ids: Optional[List[int]] = None
) -> Tuple[List[int], Optional[JSONResponse]]:
    assert (not (prompt is None and prompt_ids is None)
            and not (prompt is not None and prompt_ids is not None)
            ), "Either prompt or prompt_ids should be provided."
    input_ids = prompt_ids if prompt_ids is not None else tokenizer(
        prompt).input_ids
    token_num = len(input_ids)

    if request.max_tokens is None:
        request.max_tokens = max_model_len - token_num
    if token_num + request.max_tokens > max_model_len:
        return input_ids, create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {max_model_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return input_ids, None


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)


def create_logprobs(
    token_ids: List[int],
    top_logprobs: Optional[List[Optional[Dict[int, float]]]] = None,
    num_output_top_logprobs: Optional[int] = None,
    initial_text_offset: int = 0,
) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
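    # OpenAI-style logprobs are parallel lists: `tokens`, `token_logprobs`,
    # `text_offset` (character offset of each token in the text) and, when
    # requested, `top_logprobs` (one dict of alternatives per position).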
    if num_output_top_logprobs:
        logprobs.top_logprobs = []
    for i, token_id in enumerate(token_ids):
        step_top_logprobs = top_logprobs[i]
        if step_top_logprobs is not None:
            token_logprob = step_top_logprobs[token_id]
        else:
            token_logprob = None
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(token_logprob)
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        if num_output_top_logprobs:
            logprobs.top_logprobs.append({
                tokenizer.convert_ids_to_tokens(i): p
                for i, p in step_top_logprobs.items()
            } if step_top_logprobs else None)
    return logprobs


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    try:
        prompt = tokenizer.apply_chat_template(
            conversation=request.messages,
            tokenize=False,
            add_generation_prompt=request.add_generation_prompt)
    except Exception as e:
        logger.error(f"Error in applying chat template from request: {str(e)}")
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())  # Unix time, per the OpenAI API
    chunk_object_type = "chat.completion.chunk"
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            repetition_penalty=request.repetition_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            min_p=request.min_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id,
                                       token_ids)

    def get_role() -> str:
        if request.add_generation_prompt:
            return response_role
        else:
            return request.messages[-1]["role"]

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # Send first response for each request.n (index) with the role
        role = get_role()
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i, delta=DeltaMessage(role=role), finish_reason=None)
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 object=chunk_object_type,
                                                 created=created_time,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        # Send response to echo the input portion of the last message
        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]
            if last_msg_content:
                for i in range(request.n):
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=last_msg_content),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"

        # Send response for each token for each request.n (index)
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        finish_reason_sent = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index

                if finish_reason_sent[i]:
                    continue

                if output.finish_reason is None:
                    # Send token-by-token response for each request.n
                    delta_text = output.text[len(previous_texts[i]):]
                    previous_texts[i] = output.text
                    previous_num_tokens[i] = len(output.token_ids)
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=delta_text),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"
                else:
                    # Send the finish response for each request.n only once
                    prompt_tokens = len(res.prompt_token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=previous_num_tokens[i],
                        total_tokens=prompt_tokens + previous_num_tokens[i],
                    )
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i, delta=[], finish_reason=output.finish_reason)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    if final_usage is not None:
                        chunk.usage = final_usage
                    data = chunk.json(exclude_unset=True,
                                      exclude_none=True,
                                      ensure_ascii=False)
                    yield f"data: {data}\n\n"
                    finish_reason_sent[i] = True
        # Send the final done message after all request.n choices are finished
        yield "data: [DONE]\n\n"

    async def completion_full_generator():
        final_res: RequestOutput = None
        async for res in result_generator:
            if await raw_request.is_disconnected():
                # Abort the request if the client disconnects.
                await engine.abort(request_id)
                return create_error_response(HTTPStatus.BAD_REQUEST,
                                             "Client disconnected")
            final_res = res
        assert final_res is not None

        choices = []
        role = get_role()
        for output in final_res.outputs:
            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=ChatMessage(role=role, content=output.text),
                finish_reason=output.finish_reason,
            )
            choices.append(choice_data)

        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]

            for choice in choices:
                full_message = last_msg_content + choice.message.content
                choice.message.content = full_message

        num_prompt_tokens = len(final_res.prompt_token_ids)
        num_generated_tokens = sum(
            len(output.token_ids) for output in final_res.outputs)
        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_generated_tokens,
            total_tokens=num_prompt_tokens + num_generated_tokens,
        )
        response = ChatCompletionResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
        )

        return response

    # Streaming response
    if request.stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")
    else:
        return await completion_full_generator()


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    # OpenAI API supports echoing the prompt when max_tokens is 0.
    echo_without_generation = request.echo and request.max_tokens == 0

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    use_token_ids = False
    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        first_element = request.prompt[0]
        if isinstance(first_element, int):
            use_token_ids = True
            prompt = request.prompt
        elif isinstance(first_element, (str, list)):
            # TODO: handle the multiple-prompt case for list[list[int]]
            if len(request.prompt) > 1:
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "multiple prompts in a batch is not currently supported")
            use_token_ids = not isinstance(first_element, str)
            prompt = request.prompt[0]
    else:
        prompt = request.prompt

    if use_token_ids:
        _, error_check_ret = await check_length(request, prompt_ids=prompt)
    else:
        token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    created_time = int(time.time())  # Unix time, per the OpenAI API
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            repetition_penalty=request.repetition_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            min_p=request.min_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens
            if not echo_without_generation else 1,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
            prompt_logprobs=request.logprobs if request.echo else None,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    if use_token_ids:
        result_generator = engine.generate(None,
                                           sampling_params,
                                           request_id,
                                           prompt_token_ids=prompt)
    else:
        result_generator = engine.generate(prompt, sampling_params, request_id,
                                           token_ids)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. In addition, we do not stream when using beam search.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        has_echoed = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                token_ids = output.token_ids[previous_num_tokens[i]:]
                if request.logprobs is not None:
                    top_logprobs = output.logprobs[previous_num_tokens[i]:]
                else:
                    top_logprobs = None
                offsets = len(previous_texts[i])
                if request.echo and not has_echoed[i]:
                    if not echo_without_generation:
                        delta_text = res.prompt + delta_text
                        token_ids = res.prompt_token_ids + token_ids
                        if top_logprobs:
                            top_logprobs = res.prompt_logprobs + top_logprobs
                    else:  # only return the prompt
                        delta_text = res.prompt
                        token_ids = res.prompt_token_ids
                        if top_logprobs:
                            top_logprobs = res.prompt_logprobs
                    has_echoed[i] = True
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        token_ids=token_ids,
                        top_logprobs=top_logprobs,
                        num_output_top_logprobs=request.logprobs,
                        initial_text_offset=offsets,
                    )
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                finish_reason = output.finish_reason
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                    finish_reason=finish_reason,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    prompt_tokens = len(res.prompt_token_ids)
                    completion_tokens = len(output.token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: RequestOutput = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    prompt_token_ids = final_res.prompt_token_ids
    prompt_logprobs = final_res.prompt_logprobs
    prompt_text = final_res.prompt
    for output in final_res.outputs:
        if request.logprobs is not None:
            if not echo_without_generation:
                token_ids = output.token_ids
                top_logprobs = output.logprobs
                if request.echo:
                    token_ids = prompt_token_ids + token_ids
                    top_logprobs = prompt_logprobs + top_logprobs
            else:
                token_ids = prompt_token_ids
                top_logprobs = prompt_logprobs
            logprobs = create_logprobs(
                token_ids=token_ids,
                top_logprobs=top_logprobs,
                num_output_top_logprobs=request.logprobs,
            )
        else:
            logprobs = None
        if not echo_without_generation:
            output_text = output.text
            if request.echo:
                output_text = prompt_text + output_text
        else:
            output_text = prompt_text
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output_text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When user requests streaming but we don't stream, we still need to
        # return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


if __name__ == "__main__":
    args = parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    response_role = args.response_role

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())
    max_model_len = engine_model_config.max_model_len

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(
        engine_model_config.tokenizer,
        tokenizer_mode=engine_model_config.tokenizer_mode,
        trust_remote_code=engine_model_config.trust_remote_code)
    load_chat_template(args, tokenizer)

    # Register labels for metrics
    add_global_metrics_labels(model_name=engine_args.model)

    app.root_path = args.root_path
    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
                ssl_keyfile=args.ssl_keyfile,
                ssl_certfile=args.ssl_certfile)