# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/serve/openai_api_server.py

import argparse
import asyncio
import codecs
import json
import time
from http import HTTPStatus
from typing import AsyncGenerator, Dict, List, Optional, Tuple, Union

import fastapi
import uvicorn
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse, Response

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.entrypoints.openai.protocol import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
app = fastapi.FastAPI()
engine = None
response_role = None


def parse_args():
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser.add_argument("--chat-template",
                        type=str,
                        default=None,
                        help="The file path to the chat template, "
                        "or the template in single-line form "
                        "for the specified model")
    parser.add_argument("--response-role",
                        type=str,
                        default="assistant",
                        help="The role name to return if "
                        "`request.add_generation_prompt=true`.")

    parser = AsyncEngineArgs.add_cli_args(parser)
    return parser.parse_args()
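
# Example launch command (illustrative model name; any model that vLLM can
# load works the same way):
#   python -m vllm.entrypoints.openai.api_server --model facebook/opt-125m \
#       --port 8000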


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


def load_chat_template(args, tokenizer):
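    # The template may be given either as a path to a file or as the template
    # text itself. A minimal illustrative Jinja template (real models ship
    # their own) looks like:
    #   {% for message in messages %}{{ message['role'] }}: {{ message['content'] }}
    #   {% endfor %}{% if add_generation_prompt %}assistant: {% endif %}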
    if args.chat_template is not None:
        try:
            with open(args.chat_template, "r") as f:
                chat_template = f.read()
        except OSError:
            # If the argument is not a readable file, treat it as the template
            # string itself and decode escape sequences (e.g. "\n") so they
            # are interpreted correctly.
            chat_template = codecs.decode(args.chat_template, "unicode_escape")

        tokenizer.chat_template = chat_template
        logger.info(
            f"Using supplied chat template:\n{tokenizer.chat_template}")
    elif tokenizer.chat_template is not None:
        logger.info(f"Using default chat template:\n{tokenizer.chat_template}")
    else:
        logger.warning("No chat template provided. Chat API will not work.")


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def check_length(
    request: Union[ChatCompletionRequest, CompletionRequest],
    prompt: Optional[str] = None,
    prompt_ids: Optional[List[int]] = None
) -> Tuple[List[int], Optional[JSONResponse]]:
    assert (not (prompt is None and prompt_ids is None)
            and not (prompt is not None and prompt_ids is not None)
            ), "Exactly one of prompt or prompt_ids must be provided."
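    # Tokenize on the server when only the raw prompt text was supplied; if
    # max_tokens is unset, default it to the room left in the model context.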
    input_ids = prompt_ids if prompt_ids is not None else tokenizer(
        prompt).input_ids
    token_num = len(input_ids)

    if request.max_tokens is None:
        request.max_tokens = max_model_len - token_num
    if token_num + request.max_tokens > max_model_len:
        return input_ids, create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {max_model_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return input_ids, None


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)


def create_logprobs(
    token_ids: List[int],
    top_logprobs: Optional[List[Optional[Dict[int, float]]]] = None,
    num_output_top_logprobs: Optional[int] = None,
    initial_text_offset: int = 0,
) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
    if num_output_top_logprobs:
        logprobs.top_logprobs = []
    for i, token_id in enumerate(token_ids):
        step_top_logprobs = top_logprobs[i]
        if step_top_logprobs is not None:
            token_logprob = step_top_logprobs[token_id]
        else:
            token_logprob = None
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(token_logprob)
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        if num_output_top_logprobs:
            logprobs.top_logprobs.append({
                tokenizer.convert_ids_to_tokens(i): p
                for i, p in step_top_logprobs.items()
            } if step_top_logprobs else None)
    return logprobs


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    """Completion API similar to OpenAI's API.

    See  https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret
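    # A typical request body for this endpoint looks like (illustrative
    # values only):
    #   {"model": "<served model name>",
    #    "messages": [{"role": "user", "content": "Hello!"}],
    #    "stream": true}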

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    try:
        prompt = tokenizer.apply_chat_template(
            conversation=request.messages,
            tokenize=False,
            add_generation_prompt=request.add_generation_prompt)
    except Exception as e:
        logger.error(f"Error in applying chat template from request: {str(e)}")
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())
    chunk_object_type = "chat.completion.chunk"
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

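    # Hand the templated prompt to the async engine; the pre-computed token_ids
    # are passed along so the engine can skip re-tokenizing the prompt.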
    result_generator = engine.generate(prompt, sampling_params, request_id,
                                       token_ids)

    def get_role() -> str:
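        # When a generation prompt is appended, the reply is attributed to the
        # configured --response-role; otherwise the model continues the last
        # message, so its role is reused.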
        if request.add_generation_prompt:
            return response_role
        else:
            return request.messages[-1]["role"]

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # Send first response for each request.n (index) with the role
        role = get_role()
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i, delta=DeltaMessage(role=role), finish_reason=None)
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 object=chunk_object_type,
                                                 created=created_time,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        # Send response to echo the input portion of the last message
        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]
            if last_msg_content:
                for i in range(request.n):
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=last_msg_content),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"

        # Send response for each token for each request.n (index)
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        finish_reason_sent = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index

                if finish_reason_sent[i]:
                    continue

                if output.finish_reason is None:
                    # Send token-by-token response for each request.n
                    delta_text = output.text[len(previous_texts[i]):]
                    previous_texts[i] = output.text
                    completion_tokens = len(output.token_ids)
                    previous_num_tokens[i] = completion_tokens
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(content=delta_text),
                        finish_reason=None)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    data = chunk.json(exclude_unset=True, ensure_ascii=False)
                    yield f"data: {data}\n\n"
                else:
                    # Send the finish response for each request.n only once
                    prompt_tokens = len(res.prompt_token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(),
                        finish_reason=output.finish_reason)
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)
                    if final_usage is not None:
                        chunk.usage = final_usage
                    data = chunk.json(exclude_unset=True,
                                      exclude_none=True,
                                      ensure_ascii=False)
                    yield f"data: {data}\n\n"
                    finish_reason_sent[i] = True
        # Send the final [DONE] message once all request.n choices have finished
        yield "data: [DONE]\n\n"

    async def completion_full_generator():
        final_res: RequestOutput = None
        async for res in result_generator:
            if await raw_request.is_disconnected():
                # Abort the request if the client disconnects.
                await engine.abort(request_id)
                return create_error_response(HTTPStatus.BAD_REQUEST,
                                             "Client disconnected")
            final_res = res
        assert final_res is not None

        choices = []
        role = get_role()
        for output in final_res.outputs:
            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=ChatMessage(role=role, content=output.text),
                finish_reason=output.finish_reason,
            )
            choices.append(choice_data)

        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]

            for choice in choices:
                full_message = last_msg_content + choice.message.content
                choice.message.content = full_message

        num_prompt_tokens = len(final_res.prompt_token_ids)
        num_generated_tokens = sum(
            len(output.token_ids) for output in final_res.outputs)
        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_generated_tokens,
            total_tokens=num_prompt_tokens + num_generated_tokens,
        )
        response = ChatCompletionResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
        )

        return response

    # Streaming response
    if request.stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")
    else:
        return await completion_full_generator()


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret
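    # A typical request body for this endpoint looks like (illustrative
    # values only):
    #   {"model": "<served model name>",
    #    "prompt": "Once upon a time",
    #    "max_tokens": 16}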

    # OpenAI API supports echoing the prompt when max_tokens is 0.
    echo_without_generation = request.echo and request.max_tokens == 0

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    use_token_ids = False
    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        first_element = request.prompt[0]
        if isinstance(first_element, int):
            use_token_ids = True
            prompt = request.prompt
        elif isinstance(first_element, (str, list)):
            # TODO: handle the multiple-prompt case for list[list[int]]
            if len(request.prompt) > 1:
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "multiple prompts in a batch is not currently supported")
            use_token_ids = not isinstance(first_element, str)
            prompt = request.prompt[0]
    else:
        prompt = request.prompt

    if use_token_ids:
        _, error_check_ret = await check_length(request, prompt_ids=prompt)
    else:
        token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    created_time = int(time.time())
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens
            if not echo_without_generation else 1,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
            prompt_logprobs=request.logprobs if request.echo else None,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    if use_token_ids:
        result_generator = engine.generate(None,
                                           sampling_params,
                                           request_id,
                                           prompt_token_ids=prompt)
    else:
        result_generator = engine.generate(prompt, sampling_params, request_id,
                                           token_ids)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. In addition, we do not stream the results when beam search is
    # used.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        has_echoed = [False] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                token_ids = output.token_ids[previous_num_tokens[i]:]
                top_logprobs = output.logprobs[previous_num_tokens[i]:]
                offsets = len(previous_texts[i])
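                # On the first chunk for this choice, optionally echo the
                # prompt text, token ids and logprobs ahead of the new tokens.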
                if request.echo and not has_echoed[i]:
                    if not echo_without_generation:
                        delta_text = res.prompt + delta_text
                        token_ids = res.prompt_token_ids + token_ids
                        top_logprobs = res.prompt_logprobs + top_logprobs
                    else:
                        delta_text = res.prompt
                        token_ids = res.prompt_token_ids
                        top_logprobs = res.prompt_logprobs
                    has_echoed[i] = True
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        token_ids=token_ids,
                        top_logprobs=top_logprobs,
                        num_output_top_logprobs=request.logprobs,
                        initial_text_offset=offsets,
                    )
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                finish_reason = output.finish_reason
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                    finish_reason=finish_reason,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    prompt_tokens = len(res.prompt_token_ids)
                    completion_tokens = len(output.token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: RequestOutput = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    prompt_token_ids = final_res.prompt_token_ids
    prompt_logprobs = final_res.prompt_logprobs
    prompt_text = final_res.prompt
    for output in final_res.outputs:
        if request.logprobs is not None:
            if not echo_without_generation:
                token_ids = output.token_ids
                top_logprobs = output.logprobs
                if request.echo:
                    token_ids = prompt_token_ids + token_ids
                    top_logprobs = prompt_logprobs + top_logprobs
            else:
                token_ids = prompt_token_ids
                top_logprobs = prompt_logprobs
            logprobs = create_logprobs(
                token_ids=token_ids,
                top_logprobs=top_logprobs,
                num_output_top_logprobs=request.logprobs,
            )
        else:
            logprobs = None
        if not echo_without_generation:
            output_text = output.text
            if request.echo:
                output_text = prompt_text + output_text
        else:
            output_text = prompt_text
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output_text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When user requests streaming but we don't stream, we still need to
        # return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


if __name__ == "__main__":
    args = parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    response_role = args.response_role

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())
    max_model_len = engine_model_config.max_model_len

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(
        engine_model_config.tokenizer,
        tokenizer_mode=engine_model_config.tokenizer_mode,
        trust_remote_code=engine_model_config.trust_remote_code)
    load_chat_template(args, tokenizer)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)