# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/serve/openai_api_server.py

import argparse
import asyncio
import json
import time
from http import HTTPStatus
from typing import AsyncGenerator, Dict, List, Optional, Tuple, Union

import fastapi
import uvicorn
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse, Response
from packaging import version

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.entrypoints.openai.protocol import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

try:
    import fastchat
    from fastchat.conversation import Conversation, SeparatorStyle
    from fastchat.model.model_adapter import get_conversation_template
    _fastchat_available = True
except ImportError:
    _fastchat_available = False

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
app = fastapi.FastAPI()
engine = None


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc):  # pylint: disable=unused-argument
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def get_gen_prompt(request) -> str:
    if not _fastchat_available:
        raise ModuleNotFoundError(
            "fastchat is not installed. Please install fastchat to use "
            "the chat completion and conversation APIs: `$ pip install fschat`"
        )
    if version.parse(fastchat.__version__) < version.parse("0.2.23"):
        raise ImportError(
            f"fastchat version is too low. Current version: "
            f"{fastchat.__version__}. Please upgrade fastchat: "
            "`$ pip install -U fschat`")

    conv = get_conversation_template(request.model)
    conv = Conversation(
        name=conv.name,
        system_template=conv.system_template,
        system_message=conv.system_message,
        roles=conv.roles,
        messages=list(conv.messages),  # prevent in-place modification
        offset=conv.offset,
        sep_style=SeparatorStyle(conv.sep_style),
        sep=conv.sep,
        sep2=conv.sep2,
        stop_str=conv.stop_str,
        stop_token_ids=conv.stop_token_ids,
    )

    if isinstance(request.messages, str):
        prompt = request.messages
    else:
        for message in request.messages:
            msg_role = message["role"]
            if msg_role == "system":
                conv.system_message = message["content"]
            elif msg_role == "user":
                conv.append_message(conv.roles[0], message["content"])
            elif msg_role == "assistant":
                conv.append_message(conv.roles[1], message["content"])
            else:
                raise ValueError(f"Unknown role: {msg_role}")

        # Add a blank message for the assistant.
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

    return prompt
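
# Illustrative `messages` payload consumed above (mirrors the OpenAI chat
# format; the content strings are made up):
#   [{"role": "system", "content": "You are a helpful assistant."},
#    {"role": "user", "content": "Hello!"}]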


async def check_length(
    request: Union[ChatCompletionRequest, CompletionRequest],
    prompt: Optional[str] = None,
    prompt_ids: Optional[List[int]] = None
) -> Tuple[List[int], Optional[JSONResponse]]:
    assert (not (prompt is None and prompt_ids is None)
            and not (prompt is not None and prompt_ids is not None)
            ), "Exactly one of prompt or prompt_ids should be provided."
    if prompt_ids is not None:
        input_ids = prompt_ids
    else:
        input_ids = tokenizer(prompt).input_ids
    token_num = len(input_ids)

    if request.max_tokens is None:
        request.max_tokens = max_model_len - token_num
    if token_num + request.max_tokens > max_model_len:
        return input_ids, create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {max_model_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return input_ids, None
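
# Example: with max_model_len = 2048 and a 2000-token prompt, an unset
# max_tokens defaults to 48, while an explicit max_tokens = 100 returns the
# BAD_REQUEST error above (2000 + 100 > 2048).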


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)
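
# Illustrative query (assumes the server is running on localhost:8000):
#   curl http://localhost:8000/v1/models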


def create_logprobs(token_ids: List[int],
                    id_logprobs: List[Dict[int, float]],
                    initial_text_offset: int = 0) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
    for token_id, id_logprob in zip(token_ids, id_logprobs):
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(id_logprob[token_id])
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        logprobs.top_logprobs.append({
            tokenizer.convert_ids_to_tokens(i): p
            for i, p in id_logprob.items()
        })
    return logprobs
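
# Illustrative result for two generated tokens (token strings and numbers
# are made up; real values depend on the tokenizer and model):
#   LogProbs(text_offset=[0, 5], token_logprobs=[-0.1, -2.3],
#            tokens=["Hello", ","],
#            top_logprobs=[{"Hello": -0.1}, {",": -2.3}])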


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    """Chat completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    logger.info(f"Received chat completion request: {request}")

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    prompt = await get_gen_prompt(request)
    token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())  # Unix timestamp, as the OpenAI API expects
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id,
                                       token_ids)

    def create_stream_response_json(
        index: int,
        text: str,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = ChatCompletionResponseStreamChoice(
            index=index,
            delta=DeltaMessage(content=text),
            finish_reason=finish_reason,
        )
        response = ChatCompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        # Exclude unset fields to keep each SSE chunk minimal.
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # First chunk with role
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i,
                delta=DeltaMessage(role="assistant"),
                finish_reason=None,
            )
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                previous_texts[i] = output.text
                completion_tokens = len(output.token_ids)
                previous_num_tokens[i] = completion_tokens
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    prompt_tokens = len(res.prompt_token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if request.stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: Optional[RequestOutput] = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    for output in final_res.outputs:
        choice_data = ChatCompletionResponseChoice(
            index=output.index,
            message=ChatMessage(role="assistant", content=output.text),
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = ChatCompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When the user requests streaming but we don't stream, we still need
        # to return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - echo (since the vLLM engine does not currently support
          getting the logprobs of prompt tokens)
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """
    logger.info(f"Received completion request: {request}")

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.echo:
        # We do not support echo since the vLLM engine does not
        # currently support getting the logprobs of prompt tokens.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "echo is not currently supported")

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None and len(request.logit_bias) > 0:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    use_token_ids = False
    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        first_element = request.prompt[0]
        if isinstance(first_element, int):
            use_token_ids = True
            prompt = request.prompt
        elif isinstance(first_element, (str, list)):
            # TODO: handle the multiple-prompt case for List[List[int]].
            if len(request.prompt) > 1:
                return create_error_response(
                    HTTPStatus.BAD_REQUEST,
                    "multiple prompts in a batch is not currently supported")
            use_token_ids = not isinstance(first_element, str)
            prompt = request.prompt[0]
    else:
        prompt = request.prompt

    if use_token_ids:
        _, error_check_ret = await check_length(request, prompt_ids=prompt)
    else:
        token_ids, error_check_ret = await check_length(request, prompt=prompt)
    if error_check_ret is not None:
        return error_check_ret

    created_time = int(time.time())  # Unix timestamp, as the OpenAI API expects
    try:
        spaces_between_special_tokens = request.spaces_between_special_tokens
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            stop=request.stop,
            stop_token_ids=request.stop_token_ids,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    if use_token_ids:
        result_generator = engine.generate(None,
                                           sampling_params,
                                           request_id,
                                           prompt_token_ids=prompt)
    else:
        result_generator = engine.generate(prompt, sampling_params, request_id,
                                           token_ids)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. We also do not stream the results when beam search is used.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
        usage: Optional[UsageInfo] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        if usage is not None:
            response.usage = usage
        response_json = response.json(exclude_unset=True, ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        output.token_ids[previous_num_tokens[i]:],
                        output.logprobs[previous_num_tokens[i]:],
                        len(previous_texts[i]))
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    prompt_tokens = len(res.prompt_token_ids)
                    completion_tokens = len(output.token_ids)
                    final_usage = UsageInfo(
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=prompt_tokens + completion_tokens,
                    )
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                        usage=final_usage,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream")

    # Non-streaming response
    final_res: Optional[RequestOutput] = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await engine.abort(request_id)
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    for output in final_res.outputs:
        if request.logprobs is not None:
            logprobs = create_logprobs(output.token_ids, output.logprobs)
        else:
            logprobs = None
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output.text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When the user requests streaming but we don't stream, we still need
        # to return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


if __name__ == "__main__":
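    # Example launch (illustrative; --model and the other engine flags come
    # from AsyncEngineArgs.add_cli_args below):
    #   python api_server.py --model facebook/opt-125m --host 0.0.0.0 --port 8000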
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")

    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())
    max_model_len = engine_model_config.max_model_len

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(engine_args.tokenizer,
                              tokenizer_mode=engine_args.tokenizer_mode,
                              trust_remote_code=engine_args.trust_remote_code)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)