import argparse
import asyncio
from http import HTTPStatus
import json
import time
from typing import AsyncGenerator, Dict, List, Optional

from packaging import version

import fastapi
from fastapi import BackgroundTasks, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from fastchat.conversation import Conversation, SeparatorStyle
from fastchat.model.model_adapter import get_conversation_template
import uvicorn

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine

import sys
import os
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from openai_api_protocol_vllm import (
    CompletionRequest, CompletionResponse, CompletionResponseChoice,
    CompletionResponseStreamChoice, CompletionStreamResponse,
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo)

from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds

logger = init_logger(__name__)
served_model = None
app = fastapi.FastAPI()

from fastchat.conversation import register_conv_template, get_conv_template
from fastchat.model.model_adapter import BaseModelAdapter, model_adapters
import fastchat


def compare_version(version1, version2):
    # Return True if version1 >= version2, else False.
    v1 = version.parse(version1)
    v2 = version.parse(version2)
    return v1 >= v2


# fastchat >= 0.2.23 renamed Conversation.system to Conversation.system_message.
if compare_version(fastchat.__version__, '0.2.23'):
    use_old_conversation = False
else:
    use_old_conversation = True


def getConversation(name, system, roles, messages, offset, sep_style, sep,
                    sep2=None, stop_str=None, stop_token_ids=None):
    """Build a fastchat Conversation that works with both old and new APIs."""
    if not use_old_conversation:
        return Conversation(
            name=name,
            system_message=system,
            roles=roles,
            messages=messages,
            offset=offset,
            sep_style=sep_style,
            sep=sep,
            sep2=sep2,
            stop_str=stop_str,
            stop_token_ids=stop_token_ids,
        )
    else:
        return Conversation(
            name=name,
            system=system,
            roles=roles,
            messages=messages,
            offset=offset,
            sep_style=sep_style,
            sep=sep,
            sep2=sep2,
            stop_str=stop_str,
            stop_token_ids=stop_token_ids,
        )


# Chinese LLaMA Alpaca default template
register_conv_template(
    getConversation(
        name="chinese-llama-alpaca",
        system="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
        roles=("### Instruction:\n", "### Response:"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="\n\n",
        sep2="",
    )
)

# Chinese LLaMA Alpaca 2 default template
register_conv_template(
    getConversation(
        name="chinese-llama-alpaca-2",
        system="[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n",
        roles=("[INST]", "[/INST]"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
        stop_token_ids=[2],
    )
)


class ChineseLLaMAAlpacaAdapter(BaseModelAdapter):
    """The model adapter for Chinese-LLaMA-Alpaca"""

    use_fast_tokenizer = False

    def match(self, model_path: str):
        return "chinese-llama-alpaca" in model_path.lower()

    def get_default_conv_template(self, model_path: str) -> Conversation:
        return get_conv_template("chinese-llama-alpaca")


class ChineseLLaMAAlpaca2Adapter(BaseModelAdapter):
    """The model adapter for Chinese-LLaMA-Alpaca-2"""

    def match(self, model_path: str):
        return "chinese-llama-alpaca-2" in model_path.lower()

    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
        model.config.eos_token_id = tokenizer.eos_token_id
        model.config.pad_token_id = tokenizer.pad_token_id
        return model, tokenizer

    def get_default_conv_template(self, model_path: str) -> Conversation:
        return get_conv_template("chinese-llama-alpaca-2")


# Add the custom model adapters to the head of model_adapters so they take
# precedence over fastchat's built-in adapters.
model_adapters = [ChineseLLaMAAlpacaAdapter()] + model_adapters
model_adapters = [ChineseLLaMAAlpaca2Adapter()] + model_adapters
fastchat.model.model_adapter.model_adapters = model_adapters


def create_error_response(status_code: HTTPStatus,
                          message: str) -> JSONResponse:
    return JSONResponse(ErrorResponse(message=message,
                                      type="invalid_request_error").dict(),
                        status_code=status_code.value)


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc):  # pylint: disable=unused-argument
    return create_error_response(HTTPStatus.BAD_REQUEST, str(exc))


async def check_model(request) -> Optional[JSONResponse]:
    if request.model == served_model:
        return
    ret = create_error_response(
        HTTPStatus.NOT_FOUND,
        f"The model `{request.model}` does not exist.",
    )
    return ret


async def get_gen_prompt(request) -> str:
    conv = get_conversation_template(request.model)
    conv = getConversation(
        name=conv.name,
        system=conv.system_message if not use_old_conversation else conv.system,
        roles=conv.roles,
        messages=list(conv.messages),  # prevent in-place modification
        offset=conv.offset,
        sep_style=SeparatorStyle(conv.sep_style),
        sep=conv.sep,
        sep2=conv.sep2,
        stop_str=conv.stop_str,
        stop_token_ids=conv.stop_token_ids,
    )

    if isinstance(request.messages, str):
        prompt = request.messages
    else:
        for message in request.messages:
            msg_role = message["role"]
            if msg_role == "system":
                if not use_old_conversation:
                    conv.system_message = message["content"]
                else:
                    conv.system = message["content"]
            elif msg_role == "user":
                conv.append_message(conv.roles[0], message["content"])
            elif msg_role == "assistant":
                conv.append_message(conv.roles[1], message["content"])
            else:
                raise ValueError(f"Unknown role: {msg_role}")

        # Add a blank message for the assistant.
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

    return prompt
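
# Illustrative sketch (not executed): for a request against the
# "chinese-llama-alpaca-2" template with messages
#     [{"role": "user", "content": "Hello"}]
# get_gen_prompt() is expected to render something roughly like
#     "[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\nHello [/INST]"
# The exact spacing and turn separators depend on the installed fastchat
# version's SeparatorStyle.LLAMA2 implementation, so treat this only as a
# guide to the prompt shape, not as the authoritative output.
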

async def get_gen_prompt_nochat(request) -> str:
    conv = get_conversation_template(request.model)
    conv = getConversation(
        name=conv.name,
        system=conv.system_message if not use_old_conversation else conv.system,
        roles=conv.roles,
        messages=list(conv.messages),  # prevent in-place modification
        offset=conv.offset,
        sep_style=SeparatorStyle(conv.sep_style),
        sep=conv.sep,
        sep2=conv.sep2,
        stop_str=conv.stop_str,
        stop_token_ids=conv.stop_token_ids,
    )

    prompt = request.prompt
    conv.append_message(conv.roles[0], prompt)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()
    return prompt


async def check_length(request, prompt, model_config):
    if hasattr(model_config.hf_config, "max_sequence_length"):
        context_len = model_config.hf_config.max_sequence_length
    elif hasattr(model_config.hf_config, "seq_length"):
        context_len = model_config.hf_config.seq_length
    elif hasattr(model_config.hf_config, "max_position_embeddings"):
        context_len = model_config.hf_config.max_position_embeddings
    else:
        context_len = 2048

    input_ids = tokenizer(prompt).input_ids
    token_num = len(input_ids)

    if token_num + request.max_tokens > context_len:
        return create_error_response(
            HTTPStatus.BAD_REQUEST,
            f"This model's maximum context length is {context_len} tokens. "
            f"However, you requested {request.max_tokens + token_num} tokens "
            f"({token_num} in the messages, "
            f"{request.max_tokens} in the completion). "
            f"Please reduce the length of the messages or completion.",
        )
    else:
        return None


@app.get("/v1/models")
async def show_available_models():
    """Show available models. Right now we only have one model."""
    model_cards = [
        ModelCard(id=served_model,
                  root=served_model,
                  permission=[ModelPermission()])
    ]
    return ModelList(data=model_cards)


def create_logprobs(token_ids: List[int],
                    id_logprobs: List[Dict[int, float]],
                    initial_text_offset: int = 0) -> LogProbs:
    """Create OpenAI-style logprobs."""
    logprobs = LogProbs()
    last_token_len = 0
    for token_id, id_logprob in zip(token_ids, id_logprobs):
        token = tokenizer.convert_ids_to_tokens(token_id)
        logprobs.tokens.append(token)
        logprobs.token_logprobs.append(id_logprob[token_id])
        if len(logprobs.text_offset) == 0:
            logprobs.text_offset.append(initial_text_offset)
        else:
            logprobs.text_offset.append(logprobs.text_offset[-1] +
                                        last_token_len)
        last_token_len = len(token)

        logprobs.top_logprobs.append({
            tokenizer.convert_ids_to_tokens(i): p
            for i, p in id_logprob.items()
        })
    return logprobs
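
# Example request against the chat endpoint defined below (a sketch; it
# assumes the argparse defaults at the bottom of this file, i.e.
# http://localhost:8000, and that the served model name resolves to
# "chinese-llama-alpaca-2" -- adjust both to your deployment):
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "chinese-llama-alpaca-2",
#           "messages": [{"role": "user", "content": "Hello"}],
#           "stream": false
#         }'
#
# With "stream": true the endpoint returns Server-Sent Events: one
# "data: {...}\n\n" chunk per delta and a final "data: [DONE]" sentinel,
# as produced by completion_stream_generator() below.
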
@app.post("/v1/chat/completions")
async def create_chat_completion(raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI ChatCompletion API.

    NOTE: Currently we do not support the following features:
        - function_call (Users should implement this by themselves)
        - logit_bias (to be supported by vLLM engine)
    """
    request = ChatCompletionRequest(**await raw_request.json())
    logger.info(f"Received chat completion request: {request}")

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.logit_bias is not None:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    prompt = await get_gen_prompt(request)
    error_check_ret = await check_length(request, prompt, engine_model_config)
    if error_check_ret is not None:
        return error_check_ret

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"
    created_time = int(time.time())
    try:
        sampling_params = SamplingParams(
            n=request.n,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            stop=request.stop,
            max_tokens=request.max_tokens,
            best_of=request.best_of,
            top_k=request.top_k,
            ignore_eos=request.ignore_eos,
            use_beam_search=request.use_beam_search,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id)

    async def abort_request() -> None:
        await engine.abort(request_id)

    def create_stream_response_json(
        index: int,
        text: str,
        finish_reason: Optional[str] = None,
    ) -> str:
        choice_data = ChatCompletionResponseStreamChoice(
            index=index,
            delta=DeltaMessage(content=text),
            finish_reason=finish_reason,
        )
        response = ChatCompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        response_json = response.json(ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        # First chunk with role
        for i in range(request.n):
            choice_data = ChatCompletionResponseStreamChoice(
                index=i,
                delta=DeltaMessage(role="assistant"),
                finish_reason=None,
            )
            chunk = ChatCompletionStreamResponse(id=request_id,
                                                 choices=[choice_data],
                                                 model=model_name)
            data = chunk.json(exclude_unset=True, ensure_ascii=False)
            yield f"data: {data}\n\n"

        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        finish_reason=output.finish_reason,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if request.stream:
        background_tasks = BackgroundTasks()
        # Abort the request if the client disconnects.
        background_tasks.add_task(abort_request)
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream",
                                 background=background_tasks)

    # Non-streaming response
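    # Each RequestOutput yielded by the engine carries the cumulative text and
    # token ids generated so far, so only the last result is kept; the
    # disconnect check lets us abort the engine request early instead of
    # generating the full completion for a client that has gone away.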
    final_res: RequestOutput = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await abort_request()
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    for output in final_res.outputs:
        choice_data = ChatCompletionResponseChoice(
            index=output.index,
            message=ChatMessage(role="assistant", content=output.text),
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = ChatCompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When the user requests streaming but we don't stream, we still need
        # to return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response
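
# Example request for the plain-text completion endpoint below (same
# assumptions as the chat example above about host, port and served model
# name; note that the prompt is still wrapped into the conversation template
# by get_gen_prompt_nochat()):
#
#   curl http://localhost:8000/v1/completions \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "chinese-llama-alpaca-2",
#           "prompt": "Tell me about the Great Wall.",
#           "max_tokens": 256
#         }'
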
@app.post("/v1/completions")
async def create_completion(raw_request: Request):
    """Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/completions/create
    for the API specification. This API mimics the OpenAI Completion API.

    NOTE: Currently we do not support the following features:
        - echo (since the vLLM engine does not currently support
          getting the logprobs of prompt tokens)
        - suffix (the language models we currently support do not support
          suffix)
        - logit_bias (to be supported by vLLM engine)
    """
    request = CompletionRequest(**await raw_request.json())
    logger.info(f"Received completion request: {request}")

    error_check_ret = await check_model(request)
    if error_check_ret is not None:
        return error_check_ret

    if request.echo:
        # We do not support echo since the vLLM engine does not
        # currently support getting the logprobs of prompt tokens.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "echo is not currently supported")

    if request.suffix is not None:
        # The language models we currently support do not support suffix.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "suffix is not currently supported")

    if request.logit_bias is not None:
        # TODO: support logit_bias in vLLM engine.
        return create_error_response(HTTPStatus.BAD_REQUEST,
                                     "logit_bias is not currently supported")

    model_name = request.model
    request_id = f"cmpl-{random_uuid()}"

    if isinstance(request.prompt, list):
        if len(request.prompt) == 0:
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "please provide at least one prompt")
        if len(request.prompt) > 1:
            return create_error_response(
                HTTPStatus.BAD_REQUEST,
                "multiple prompts in a batch is not currently supported")
        prompt = request.prompt[0]
    else:
        prompt = request.prompt
    request.prompt = prompt
    prompt = await get_gen_prompt_nochat(request)

    created_time = int(time.time())
    try:
        sampling_params = SamplingParams(
            n=request.n,
            best_of=request.best_of,
            presence_penalty=request.presence_penalty,
            frequency_penalty=request.frequency_penalty,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k,
            stop=request.stop,
            ignore_eos=request.ignore_eos,
            max_tokens=request.max_tokens,
            logprobs=request.logprobs,
            use_beam_search=request.use_beam_search,
        )
    except ValueError as e:
        return create_error_response(HTTPStatus.BAD_REQUEST, str(e))

    result_generator = engine.generate(prompt, sampling_params, request_id)

    # Similar to the OpenAI API, when n != best_of, we do not stream the
    # results. In addition, we do not stream the results when using beam
    # search.
    stream = (request.stream
              and (request.best_of is None or request.n == request.best_of)
              and not request.use_beam_search)

    async def abort_request() -> None:
        await engine.abort(request_id)

    def create_stream_response_json(
        index: int,
        text: str,
        logprobs: Optional[LogProbs] = None,
        finish_reason: Optional[str] = None,
    ) -> str:
        choice_data = CompletionResponseStreamChoice(
            index=index,
            text=text,
            logprobs=logprobs,
            finish_reason=finish_reason,
        )
        response = CompletionStreamResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=[choice_data],
        )
        response_json = response.json(ensure_ascii=False)

        return response_json

    async def completion_stream_generator() -> AsyncGenerator[str, None]:
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        async for res in result_generator:
            res: RequestOutput
            for output in res.outputs:
                i = output.index
                delta_text = output.text[len(previous_texts[i]):]
                if request.logprobs is not None:
                    logprobs = create_logprobs(
                        output.token_ids[previous_num_tokens[i]:],
                        output.logprobs[previous_num_tokens[i]:],
                        len(previous_texts[i]))
                else:
                    logprobs = None
                previous_texts[i] = output.text
                previous_num_tokens[i] = len(output.token_ids)
                response_json = create_stream_response_json(
                    index=i,
                    text=delta_text,
                    logprobs=logprobs,
                )
                yield f"data: {response_json}\n\n"
                if output.finish_reason is not None:
                    logprobs = (LogProbs()
                                if request.logprobs is not None else None)
                    response_json = create_stream_response_json(
                        index=i,
                        text="",
                        logprobs=logprobs,
                        finish_reason=output.finish_reason,
                    )
                    yield f"data: {response_json}\n\n"
        yield "data: [DONE]\n\n"

    # Streaming response
    if stream:
        background_tasks = BackgroundTasks()
        # Abort the request if the client disconnects.
        background_tasks.add_task(abort_request)
        return StreamingResponse(completion_stream_generator(),
                                 media_type="text/event-stream",
                                 background=background_tasks)
    # Non-streaming response
    final_res: RequestOutput = None
    async for res in result_generator:
        if await raw_request.is_disconnected():
            # Abort the request if the client disconnects.
            await abort_request()
            return create_error_response(HTTPStatus.BAD_REQUEST,
                                         "Client disconnected")
        final_res = res
    assert final_res is not None
    choices = []
    for output in final_res.outputs:
        if request.logprobs is not None:
            logprobs = create_logprobs(output.token_ids, output.logprobs)
        else:
            logprobs = None
        choice_data = CompletionResponseChoice(
            index=output.index,
            text=output.text,
            logprobs=logprobs,
            finish_reason=output.finish_reason,
        )
        choices.append(choice_data)

    num_prompt_tokens = len(final_res.prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        completion_tokens=num_generated_tokens,
        total_tokens=num_prompt_tokens + num_generated_tokens,
    )
    response = CompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
    )

    if request.stream:
        # When the user requests streaming but we don't stream, we still need
        # to return a streaming response with a single event.
        response_json = response.json(ensure_ascii=False)

        async def fake_stream_generator() -> AsyncGenerator[str, None]:
            yield f"data: {response_json}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(fake_stream_generator(),
                                 media_type="text/event-stream")

    return response


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default="localhost",
                        help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    engine_model_config = asyncio.run(engine.get_model_config())

    # A separate tokenizer to map token IDs to strings.
    tokenizer = get_tokenizer(engine_args.tokenizer,
                              tokenizer_mode=engine_args.tokenizer_mode,
                              trust_remote_code=engine_args.trust_remote_code)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
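
# Example launch (a sketch; the engine flags such as --model come from vLLM's
# AsyncEngineArgs, and the script filename and model path are assumptions --
# substitute your own):
#
#   python openai_api_server_vllm.py \
#       --model /path/to/chinese-alpaca-2-7b \
#       --served-model-name chinese-llama-alpaca-2 \
#       --host 0.0.0.0 --port 8000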