norm / vllm — Commits

Commit 98fe8cb5 (unverified), parent ffa6d2f9
Authored Jul 03, 2023 by Zhuohan Li; committed via GitHub on Jul 03, 2023

[Server] Add option to specify chat template for chat endpoint (#345)
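This commit lets the OpenAI-compatible server choose the FastChat conversation template for the ChatCompletion endpoint independently of the model name, via a new `--chat-template` flag. A typical launch would then look like `python -m vllm.entrypoints.openai.api_server --model <model> --chat-template <template-name>`, where the template name is one of those registered in FastChat's `conversation.py` (for example `vicuna_v1.1`).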
Showing 2 changed files, with 23 additions and 6 deletions:

- requirements.txt (+1, -0)
- vllm/entrypoints/openai/api_server.py (+22, -6)
requirements.txt

```diff
@@ -9,3 +9,4 @@ xformers >= 0.0.19
 fastapi
 uvicorn
 pydantic  # Required for OpenAI server.
+fschat  # Required for OpenAI ChatCompletion Endpoint.
```
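The new `fschat` dependency supplies the conversation-template machinery (`get_conv_template`, `Conversation`) used by the server code below. Here is a minimal sketch of what it provides, assuming the FastChat package is installed; the template name `vicuna_v1.1` is just one of the names registered in FastChat's `conversation.py`:

```python
from fastchat.conversation import get_conv_template

# Look up a conversation template registered in fastchat/conversation.py.
conv = get_conv_template("vicuna_v1.1")

# Add one user turn, leave the assistant turn open, and render the
# conversation into the flat prompt string that is fed to the model.
conv.append_message(conv.roles[0], "What is the capital of France?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
```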
vllm/entrypoints/openai/api_server.py

```diff
@@ -36,6 +36,7 @@ TIMEOUT_KEEP_ALIVE = 5  # seconds
 logger = init_logger(__name__)
 
 served_model = None
+chat_template = None
 app = fastapi.FastAPI()
```
```diff
@@ -62,7 +63,7 @@ async def check_model(request) -> Optional[JSONResponse]:
 async def get_gen_prompt(request) -> str:
-    conv = get_conv_template(request.model)
+    conv = get_conv_template(chat_template)
     conv = Conversation(
         name=conv.name,
         system=conv.system,
```
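With this change, the template is resolved from the server-wide `chat_template` setting rather than per request from `request.model`, so clients can pass a served model name that does not itself match any FastChat template.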
```diff
@@ -553,13 +554,20 @@ if __name__ == "__main__":
                         type=json.loads,
                         default=["*"],
                         help="allowed headers")
+    parser.add_argument("--served-model-name",
+                        type=str,
+                        default=None,
+                        help="The model name used in the API. If not "
+                        "specified, the model name will be the same as "
+                        "the huggingface name.")
-    parser.add_argument("--served-model-name",
+    parser.add_argument("--chat-template",
                         type=str,
                         default=None,
-                        help="The model name used in the API. If not specified, "
-                        "the model name will be the same as the "
-                        "huggingface name.")
+                        help="The chat template name used in the ChatCompletion endpoint. If "
+                        "not specified, we use the API model name as the template name. See "
+                        "https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py "
+                        "for the list of available templates.")
     parser = AsyncEngineArgs.add_cli_args(parser)
     args = parser.parse_args()
```
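For context, here is a minimal sketch of exercising the ChatCompletion endpoint once the server is running. It assumes the pre-1.0 `openai` Python client that was current in mid-2023, the server's default `localhost:8000` address, and a placeholder model name:

```python
import openai

openai.api_key = "EMPTY"  # assumption: the server does not check API keys
openai.api_base = "http://localhost:8000/v1"  # assumed default host/port

completion = openai.ChatCompletion.create(
    model="facebook/opt-125m",  # placeholder; must match the served model name
    messages=[{"role": "user", "content": "Hello!"}],
)
print(completion.choices[0].message.content)
```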
```diff
@@ -573,7 +581,15 @@ if __name__ == "__main__":
     logger.info(f"args: {args}")
 
-    served_model = args.served_model_name or args.model
+    if args.served_model_name is not None:
+        served_model = args.served_model_name
+    else:
+        served_model = args.model
+
+    if args.chat_template is not None:
+        chat_template = args.chat_template
+    else:
+        chat_template = served_model
 
     engine_args = AsyncEngineArgs.from_cli_args(args)
     engine = AsyncLLMEngine.from_engine_args(engine_args)
```
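Net effect of the fallback chain: `--chat-template` defaults to the served model name, which in turn defaults to `--model`, so omitting the new flag preserves the previous behavior of looking the conversation template up by model name.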