Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
norm
vllm
Commits
97460585
Unverified
Commit
97460585
authored
Jan 12, 2024
by
arkohut
Committed by
GitHub
Jan 11, 2024
Browse files
Add gradio chatbot for openai webserver (#2307)
parent
f745847e
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
81 additions
and
0 deletions
+81
-0
examples/gradio_openai_chatbot_webserver.py
examples/gradio_openai_chatbot_webserver.py
+81
-0
No files found.
examples/gradio_openai_chatbot_webserver.py
0 → 100644
View file @
97460585
import
argparse
from
openai
import
OpenAI
import
gradio
as
gr
def _build_cli_parser():
    """Construct the command-line parser for the chatbot demo."""
    cli = argparse.ArgumentParser(
        description='Chatbot Interface with Customizable Parameters')
    cli.add_argument('--model-url',
                     type=str,
                     default='http://localhost:8000/v1',
                     help='Model URL')
    cli.add_argument('-m',
                     '--model',
                     type=str,
                     required=True,
                     help='Model name for the chatbot')
    cli.add_argument('--temp',
                     type=float,
                     default=0.8,
                     help='Temperature for text generation')
    cli.add_argument('--stop-token-ids',
                     type=str,
                     default='',
                     help='Comma-separated stop token IDs')
    cli.add_argument("--host", type=str, default=None)
    cli.add_argument("--port", type=int, default=8001)
    return cli


# Parse the command-line arguments once at startup.
args = _build_cli_parser().parse_args()
# vLLM's OpenAI-compatible server does not check the API key, but the
# OpenAI client library requires a non-empty value.
openai_api_key = "EMPTY"
openai_api_base = args.model_url

# HTTP client pointed at the vLLM server's OpenAI-compatible endpoint.
client = OpenAI(
    base_url=openai_api_base,
    api_key=openai_api_key,
)
def predict(message, history):
    """Stream a chat completion for *message* given the Gradio chat history.

    Args:
        message: The latest user message (str).
        history: Gradio-style history — a list of (user, assistant)
            message-text pairs from previous turns.

    Yields:
        The accumulated assistant reply so far (str), once per streamed
        chunk, so Gradio can render the response incrementally.
    """
    # Convert chat history to OpenAI format.
    history_openai_format = [{
        "role": "system",
        "content": "You are a great ai assistant."
    }]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({
            "role": "assistant",
            "content": assistant
        })
    history_openai_format.append({"role": "user", "content": message})

    # Parse the comma-separated --stop-token-ids flag; an empty flag means
    # no stop tokens. (Loop variable renamed from `id`, which shadowed the
    # builtin of the same name.)
    if args.stop_token_ids:
        stop_token_ids = [
            int(token.strip()) for token in args.stop_token_ids.split(',')
            if token.strip()
        ]
    else:
        stop_token_ids = []

    # Create a chat completion request and send it to the API server.
    stream = client.chat.completions.create(
        model=args.model,  # Model name to use
        messages=history_openai_format,  # Chat history
        temperature=args.temp,  # Temperature for text generation
        stream=True,  # Stream response
        extra_body={
            'repetition_penalty': 1,
            'stop_token_ids': stop_token_ids,
        })

    # Read and return generated text from response stream.
    partial_message = ""
    for chunk in stream:
        # delta.content is None on chunks that carry no text (e.g. the
        # initial role chunk); treat those as empty strings.
        partial_message += (chunk.choices[0].delta.content or "")
        yield partial_message
# Build a Gradio chat UI around predict() and serve it. queue() enables
# request queuing (returns the interface itself), and launch() blocks
# while the web server runs.
demo = gr.ChatInterface(predict)
demo.queue()
demo.launch(server_name=args.host,
            server_port=args.port,
            share=True)
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment