"vscode:/vscode.git/clone" did not exist on "885e3c201a9bc39a70a6bd2744c5ef6843a6c156"
Commit 33e8c0a8 authored by laibao
Browse files

No commit message

No commit message
parent fd5d9078
...@@ -160,51 +160,6 @@ curl http://localhost:8000/v1/chat/completions \ ...@@ -160,51 +160,6 @@ curl http://localhost:8000/v1/chat/completions \
pip install gradio
```
1.1 基于gradio的llava对话
```python
# gradio_openai_vlm_webserver.py
#
# Minimal Gradio chat front-end for a vLLM (OpenAI-compatible) LLaVA server.
# Usage: python gradio_openai_vlm_webserver.py -m <model-path> [--model-url URL]
import argparse
import gradio as gr
from openai import OpenAI

parser = argparse.ArgumentParser(description='LLaVA Chatbot Interface')
parser.add_argument('--model-url', type=str, default='http://localhost:8000/v1', help='Model URL')
parser.add_argument('-m', '--model', type=str, required=True, help='Model path')
parser.add_argument("--host", type=str, default=None)
parser.add_argument("--port", type=int, default=8001)
args = parser.parse_args()

# The vLLM OpenAI-compatible server does not check the API key; any placeholder works.
openai_api_key = "EMPTY"
openai_api_base = args.model_url
client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)

def predict(message, history, image):
    """Send the full chat transcript (plus an optional image reference) to the model.

    Args:
        message: The user's latest message text.
        history: Prior turns as (user, assistant) pairs, supplied by gr.ChatInterface.
        image: Local filepath of an uploaded image, or None.

    Returns:
        The assistant's reply text, or an error string if the request fails.
    """
    history_openai_format = []
    # BUG FIX: the original version ignored `history`, so every request lost all
    # prior conversational context. Replay it in OpenAI chat-message format.
    for user_turn, assistant_turn in history:
        history_openai_format.append({"role": "user", "content": user_turn})
        if assistant_turn:
            history_openai_format.append({"role": "assistant", "content": assistant_turn})
    user_message = message
    if image:
        # NOTE(review): this relies on the server resolving a markdown-style
        # local-file reference — presumably the vLLM LLaVA demo convention; verify.
        user_message += f" [local file]({image})"
    history_openai_format.append({"role": "user", "content": user_message})
    try:
        chat_response = client.chat.completions.create(
            model=args.model,
            messages=history_openai_format
        )
        return chat_response.choices[0].message.content
    except Exception as e:
        # Surface the failure in the chat window instead of crashing the UI.
        return f"An error occurred: {str(e)}"

gr.ChatInterface(
    predict,
    additional_inputs=[
        gr.Image(label="Upload Image", type="filepath", scale=1, width="50%")
    ],
    title="LLaVA Chatbot",
    description="Chat with the LLaVA model and upload images for visual understanding."
).queue().launch(server_name=args.host, server_port=args.port, share=True)
```
2.安装必要文件
2.1 启动gradio服务,根据提示操作
......
# gradio_openai_vlm_webserver.py
#
# Minimal Gradio chat front-end for a vLLM (OpenAI-compatible) LLaVA server.
# Usage: python gradio_openai_vlm_webserver.py -m <model-path> [--model-url URL]
import argparse
import gradio as gr
from openai import OpenAI

parser = argparse.ArgumentParser(description='LLaVA Chatbot Interface')
parser.add_argument('--model-url', type=str, default='http://localhost:8000/v1', help='Model URL')
parser.add_argument('-m', '--model', type=str, required=True, help='Model path')
parser.add_argument("--host", type=str, default=None)
parser.add_argument("--port", type=int, default=8001)
args = parser.parse_args()

# The vLLM OpenAI-compatible server does not check the API key; any placeholder works.
openai_api_key = "EMPTY"
openai_api_base = args.model_url
client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)

def predict(message, history, image):
    """Send the full chat transcript (plus an optional image reference) to the model.

    Args:
        message: The user's latest message text.
        history: Prior turns as (user, assistant) pairs, supplied by gr.ChatInterface.
        image: Local filepath of an uploaded image, or None.

    Returns:
        The assistant's reply text, or an error string if the request fails.
    """
    history_openai_format = []
    # BUG FIX: the original version ignored `history`, so every request lost all
    # prior conversational context. Replay it in OpenAI chat-message format.
    for user_turn, assistant_turn in history:
        history_openai_format.append({"role": "user", "content": user_turn})
        if assistant_turn:
            history_openai_format.append({"role": "assistant", "content": assistant_turn})
    user_message = message
    if image:
        # NOTE(review): this relies on the server resolving a markdown-style
        # local-file reference — presumably the vLLM LLaVA demo convention; verify.
        user_message += f" [local file]({image})"
    history_openai_format.append({"role": "user", "content": user_message})
    try:
        chat_response = client.chat.completions.create(
            model=args.model,
            messages=history_openai_format
        )
        return chat_response.choices[0].message.content
    except Exception as e:
        # Surface the failure in the chat window instead of crashing the UI.
        return f"An error occurred: {str(e)}"

gr.ChatInterface(
    predict,
    additional_inputs=[
        gr.Image(label="Upload Image", type="filepath", scale=1, width="50%")
    ],
    title="LLaVA Chatbot",
    description="Chat with the LLaVA model and upload images for visual understanding."
).queue().launch(server_name=args.host, server_port=args.port, share=True)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment