gradio_openai_vlm_webserver.py 1.42 KB
Newer Older
laibao's avatar
laibao committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#gradio_openai_vlm_webserver.py
import argparse
import base64

import gradio as gr
from openai import OpenAI

# Command-line configuration for the chat UI and the backend endpoint.
parser = argparse.ArgumentParser(description='LLaVA Chatbot Interface')
parser.add_argument('--model-url', type=str, default='http://localhost:8000/v1', help='Model URL')
parser.add_argument('-m', '--model', type=str, required=True, help='Model path')
parser.add_argument("--host", type=str, default=None)
parser.add_argument("--port", type=int, default=8001)
args = parser.parse_args()

# The vLLM/OpenAI-compatible server does not check credentials, so any
# placeholder key works; only the base URL matters.
client = OpenAI(api_key="EMPTY", base_url=args.model_url)

def predict(message, history, image):
    """Answer one chat turn via the OpenAI-compatible VLM backend.

    Args:
        message: The user's latest text message.
        history: Prior turns from gr.ChatInterface as (user, assistant)
            pairs. NOTE(review): newer gradio versions can deliver
            message-dicts instead of pairs — confirm against the
            installed gradio version.
        image: Optional local file path of an uploaded image.

    Returns:
        The assistant's reply text, or an error string if the request fails.
    """
    history_openai_format = []
    # Replay the prior turns so the model sees the conversation context.
    # (Previously `history` was ignored, making every turn stateless.)
    for user_msg, assistant_msg in history:
        history_openai_format.append({"role": "user", "content": user_msg})
        if assistant_msg is not None:
            history_openai_format.append({"role": "assistant", "content": assistant_msg})

    if image:
        # OpenAI-compatible vision endpoints expect the image as an
        # `image_url` content part (base64 data URL) — a markdown link
        # embedded in the text is not interpreted as an image.
        with open(image, "rb") as f:
            encoded = base64.b64encode(f.read()).decode("utf-8")
        content = [
            {"type": "text", "text": message},
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}},
        ]
    else:
        content = message
    history_openai_format.append({"role": "user", "content": content})

    try:
        chat_response = client.chat.completions.create(
            model=args.model,
            messages=history_openai_format,
        )
        return chat_response.choices[0].message.content
    except Exception as e:
        # Show backend failures in the chat window instead of crashing the UI.
        return f"An error occurred: {str(e)}"
# Build the chat UI: the image uploader is wired in as an extra input so
# `predict` receives (message, history, image) on every turn.
chat_ui = gr.ChatInterface(
    predict,
    additional_inputs=[
        gr.Image(label="Upload Image", type="filepath",scale=1, width="50%")
    ],
    title="LLaVA Chatbot",
    description="Chat with the LLaVA model and upload images for visual understanding.",
)
chat_ui.queue()
# Serve on the configured host/port; `share=True` also opens a public tunnel.
chat_ui.launch(server_name=args.host, server_port=args.port, share=True)