Unverified Commit f6af3a65 authored by Lianmin Zheng's avatar Lianmin Zheng Committed by GitHub
Browse files

Cleanup readme, llava examples, usage examples and nccl init (#1194)

parent c9064e6f
""" """
Usage: Usage:
pip install opencv-python-headless pip install opencv-python-headless
python3 srt_example_llava.py
python3 srt_example_llava_v.py
""" """
import argparse import argparse
...@@ -9,6 +10,8 @@ import csv ...@@ -9,6 +10,8 @@ import csv
import os import os
import time import time
import requests
import sglang as sgl import sglang as sgl
......
"""
Usage: python3 srt_example_yi_vl.py
Requirements: transformers==4.38
"""
import sglang as sgl
@sgl.function
def image_qa(s, image_path, question):
    """Ask *question* about the image at *image_path*, generating an answer."""
    user_turn = sgl.image(image_path) + question
    s += sgl.user(user_turn)
    s += sgl.assistant(sgl.gen("answer"))
def single():
    """Run one non-streaming request and print the generated answer."""
    result = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=64,
        stop="###",
    )
    print(result["answer"], "\n")
def stream():
    """Run one request in streaming mode, printing tokens as they arrive."""
    result = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=64,
        stream=True,
        stop="###",
    )
    for chunk in result.text_iter("answer"):
        print(chunk, end="", flush=True)
    print()
def batch():
    """Run two requests as one batch and print each answer."""
    requests = [
        {"image_path": "images/cat.jpeg", "question": "What is this?"},
        {"image_path": "images/dog.jpeg", "question": "What is this?"},
    ]
    results = image_qa.run_batch(
        requests,
        max_new_tokens=64,
        stop="###",
    )
    for state in results:
        print(state["answer"], "\n")
if __name__ == "__main__":
runtime = sgl.Runtime(model_path="BabyChou/Yi-VL-6B")
# runtime = sgl.Runtime(model_path="BabyChou/Yi-VL-34B")
sgl.set_default_backend(runtime)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
runtime.shutdown()
...@@ -4,7 +4,7 @@ Usage: ...@@ -4,7 +4,7 @@ Usage:
# Installing latest sglang. # Installing latest sglang.
# Endpoint Service CLI: # Endpoint Service CLI:
# python -m sglang.launch_server --model-path lmms-lab/llama3-llava-next-8b --tokenizer-path lmms-lab/llama3-llava-next-8b-tokenizer --port=30000 --host="127.0.0.1" --tp-size=4 python -m sglang.launch_server --model-path lmms-lab/llama3-llava-next-8b --port=30000
python3 http_llama3_llava_test.py python3 http_llama3_llava_test.py
...@@ -16,7 +16,6 @@ import argparse ...@@ -16,7 +16,6 @@ import argparse
import asyncio import asyncio
import copy import copy
import json import json
import time
import aiohttp import aiohttp
import requests import requests
......
"""
Usage:
python3 -m sglang.launch_server --model-path lmms-lab/llava-onevision-qwen2-72b-ov --port=30000 --tp-size=8 --chat-template=chatml-llava --chunked-prefill-size=16384
python3 http_llava_onevision_test.py
"""
import base64
import io
import os
...@@ -74,7 +82,6 @@ def video_stream_request_test(client, video_path): ...@@ -74,7 +82,6 @@ def video_stream_request_test(client, video_path):
print("------------------------Video Stream Request Test----------------------") print("------------------------Video Stream Request Test----------------------")
messages = prepare_video_messages(video_path) messages = prepare_video_messages(video_path)
start_time = time.time()
video_request = client.chat.completions.create( video_request = client.chat.completions.create(
model="default", model="default",
messages=messages, messages=messages,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment