"""
Usage: python3 srt_example_llava.py
"""
import sglang as sgl


@sgl.function
def image_qa(s, image_path, question):
    """Ask *question* about the image at *image_path*; generation is bound to "answer"."""
    prompt = sgl.image(image_path) + question
    s += sgl.user(prompt)
    s += sgl.assistant(sgl.gen("answer"))


14
15
def single():
    """Run one image-QA request and print the generated answer."""
    state = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=128,
    )
    print(state["answer"], "\n")


def stream():
    """Run one image-QA request and stream the answer to stdout as it is generated."""
    state = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=64,
        stream=True,
    )
    # Print each text chunk as soon as it arrives; flush so output is live.
    for chunk in state.text_iter("answer"):
        print(chunk, end="", flush=True)
    print()


def batch():
    """Run two image-QA requests as one batch and print each answer."""
    requests = [
        {"image_path": "images/cat.jpeg", "question": "What is this?"},
        {"image_path": "images/dog.jpeg", "question": "What is this?"},
    ]
    states = image_qa.run_batch(requests, max_new_tokens=128)
    for state in states:
        print(state["answer"], "\n")


if __name__ == "__main__":
    # Start a local LLaVA runtime. NOTE: the tokenizer is loaded from a
    # different HF repo than the model weights, as in the original example.
    runtime = sgl.Runtime(
        model_path="liuhaotian/llava-v1.6-vicuna-7b",
        tokenizer_path="llava-hf/llava-1.5-7b-hf",
    )
    sgl.set_default_backend(runtime)
    print(f"chat template: {runtime.endpoint.chat_template.name}")

    # Or you can use API models
    # sgl.set_default_backend(sgl.OpenAI("gpt-4-vision-preview"))
    # sgl.set_default_backend(sgl.VertexAI("gemini-pro-vision"))

    try:
        # Run a single request
        print("\n========== single ==========\n")
        single()

        # Stream output
        print("\n========== stream ==========\n")
        stream()

        # Run a batch of requests
        print("\n========== batch ==========\n")
        batch()
    finally:
        # Always release the local runtime (model-server process), even if
        # one of the demo requests raises — otherwise it is leaked.
        runtime.shutdown()