"""
Usage:
python offline_batch_inference_vlm.py --model-path Qwen/Qwen2-VL-7B-Instruct --chat-template qwen2-vl

Note: --chat-template must be passed explicitly. This script looks up
chat_templates[server_args.chat_template], so the value must be a template
name registered in sglang.srt.conversation (e.g. "qwen2-vl" for Qwen2-VL).
"""

import argparse
import dataclasses

import sglang as sgl
from sglang.srt.conversation import chat_templates
from sglang.srt.server_args import ServerArgs


def main(
    server_args: ServerArgs,
):
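    # Launch the model as an in-process offline engine (no HTTP server is started).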
    vlm = sgl.Engine(**dataclasses.asdict(server_args))

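    # The chat template supplies the model-specific placeholder token that
    # marks where the image is inserted into the prompt.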
    conv = chat_templates[server_args.chat_template].copy()
    image_token = conv.image_token

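    # image_data also accepts a local file path or a base64-encoded image,
    # not just a URL.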
    image_url = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"

    prompt = f"What's in this image?\n{image_token}"

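    # A near-zero temperature makes decoding effectively greedy, so the
    # answer is (almost) deterministic across runs.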
    sampling_params = {
        "temperature": 0.001,
        "max_new_tokens": 30,
    }

    output = vlm.generate(
        prompt=prompt,
        image_data=image_url,
        sampling_params=sampling_params,
    )

    print("===============================")
    print(f"Prompt: {prompt}")
    print(f"Generated text: {output['text']}")

    vlm.shutdown()


# The __main__ guard is necessary here because sgl.Engine uses "spawn" to
# create subprocesses. Spawn starts a fresh interpreter each time, so without
# the guard the module would be re-executed on import and keep spawning new
# processes from sgl.Engine in an infinite loop.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ServerArgs.add_cli_args(parser)
    args = parser.parse_args()

    server_args = ServerArgs.from_cli_args(args)
    main(server_args)