# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

# ruff: noqa
import argparse

from vllm import LLM
from vllm.sampling_params import SamplingParams
from vllm.assets.image import ImageAsset
# This script is an offline demo for running Mistral-Small-3.1
#
# If you want to run a server/client setup, please follow this code:
#
# - Server:
#
# ```bash
# # Mistral format
# vllm serve mistralai/Mistral-Small-3.1-24B-Instruct-2503 \
#   --tokenizer-mode mistral --config-format mistral --load-format mistral \
#   --limit-mm-per-prompt '{"image":4}' --max-model-len 16384
#
# # HF format
# vllm serve mistralai/Mistral-Small-3.1-24B-Instruct-2503 \
#   --limit-mm-per-prompt '{"image":4}' --max-model-len 16384
# ```
#
# - Client:
#
# ```bash
# curl --location 'http://<your-node-url>:8000/v1/chat/completions' \
# --header 'Content-Type: application/json' \
# --header 'Authorization: Bearer token' \
# --data '{
#     "model": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
#     "messages": [
#       {
#         "role": "user",
#         "content": [
#             {"type" : "text", "text": "Describe this image in detail please."},
#             {"type": "image_url", "image_url": {"url": "https://s3.amazonaws.com/cms.ipressroom.com/338/files/201808/5b894ee1a138352221103195_A680%7Ejogging-edit/A680%7Ejogging-edit_hero.jpg"}},
#             {"type" : "text", "text": "and this one as well. Answer in French."},
#             {"type": "image_url", "image_url": {"url": "https://www.wolframcloud.com/obj/resourcesystem/images/a0e/a0ee3983-46c6-4c92-b85d-059044639928/6af8cfb971db031b.png"}}
#         ]
#       }
#     ]
#   }'
# ```
#
# Usage:
#     python demo.py simple
#     python demo.py advanced

# Lower max_model_len and/or max_num_seqs on low-VRAM GPUs.
# These scripts have been tested on 2x L40 GPUs

def run_simple_demo(args: argparse.Namespace):
    """Run a single-turn, single-image chat with Mistral-Small-3.1.

    Loads the model in either mistral or HF format (per ``args.format``),
    sends one user message containing a text prompt plus a bundled sample
    image, and prints the generated reply to stdout.
    """
    model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"

    sampling_params = SamplingParams(max_tokens=8192)

    # tokenizer/config/load format all follow the same --format switch.
    fmt = "mistral" if args.format == "mistral" else "auto"
    llm = LLM(
        model=model_name,
        tokenizer_mode=fmt,
        config_format=fmt,
        load_format=fmt,
        limit_mm_per_prompt={"image": 1},
        max_model_len=4096,
        max_num_seqs=2,
        tensor_parallel_size=2,
        disable_mm_preprocessor_cache=args.disable_mm_preprocessor_cache,
    )

    question = "Describe this image in one sentence."
    sample_image = ImageAsset("cherry_blossom").pil_image

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": question},
                {"type": "image_pil", "image_pil": sample_image},
            ],
        },
    ]

    outputs = llm.chat(messages, sampling_params=sampling_params)

    print("-" * 50)
    print(outputs[0].outputs[0].text)
    print("-" * 50)


def run_advanced_demo(args: argparse.Namespace):
    """Run a multi-turn, multi-image chat with Mistral-Small-3.1.

    Demonstrates multiple URL-referenced images in one prompt, an
    assistant turn in the conversation history, and follow-up user
    messages; prints the generated reply to stdout.
    """
    model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"

    # Cap images per request and size the context window accordingly.
    max_img_per_msg = 3
    max_tokens_per_img = 4096

    sampling_params = SamplingParams(max_tokens=8192, temperature=0.7)

    # tokenizer/config/load format all follow the same --format switch.
    fmt = "mistral" if args.format == "mistral" else "auto"
    llm = LLM(
        model=model_name,
        tokenizer_mode=fmt,
        config_format=fmt,
        load_format=fmt,
        limit_mm_per_prompt={"image": max_img_per_msg},
        max_model_len=max_img_per_msg * max_tokens_per_img,
        tensor_parallel_size=2,
        disable_mm_preprocessor_cache=args.disable_mm_preprocessor_cache,
    )

    prompt = "Describe the following image."

    url_1 = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
    url_2 = "https://picsum.photos/seed/picsum/200/300"
    url_3 = "https://picsum.photos/id/32/512/512"

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": url_1}},
                {"type": "image_url", "image_url": {"url": url_2}},
            ],
        },
        {
            "role": "assistant",
            "content": "The images show nature.",
        },
        {
            "role": "user",
            "content": "More details please and answer only in French!.",
        },
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": url_3}},
            ],
        },
    ]

    outputs = llm.chat(messages=messages, sampling_params=sampling_params)

    print("-" * 50)
    print(outputs[0].outputs[0].text)
    print("-" * 50)


def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Positional ``mode`` selects the demo; ``--format`` picks the model
    checkpoint format; ``--disable-mm-preprocessor-cache`` turns off the
    multi-modal preprocessor cache.
    """
    parser = argparse.ArgumentParser(
        description="Run a demo in simple or advanced mode."
    )
    parser.add_argument(
        "mode",
        choices=["simple", "advanced"],
        help="Specify the demo mode: 'simple' or 'advanced'",
    )
    parser.add_argument(
        "--format",
        choices=["mistral", "hf"],
        default="mistral",
        help="Specify the format of the model to load.",
    )
    parser.add_argument(
        "--disable-mm-preprocessor-cache",
        action="store_true",
        help="If True, disables caching of multi-modal preprocessor/mapper.",
    )
    return parser.parse_args()


def main():
    """Entry point: dispatch to the demo selected on the command line."""
    args = parse_args()

    # argparse already restricts mode to these two choices.
    demos = {
        "simple": ("Running simple demo...", run_simple_demo),
        "advanced": ("Running advanced demo...", run_advanced_demo),
    }
    if args.mode in demos:
        banner, demo = demos[args.mode]
        print(banner)
        demo(args)


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()