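"""OpenAI-compatible server tests for a set of vision and audio models.

Each test class launches its model with popen_launch_server and reuses the
shared multimodal test cases from test_vision_openai_server_common.
"""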
import unittest

from test_vision_openai_server_common import *

from sglang.test.test_utils import (
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestPixtralServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "mistral-community/pixtral-12b"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.70",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_video_chat_completion(self):
        pass


class TestMistral3_1Server(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "unsloth/Mistral-Small-3.1-24B-Instruct-2503"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.75",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_video_chat_completion(self):
        pass


class TestDeepseekVL2Server(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "deepseek-ai/deepseek-vl2-small"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--context-length",
                "4096",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_video_chat_completion(self):
        pass


class TestJanusProServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "deepseek-ai/Janus-Pro-7B"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.35",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_video_images_chat_completion(self):
        pass

    def test_single_image_chat_completion(self):
        # Skip this test because it is flaky
        pass


# Skipped in CI tests
# class TestLlama4Server(TestOpenAIVisionServer):
#     @classmethod
#     def setUpClass(cls):
#         cls.model = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
#         cls.base_url = DEFAULT_URL_FOR_TEST
#         cls.api_key = "sk-123456"
#         cls.process = popen_launch_server(
#             cls.model,
#             cls.base_url,
#             timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
#             other_args=[
#                 "--chat-template",
#                 "llama-4",
#                 "--mem-fraction-static",
#                 "0.8",
#                 "--tp-size=8",
#                 "--context-length=8192",
#                 "--mm-attention-backend",
#                 "fa3",
#                 "--cuda-graph-max-bs",
#                 "4",
#             ],
#         )
#         cls.base_url += "/v1"

#     def test_video_chat_completion(self):
#         pass


class TestGemma3itServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "google/gemma-3-4b-it"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.70",
                "--enable-multimodal",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_video_chat_completion(self):
        pass


class TestGemma3nServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "google/gemma-3n-E4B-it"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.70",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_audio_chat_completion(self):
        self._test_audio_speech_completion()
        # _test_audio_ambient_completion is too difficult for a small model to pass reliably, so it is skipped.
        # self._test_audio_ambient_completion()

    def test_mixed_image_audio_chat_completion(self):
        self._test_mixed_image_audio_chat_completion()


class TestQwen2AudioServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "Qwen/Qwen2-Audio-7B-Instruct"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.70",
            ],
        )
        cls.base_url += "/v1"

    def test_audio_chat_completion(self):
        self._test_audio_speech_completion()
        self._test_audio_ambient_completion()

    # Qwen2Audio does not support image input
    def test_single_image_chat_completion(self):
        pass

    # Qwen2Audio does not support image input
    def test_multi_turn_chat_completion(self):
        pass

    # Qwen2Audio does not support image input
    def test_multi_images_chat_completion(self):
        pass

    # Qwen2Audio does not support image input
    def test_video_images_chat_completion(self):
        pass

    # Qwen2Audio does not support image input
    def test_regex(self):
        pass

    # Qwen2Audio does not support image input
    def test_mixed_batch(self):
        pass


class TestKimiVLServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "moonshotai/Kimi-VL-A3B-Instruct"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--context-length",
                "4096",
                "--dtype",
                "bfloat16",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    def test_video_images_chat_completion(self):
        pass


class TestPhi4MMServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        # Manually download LoRA adapter_config.json as it's not downloaded by the model loader by default.
        from huggingface_hub import constants, snapshot_download

        snapshot_download(
            "microsoft/Phi-4-multimodal-instruct",
            allow_patterns=["**/adapter_config.json"],
        )

        cls.model = "microsoft/Phi-4-multimodal-instruct"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"

        revision = "33e62acdd07cd7d6635badd529aa0a3467bb9c6a"
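        # The pinned revision is used both for --revision and to locate the LoRA adapters inside the HF hub cache snapshot.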
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=[
                "--trust-remote-code",
                "--mem-fraction-static",
                "0.70",
                "--disable-radix-cache",
                "--max-loras-per-batch",
                "2",
                "--revision",
                revision,
                "--lora-paths",
                f"vision={constants.HF_HUB_CACHE}/models--microsoft--Phi-4-multimodal-instruct/snapshots/{revision}/vision-lora",
                f"speech={constants.HF_HUB_CACHE}/models--microsoft--Phi-4-multimodal-instruct/snapshots/{revision}/speech-lora",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"

    # Send image requests through the "vision" LoRA adapter; top_k=1 keeps decoding deterministic.
    def get_vision_request_kwargs(self):
        return {
            "extra_body": {
                "lora_path": "vision",
                "top_k": 1,
                "top_p": 1.0,
            }
        }

    # Send audio requests through the "speech" LoRA adapter; top_k=1 keeps decoding deterministic.
    def get_audio_request_kwargs(self):
        return {
            "extra_body": {
                "lora_path": "speech",
                "top_k": 1,
                "top_p": 1.0,
            }
        }

    def test_audio_chat_completion(self):
        self._test_audio_speech_completion()
        # _test_audio_ambient_completion is too difficult for a small model to pass reliably, so it is skipped.
        # self._test_audio_ambient_completion()


class TestVILAServer(TestOpenAIVisionServer):
    @classmethod
    def setUpClass(cls):
        cls.model = "Efficient-Large-Model/NVILA-Lite-2B-hf-0626"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.revision = "6bde1de5964b40e61c802b375fff419edc867506"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            api_key=cls.api_key,
            other_args=[
                "--trust-remote-code",
                "--context-length=65536",
                f"--revision={cls.revision}",
                "--cuda-graph-max-bs",
                "4",
            ],
        )
        cls.base_url += "/v1"


# Skipped in CI tests
# class TestGLM41VServer(TestOpenAIVisionServer):
#     @classmethod
#     def setUpClass(cls):
#         cls.model = "zai-org/GLM-4.1V-9B-Thinking"
#         cls.base_url = DEFAULT_URL_FOR_TEST
#         cls.api_key = "sk-123456"
#         cls.process = popen_launch_server(
#             cls.model,
#             cls.base_url,
#             timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
#             other_args=[
#                 "--trust-remote-code",
#                 "--mem-fraction-static",
#                 "0.68",
#                 "--cuda-graph-max-bs",
#                 "4",
#                 "--reasoning-parser",
#                 "glm45",
#             ],
#         )
#         cls.base_url += "/v1"

#     def test_video_chat_completion(self):
#         self._test_video_chat_completion()


if __name__ == "__main__":
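    # Remove the imported base class so unittest does not collect and run it directly.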
    del TestOpenAIVisionServer
    unittest.main()