import base64
import io
import json
import os
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import openai
import requests
from PIL import Image

from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
    CustomTestCase,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)

# image
IMAGE_MAN_IRONING_URL = "https://raw.githubusercontent.com/sgl-project/sgl-test-files/refs/heads/main/images/man_ironing_on_back_of_suv.png"
IMAGE_SGL_LOGO_URL = "https://raw.githubusercontent.com/sgl-project/sgl-test-files/refs/heads/main/images/sgl_logo.png"

# video
VIDEO_JOBS_URL = "https://raw.githubusercontent.com/sgl-project/sgl-test-files/refs/heads/main/videos/jobs_presenting_ipod.mp4"

# audio
AUDIO_TRUMP_SPEECH_URL = "https://raw.githubusercontent.com/sgl-project/sgl-test-files/refs/heads/main/audios/Trump_WEF_2018_10s.mp3"
AUDIO_BIRD_SONG_URL = "https://raw.githubusercontent.com/sgl-project/sgl-test-files/refs/heads/main/audios/bird_song.mp3"


class TestOpenAIVisionServer(CustomTestCase):
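    """Common vision/video/audio chat-completion tests against an OpenAI-compatible server.

    Subclasses may override `setUpClass` and the `get_*_request_kwargs` hooks
    below to adapt these tests to other multimodal models.
    """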
    @classmethod
    def setUpClass(cls):
        cls.model = "lmms-lab/llava-onevision-qwen2-0.5b-ov"
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            api_key=cls.api_key,
        )
        cls.base_url += "/v1"

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def get_audio_request_kwargs(self):
        return self.get_request_kwargs()

    def get_vision_request_kwargs(self):
        return self.get_request_kwargs()

    def get_request_kwargs(self):
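        # Hook for subclasses: extra kwargs (e.g. `extra_body`) merged into each request.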
        return {}

    def test_single_image_chat_completion(self):
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        response = client.chat.completions.create(
            model="default",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": IMAGE_MAN_IRONING_URL},
                        },
                        {
                            "type": "text",
                            "text": "Describe this image in a very short sentence.",
                        },
                    ],
                },
            ],
            temperature=0,
            **(self.get_vision_request_kwargs()),
        )

        assert response.choices[0].message.role == "assistant"
        text = response.choices[0].message.content
        assert isinstance(text, str)
        # `driver` is the typical answer from gemma-3-it
        assert (
            "man" in text or "person" in text or "driver" in text
        ), f"text: {text}, should contain man, person or driver"
        assert (
            "cab" in text
            or "taxi" in text
            or "SUV" in text
            or "vehicle" in text
            or "car" in text
        ), f"text: {text}, should contain cab, taxi, SUV, vehicle or car"
        # MiniCPMO fails to recognize `iron` and tends to answer `hanging` instead
        assert (
            "iron" in text or "hang" in text or "cloth" in text or "holding" in text
        ), f"text: {text}, should contain iron, hang, cloth or holding"
        assert response.id
        assert response.created
        assert response.usage.prompt_tokens > 0
        assert response.usage.completion_tokens > 0
        assert response.usage.total_tokens > 0

    def test_multi_turn_chat_completion(self):
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        response = client.chat.completions.create(
            model="default",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": IMAGE_MAN_IRONING_URL},
                        },
                        {
                            "type": "text",
                            "text": "Describe this image in a very short sentence.",
                        },
                    ],
                },
                {
                    "role": "assistant",
                    "content": [
                        {
                            "type": "text",
                            "text": "There is a man at the back of a yellow cab ironing his clothes.",
                        }
                    ],
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Repeat your previous answer."}
                    ],
                },
            ],
            temperature=0,
            **(self.get_vision_request_kwargs()),
        )

        assert response.choices[0].message.role == "assistant"
        text = response.choices[0].message.content
        assert isinstance(text, str)
        assert (
            "man" in text or "cab" in text
        ), f"text: {text}, should contain man or cab"
        assert response.id
        assert response.created
        assert response.usage.prompt_tokens > 0
        assert response.usage.completion_tokens > 0
        assert response.usage.total_tokens > 0

    def test_multi_images_chat_completion(self):
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        response = client.chat.completions.create(
            model="default",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": IMAGE_MAN_IRONING_URL},
                            "modalities": "multi-images",
                        },
                        {
                            "type": "image_url",
                            "image_url": {"url": IMAGE_SGL_LOGO_URL},
                            "modalities": "multi-images",
                        },
                        {
                            "type": "text",
                            "text": "I have two very different images. They are not related at all. "
                            "Please describe the first image in one sentence, and then describe the second image in another sentence.",
                        },
                    ],
                },
            ],
            temperature=0,
            **(self.get_vision_request_kwargs()),
        )

        assert response.choices[0].message.role == "assistant"
        text = response.choices[0].message.content
        assert isinstance(text, str)
        print("-" * 30)
        print(f"Multi images response:\n{text}")
        print("-" * 30)
        assert (
            "man" in text or "cab" in text or "SUV" in text or "taxi" in text
        ), f"text: {text}, should contain man, cab, SUV or taxi"
        assert (
            "logo" in text or '"S"' in text or "SG" in text
        ), f"text: {text}, should contain logo, S or SG"
        assert response.id
        assert response.created
        assert response.usage.prompt_tokens > 0
        assert response.usage.completion_tokens > 0
        assert response.usage.total_tokens > 0

    def prepare_video_images_messages(self, video_path):
        # The memory consumed by vision attention varies a lot across implementations
        # (e.g. blocked qkv vs. full-sequence SDPA), and the size of the video
        # embeddings depends on the `modality` argument used during preprocessing.

        # We import decord here to avoid a strange "Segmentation fault (core dumped)"
        # issue. The following import order triggers the segfault:
        # import decord
        # from transformers import AutoTokenizer
        from decord import VideoReader, cpu

        max_frames_num = 10
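        # Uniformly sample `max_frames_num` frame indices across the full clip.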
        vr = VideoReader(video_path, ctx=cpu(0))
        total_frame_num = len(vr)
        uniform_sampled_frames = np.linspace(
            0, total_frame_num - 1, max_frames_num, dtype=int
        )
        frame_idx = uniform_sampled_frames.tolist()
        frames = vr.get_batch(frame_idx).asnumpy()

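        # JPEG-encode each sampled frame to a base64 string for data-URL embedding.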
        base64_frames = []
        for frame in frames:
            pil_img = Image.fromarray(frame)
            buff = io.BytesIO()
            pil_img.save(buff, format="JPEG")
            base64_str = base64.b64encode(buff.getvalue()).decode("utf-8")
            base64_frames.append(base64_str)

        messages = [{"role": "user", "content": []}]
        frame_format = {
            "type": "image_url",
            "image_url": {"url": "data:image/jpeg;base64,{}"},
238
            "modalities": "image",
239
240
241
242
243
244
245
246
247
248
249
250
251
        }

        for base64_frame in base64_frames:
            frame_format["image_url"]["url"] = "data:image/jpeg;base64,{}".format(
                base64_frame
            )
            messages[0]["content"].append(frame_format.copy())

        prompt = {"type": "text", "text": "Please describe the video in detail."}
        messages[0]["content"].append(prompt)

        return messages

    def prepare_video_messages(self, video_path):
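        # Unlike `prepare_video_images_messages`, this passes the video file itself
        # as a single `video_url` content part.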
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "video_url",
                        "video_url": {"url": f"{video_path}"},
                    },
                    {"type": "text", "text": "Please describe the video in detail."},
                ],
            },
        ]
        return messages

    def get_or_download_file(self, url: str) -> str:
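        # Cache remote test assets under ~/.cache so repeated runs skip the download.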
        cache_dir = os.path.expanduser("~/.cache")
        if url is None:
            raise ValueError("url must not be None")
        file_name = url.split("/")[-1]
        file_path = os.path.join(cache_dir, file_name)
        os.makedirs(cache_dir, exist_ok=True)

        if not os.path.exists(file_path):
            # Time out rather than hanging forever on a stalled download.
            response = requests.get(url, timeout=60)
            response.raise_for_status()

            with open(file_path, "wb") as f:
                f.write(response.content)
        return file_path

    # This test feeds uniformly sampled video frames as image inputs, not the video file itself.
    def test_video_images_chat_completion(self):
        url = VIDEO_JOBS_URL
        file_path = self.get_or_download_file(url)

        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        messages = self.prepare_video_images_messages(file_path)

        response = client.chat.completions.create(
            model="default",
            messages=messages,
            temperature=0,
            max_tokens=1024,
            stream=False,
        )

        video_response = response.choices[0].message.content

        print("-" * 30)
        print(f"Video images response:\n{video_response}")
        print("-" * 30)

        # Add assertions to validate the video response
        assert (
            "iPod" in video_response
            or "device" in video_response
            or "microphone" in video_response
        ), f"""
        ====================== video_response =====================
        {video_response}
        ===========================================================
        should contain 'iPod' or 'device' or 'microphone'
        """
        assert (
            "man" in video_response
            or "person" in video_response
            or "individual" in video_response
            or "speaker" in video_response
            or "Steve" in video_response
        ), f"""
        ====================== video_response =====================
        {video_response}
        ===========================================================
        should contain 'man', 'person', 'individual', 'speaker' or 'Steve'
        """
        assert (
            "present" in video_response
            or "examine" in video_response
            or "display" in video_response
            or "hold" in video_response
        ), f"""
        ====================== video_response =====================
        {video_response}
        ===========================================================
        should contain 'present' or 'examine' or 'display' or 'hold'
        """
        assert "black" in video_response or "dark" in video_response
        self.assertIsNotNone(video_response)
        self.assertGreater(len(video_response), 0)

    def _test_video_chat_completion(self):
        url = VIDEO_JOBS_URL
        file_path = self.get_or_download_file(url)

        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        messages = self.prepare_video_messages(file_path)

        response = client.chat.completions.create(
            model="default",
            messages=messages,
            temperature=0,
            max_tokens=1024,
            stream=False,
            **(self.get_vision_request_kwargs()),
        )

        video_response = response.choices[0].message.content

        print("-" * 30)
        print(f"Video response:\n{video_response}")
        print("-" * 30)

        # Add assertions to validate the video response
        assert (
            "iPod" in video_response
            or "device" in video_response
            or "microphone" in video_response
        ), f"video_response: {video_response}, should contain 'iPod' or 'device'"
        assert (
            "man" in video_response
            or "person" in video_response
            or "individual" in video_response
            or "speaker" in video_response
        ), f"video_response: {video_response}, should either have 'man' in video_response, or 'person' in video_response, or 'individual' in video_response or 'speaker' in video_response"
        assert (
            "present" in video_response
            or "examine" in video_response
            or "display" in video_response
            or "hold" in video_response
        ), f"video_response: {video_response}, should contain 'present', 'examine', 'display', or 'hold'"
        assert (
            "black" in video_response or "dark" in video_response
        ), f"video_response: {video_response}, should contain 'black' or 'dark'"
        self.assertIsNotNone(video_response)
        self.assertGreater(len(video_response), 0)

    def test_regex(self):
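        # Constrain decoding with a regex (passed via `extra_body`) so the reply
        # must be a JSON object with a string `color` and an integer `number_of_cars`.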
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        regex = (
            r"""\{"""
            + r""""color":"[\w]+","""
            + r""""number_of_cars":[\d]+"""
            + r"""\}"""
        )

        extra_kwargs = self.get_vision_request_kwargs()
        extra_kwargs.setdefault("extra_body", {})["regex"] = regex

        response = client.chat.completions.create(
            model="default",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": IMAGE_MAN_IRONING_URL},
                        },
                        {
                            "type": "text",
                            "text": "Describe this image in the JSON format.",
                        },
                    ],
                },
            ],
            temperature=0,
            **extra_kwargs,
        )
        text = response.choices[0].message.content

        try:
            js_obj = json.loads(text)
        except (TypeError, json.decoder.JSONDecodeError):
            print("JSONDecodeError", text)
            raise
        assert isinstance(js_obj["color"], str)
        assert isinstance(js_obj["number_of_cars"], int)

    def run_decode_with_image(self, image_id):
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)

        content = []
        if image_id == 0:
            content.append(
                {
                    "type": "image_url",
                    "image_url": {"url": IMAGE_MAN_IRONING_URL},
                }
            )
        elif image_id == 1:
            content.append(
                {
                    "type": "image_url",
                    "image_url": {"url": IMAGE_SGL_LOGO_URL},
                }
            )
        else:
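            # image_id 2 (or anything else): text-only request, no image content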
            pass

        content.append(
            {
                "type": "text",
                "text": "Describe this image in a very short sentence.",
            }
        )

        response = client.chat.completions.create(
            model="default",
            messages=[
                {"role": "user", "content": content},
            ],
            temperature=0,
            **(self.get_vision_request_kwargs()),
        )

        assert response.choices[0].message.role == "assistant"
        text = response.choices[0].message.content
        assert isinstance(text, str)

    def test_mixed_batch(self):
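        # Fire image and text-only requests concurrently to exercise batching
        # across mixed modalities.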
        image_ids = [0, 1, 2] * 4
        with ThreadPoolExecutor(4) as executor:
            list(executor.map(self.run_decode_with_image, image_ids))

    def prepare_audio_messages(self, prompt, audio_file_name):
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "audio_url",
                        "audio_url": {"url": f"{audio_file_name}"},
                    },
                    {
                        "type": "text",
                        "text": prompt,
                    },
                ],
            }
        ]

        return messages

    def get_audio_response(self, url: str, prompt, category):
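        # Download the clip, send it as an `audio_url` message, and return the
        # lowercased reply for keyword assertions.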
        audio_file_path = self.get_or_download_file(url)
        client = openai.Client(api_key="sk-123456", base_url=self.base_url)

        messages = self.prepare_audio_messages(prompt, audio_file_path)

        response = client.chat.completions.create(
            model="default",
            messages=messages,
            temperature=0,
            max_tokens=128,
            stream=False,
            **(self.get_audio_request_kwargs()),
        )

        audio_response = response.choices[0].message.content

        print("-" * 30)
        print(f"audio {category} response:\n{audio_response}")
        print("-" * 30)

        audio_response = audio_response.lower()

        self.assertIsNotNone(audio_response)
        self.assertGreater(len(audio_response), 0)

        return audio_response

    def _test_audio_speech_completion(self):
        # a fragment of Trump's speech
        audio_response = self.get_audio_response(
            AUDIO_TRUMP_SPEECH_URL,
            "I have an audio sample. Please repeat the person's words",
            category="speech",
        )
        assert "thank you" in audio_response
        assert "it's a privilege to be here" in audio_response
        assert "leader" in audio_response
        assert "science" in audio_response
        assert "art" in audio_response

    def _test_audio_ambient_completion(self):
        # bird song
        audio_response = self.get_audio_response(
            AUDIO_BIRD_SONG_URL,
            "Please listen to the audio snippet carefully and transcribe the content.",
            "ambient",
        )
        assert "bird" in audio_response

    def test_audio_chat_completion(self):
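        # No-op in the common suite; audio-capable subclasses are presumably meant
        # to exercise the `_test_audio_*` helpers above.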
        pass