# Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llava/processing_llava.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import math
import os
import re
from copy import deepcopy
from dataclasses import dataclass
from io import BytesIO
from typing import TYPE_CHECKING, BinaryIO, Literal, NotRequired, Optional, TypedDict, Union

import numpy as np
import torch
import torchaudio
from transformers.image_utils import get_image_size, is_valid_image, to_numpy_array
from transformers.models.mllama.processing_mllama import (
    convert_sparse_cross_attention_mask_to_dense,
    get_cross_attention_token_mask,
)
from typing_extensions import override

from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
from ..extras.packages import is_pillow_available, is_pyav_available, is_transformers_version_greater_than


if is_pillow_available():
    from PIL import Image
    from PIL.Image import Image as ImageObject


if is_pyav_available():
    import av


if is_transformers_version_greater_than("4.52.0"):
    from transformers.image_utils import make_flat_list_of_images
    from transformers.video_utils import make_batched_videos
else:
    from transformers.image_utils import make_batched_videos, make_flat_list_of_images


if TYPE_CHECKING:
    from av.stream import Stream
    from numpy.typing import NDArray
    from transformers import PreTrainedTokenizer, ProcessorMixin
    from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
    from transformers.image_processing_utils import BaseImageProcessor
    from transformers.video_processing_utils import BaseVideoProcessor

    class EncodedImage(TypedDict):
        path: str | None
        bytes: bytes | None

    ImageInput = Union[str, bytes, EncodedImage, BinaryIO, ImageObject]
    VideoInput = Union[str, BinaryIO, list[list[ImageInput]]]
    AudioInput = Union[str, BinaryIO, NDArray]

    class RegularizedImageOutput(TypedDict):
        images: list[ImageObject]

    class RegularizedVideoOutput(TypedDict):
        videos: list[list[ImageObject]]
        durations: list[float]
        fps_per_video: NotRequired[list[float]]

    class RegularizedAudioOutput(TypedDict):
        audios: list[NDArray]
        sampling_rates: list[float]

    class MMProcessor(ProcessorMixin):
        patch_size: int
        image_seq_length: int
        num_additional_image_tokens: int
        vision_feature_select_strategy: Literal["default", "full"]

        def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
            pass


def _get_paligemma_token_type_ids(imglens: list[int], seqlens: list[int], processor: "MMProcessor") -> list[list[int]]:
    r"""Get paligemma token type ids for computing loss.

    It is slightly different from the original token type ids, in which the prompt part is 0.

    Returns:
        batch_token_type_ids: shape (batch_size, seq_length)

    """
    batch_token_type_ids = []
    for imglen, seqlen in zip(imglens, seqlens):
        image_seqlen = imglen * processor.image_seq_length
        batch_token_type_ids.append([0] * image_seqlen + [1] * (seqlen - image_seqlen))

    return batch_token_type_ids
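
# Worked example (assumed values): with processor.image_seq_length == 4, a sample
# holding one image (imglen == 1) in a six-token sequence (seqlen == 6) yields
# [0, 0, 0, 0, 1, 1] -- image positions get type 0 (ignored in the loss) and the
# remaining text positions get type 1.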


def _get_gemma3_token_type_ids(batch_ids: list[list[int]], processor: "MMProcessor"):
    r"""Get gemma3 token type ids for computing loss.

    Returns:
        batch_token_type_ids: shape (batch_size, seq_length)

    """
    image_token_id: int = getattr(processor, "image_token_id")
    batch_token_type_ids = []
    for token_ids in batch_ids:
        token_ids = np.array(token_ids)
        token_type_ids = np.zeros_like(token_ids)
        token_type_ids[token_ids == image_token_id] = 1
        batch_token_type_ids.append(token_type_ids.tolist())

    return batch_token_type_ids
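
# Illustrative sketch (assumed id): if processor.image_token_id == 99, token ids
# [5, 99, 99, 7] produce token type ids [0, 1, 1, 0], i.e. exactly the positions
# holding image tokens are marked with type 1.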


def _make_batched_images(images: list["ImageObject"], imglens: list[int]) -> list[list["ImageObject"]]:
    r"""Make nested list of images."""
    batch_images = []
    for imglen in imglens:
        batch_images.append(images[:imglen])
        images = images[imglen:]

    return batch_images
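
# e.g. _make_batched_images([img0, img1, img2], imglens=[1, 2]) returns
# [[img0], [img1, img2]]: each sample consumes `imglen` images from the flat list.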


def _check_video_is_nested_images(video: "VideoInput") -> bool:
    r"""Check if the video is nested images."""
    return isinstance(video, list) and all(isinstance(frame, (str, BinaryIO, dict, ImageObject)) for frame in video)


@dataclass
class MMPluginMixin:
    image_token: str | None
    video_token: str | None
    audio_token: str | None
    expand_mm_tokens: bool = True

    def _validate_input(
        self,
        processor: Optional["MMProcessor"],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
    ) -> None:
        r"""Validate if this model accepts the input modalities."""
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(
            processor, "video_processor", getattr(processor, "image_processor", None)
        )
        feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None)
        if len(images) != 0 and self.image_token is None:
            raise ValueError(
                "This model does not support image input. Please check whether the correct `template` is used."
            )

        if len(videos) != 0 and self.video_token is None:
            raise ValueError(
                "This model does not support video input. Please check whether the correct `template` is used."
            )

        if len(audios) != 0 and self.audio_token is None:
            raise ValueError(
                "This model does not support audio input. Please check whether the correct `template` is used."
            )

        if self.image_token is not None and processor is None:
            raise ValueError("Processor was not found, please check and update your model file.")

        if self.image_token is not None and image_processor is None:
            raise ValueError("Image processor was not found, please check and update your model file.")

        if self.video_token is not None and video_processor is None:
            raise ValueError("Video processor was not found, please check and update your model file.")

        if self.audio_token is not None and feature_extractor is None:
            raise ValueError("Audio feature extractor was not found, please check and update your model file.")

    def _validate_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
    ):
        r"""Validate if the number of images, videos and audios match the number of placeholders in messages."""
        num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
        for message in messages:
            num_image_tokens += message["content"].count(IMAGE_PLACEHOLDER)
            num_video_tokens += message["content"].count(VIDEO_PLACEHOLDER)
            num_audio_tokens += message["content"].count(AUDIO_PLACEHOLDER)

        if len(images) != num_image_tokens:
            raise ValueError(
                f"The number of images does not match the number of {IMAGE_PLACEHOLDER} tokens in {messages}."
            )

        if len(videos) != num_video_tokens:
            raise ValueError(
                f"The number of videos does not match the number of {VIDEO_PLACEHOLDER} tokens in {messages}."
            )

        if len(audios) != num_audio_tokens:
            raise ValueError(
                f"The number of audios does not match the number of {AUDIO_PLACEHOLDER} tokens in {messages}."
            )

    def _preprocess_image(
        self, image: "ImageObject", image_max_pixels: int, image_min_pixels: int, **kwargs
    ) -> "ImageObject":
        r"""Pre-process a single image."""
        if (image.width * image.height) > image_max_pixels:
            resize_factor = math.sqrt(image_max_pixels / (image.width * image.height))
            width, height = int(image.width * resize_factor), int(image.height * resize_factor)
            image = image.resize((width, height))

        if (image.width * image.height) < image_min_pixels:
            resize_factor = math.sqrt(image_min_pixels / (image.width * image.height))
            width, height = int(image.width * resize_factor), int(image.height * resize_factor)
            image = image.resize((width, height))

        if image.mode != "RGB":
            image = image.convert("RGB")

        return image
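
    # Worked example (assumed limits): a 4000x3000 image has 12,000,000 pixels;
    # with image_max_pixels = 768 * 768 = 589,824 the resize factor is
    # sqrt(589824 / 12000000) ~= 0.2217, so the image is downscaled to roughly
    # 886x665 (aspect ratio preserved) before the RGB conversion.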

    def _get_video_sample_indices(
        self, video_stream: "Stream", video_fps: float, video_maxlen: int, **kwargs
    ) -> list[int]:
        r"""Compute video sample indices according to fps."""
        total_frames = video_stream.frames
        if total_frames == 0:  # infinite video
            return np.linspace(0, video_maxlen - 1, video_maxlen).astype(np.int32)

        sample_frames = max(1, math.floor(float(video_stream.duration * video_stream.time_base) * video_fps))
        sample_frames = min(total_frames, video_maxlen, sample_frames)
        return np.linspace(0, total_frames - 1, sample_frames).astype(np.int32)
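
    # Worked example (assumed stream): a 10-second video (duration * time_base
    # == 10.0) with 300 frames and video_fps == 2.0 proposes floor(10.0 * 2.0)
    # == 20 samples; video_maxlen == 16 caps it via min(300, 16, 20) == 16, so
    # 16 indices are drawn evenly from [0, 299] by np.linspace.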

    def _regularize_images(self, images: list["ImageInput"], **kwargs) -> "RegularizedImageOutput":
        r"""Regularize images to avoid error. Including reading and pre-processing."""
        results = []
        for image in images:
            if isinstance(image, (str, BinaryIO)):
                image = Image.open(image)
            elif isinstance(image, bytes):
                image = Image.open(BytesIO(image))
            elif isinstance(image, dict):
                if image["bytes"] is not None:
                    image = Image.open(BytesIO(image["bytes"]))
                else:
                    image = Image.open(image["path"])

            if not isinstance(image, ImageObject):
                raise ValueError(f"Expect input is a list of images, but got {type(image)}.")

            results.append(self._preprocess_image(image, **kwargs))

        return {"images": results}

    def _regularize_videos(self, videos: list["VideoInput"], **kwargs) -> "RegularizedVideoOutput":
        r"""Regularizes videos to avoid error. Including reading, resizing and converting."""
        results = []
        durations = []
        for video in videos:
            frames: list[ImageObject] = []
            if _check_video_is_nested_images(video):
                for frame in video:
                    if not is_valid_image(frame) and not isinstance(frame, dict) and not os.path.exists(frame):
                        raise ValueError("Invalid image found in video frames.")
                frames = video
                durations.append(len(frames) / kwargs.get("video_fps", 2.0))
            else:
                container = av.open(video, "r")
                video_stream = next(stream for stream in container.streams if stream.type == "video")
                sample_indices = self._get_video_sample_indices(video_stream, **kwargs)
                container.seek(0)
                for frame_idx, frame in enumerate(container.decode(video_stream)):
                    if frame_idx in sample_indices:
                        frames.append(frame.to_image())

                if video_stream.duration is None:
                    durations.append(len(frames) / kwargs.get("video_fps", 2.0))
                else:
                    durations.append(float(video_stream.duration * video_stream.time_base))

            frames = self._regularize_images(frames, **kwargs)["images"]
            results.append(frames)

        return {"videos": results, "durations": durations}

    def _regularize_audios(
        self, audios: list["AudioInput"], sampling_rate: float, **kwargs
    ) -> "RegularizedAudioOutput":
        r"""Regularizes audios to avoid error. Including reading and resampling."""
        results, sampling_rates = [], []
        for audio in audios:
            if not isinstance(audio, np.ndarray):
                audio, sr = torchaudio.load(audio)
                if audio.shape[0] > 1:
                    audio = audio.mean(dim=0, keepdim=True)

                if sr != sampling_rate:
                    audio = torchaudio.functional.resample(audio, sr, sampling_rate)

                audio = audio.squeeze(0).numpy()

            results.append(audio)
            sampling_rates.append(sampling_rate)

        return {"audios": results, "sampling_rates": sampling_rates}

    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
        imglens: list[int] | None = None,
    ) -> dict[str, "torch.Tensor"]:
        r"""Process visual inputs.

        Returns: (llava and paligemma)
            pixel_values: tensor with shape (B, C, H, W)

        Returns: (qwen2-vl)
            pixel_values: tensor with shape (num_patches, patch_dim)
            image_grid_thw: tensor with shape (num_images, 3), where the three numbers are time, height, width
                            where num_patches == torch.prod(image_grid_thw)

        Returns: (mllama)
            pixel_values: tensor with shape
                          (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width)
                          For example, (2, 1, 4, 3, 560, 560).
            aspect_ratio_ids: tensor with shape (batch_size, max_num_images). For example, (2, 1).
            aspect_ratio_mask: tensor with shape (batch_size, max_num_images, max_image_tiles). For example, (2, 1, 4).
            num_tiles: List[List[int]] with shape (batch_size, num_images_in_batch). For example, (2, 1).

        """
        mm_inputs = {}
        if len(images) != 0:
            image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            if imglens is not None:  # if imglens are provided, make batched images
                images = _make_batched_images(images, imglens)

            image_processor_kwargs = {}
            if getattr(processor, "image_do_pan_and_scan", False):  # gemma3 image processor
                image_processor_kwargs.update(
                    {
                        "do_pan_and_scan": True,
                        "pan_and_scan_min_crop_size": 256,
                        "pan_and_scan_max_num_crops": 4,
                        "pan_and_scan_min_ratio_to_activate": 1.2,
                    }
                )

            mm_inputs.update(image_processor(images, return_tensors="pt", **image_processor_kwargs))

        if len(videos) != 0:
            video_processor: BaseImageProcessor = getattr(
                processor, "video_processor", getattr(processor, "image_processor", None)
            )
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )["videos"]
            if "videos" in inspect.signature(video_processor.preprocess).parameters:  # for qwen2_vl and video_llava
                mm_inputs.update(video_processor(images=None, videos=videos, return_tensors="pt"))
            else:  # for llava_next_video
                mm_inputs.update(video_processor(videos, return_tensors="pt"))

        if len(audios) != 0:
            feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None)
            audios = self._regularize_audios(
                audios,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )["audios"]
            mm_inputs.update(
                feature_extractor(
                    audios,
                    sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
                    return_attention_mask=True,
                    padding="max_length",
                    return_tensors="pt",
                )
            )
            mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask", None)  # prevent conflicts

        return mm_inputs


@dataclass
class BasePlugin(MMPluginMixin):
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Pre-process input messages before tokenization for VLMs."""
        self._validate_input(processor, images, videos, audios)
        return messages

    def process_token_ids(
        self,
        input_ids: list[int],
        labels: list[int] | None,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["MMProcessor"],
    ) -> tuple[list[int], list[int] | None]:
        r"""Pre-process token ids after tokenization for VLMs."""
        self._validate_input(processor, images, videos, audios)
        return input_ids, labels

    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build batched multimodal inputs for VLMs.

        Arguments:
            images: a list of image inputs, shape (num_images,)
            videos: a list of video inputs, shape (num_videos,)
            audios: a list of audio inputs, shape (num_audios,)
            imglens: number of images in each sample, shape (batch_size,)
            vidlens: number of videos in each sample, shape (batch_size,)
            audlens: number of audios in each sample, shape (batch_size,)
            batch_ids: token ids of input samples, shape (batch_size, seq_len)
            processor: a processor for pre-processing images and videos

        """
        self._validate_input(processor, images, videos, audios)
        return self._get_mm_inputs(images, videos, audios, processor)


@dataclass
class ErnieVLPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)

        image_processor: BaseImageProcessor = getattr(processor, "image_processor")

        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)

        image_idx, video_idx = 0, 0
        for message in messages:
            content = message["content"]
            image_token = self.image_token or "<|IMAGE_PLACEHOLDER|>"
            video_token = self.video_token or "<|VIDEO_PLACEHOLDER|>"
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[image_idx].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"Picture {image_idx + 1}:<|IMAGE_START|>{image_token * image_seqlen}<|IMAGE_END|>",
                    1,
                )
                image_idx += 1
            while VIDEO_PLACEHOLDER in content:
                video_seqlen = video_grid_thw[video_idx].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    VIDEO_PLACEHOLDER,
                    f"Video {video_idx + 1}:<|VIDEO_START|>{video_token * video_seqlen}<|VIDEO_END|>",
                    1,
                )
                video_idx += 1
            message["content"] = content
        return messages
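
# Worked example (assumed grid): with merge_size == 2 (merge_length == 4) and an
# image_grid_thw entry of (1, 36, 54), image_seqlen == 1 * 36 * 54 // 4 == 486,
# so the first placeholder expands to
# "Picture 1:<|IMAGE_START|>" + image_token * 486 + "<|IMAGE_END|>".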


@dataclass
class Gemma3Plugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        boi_token: str = getattr(processor, "boi_token")
        full_image_sequence: str = getattr(processor, "full_image_sequence")
        image_str = full_image_sequence if self.expand_mm_tokens else boi_token

        do_pan_and_scan: bool = getattr(processor, "image_do_pan_and_scan", False)
        if do_pan_and_scan:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if do_pan_and_scan:
                    image_placeholder_str = (
                        "Here is the original image {{image}} and here are some crops to help you see better "
                        + " ".join(["{{image}}"] * mm_inputs["num_crops"][0][num_image_tokens])
                    )
                else:
                    image_placeholder_str = "{{image}}"

                content = content.replace(IMAGE_PLACEHOLDER, image_placeholder_str, 1)
                num_image_tokens += 1

            message["content"] = content.replace("{{image}}", image_str)

        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("num_crops", None)
        mm_inputs["token_type_ids"] = _get_gemma3_token_type_ids(batch_ids, processor)
        return mm_inputs


class Gemma3nPlugin(Gemma3Plugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        boi_token: str = getattr(processor, "boi_token")
        boa_token: str = getattr(processor, "boa_token")
        full_image_sequence: str = getattr(processor, "full_image_sequence")
        full_audio_sequence: str = getattr(processor, "full_audio_sequence")
        image_str = full_image_sequence if self.expand_mm_tokens else boi_token
        audio_str = full_audio_sequence if self.expand_mm_tokens else boa_token

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, image_str, 1)

            while AUDIO_PLACEHOLDER in content:
                content = content.replace(AUDIO_PLACEHOLDER, audio_str, 1)

            message["content"] = content

        return messages


@dataclass
class InternVLPlugin(BasePlugin):
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "ProcessorMixin",
        **kwargs,
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        image_processor_kwargs = {}
        if getattr(processor, "crop_to_patches", False):
            image_processor_kwargs.update(
                {
                    "crop_to_patches": True,
                    "max_patches": 12,
                    "min_patches": 1,
                }
            )

        mm_inputs = {}
        image_video_patches = []

        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 1024 * 1024),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]

        if len(videos) != 0:
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )["videos"]

        if len(images) != 0:
            images = make_flat_list_of_images(images)
            image_inputs = image_processor(images=images, return_tensors="pt", **image_processor_kwargs)
            image_num_patches = image_inputs.pop("num_patches")
            image_pixel_values = image_inputs.pop("pixel_values")
            image_num_patches_indices = np.cumsum(image_num_patches)

        if len(videos) != 0:
            videos = make_batched_videos(videos)
            num_frames_per_video = [len(video) for video in videos]
            patch_indices = np.cumsum(num_frames_per_video)
            image_processor_kwargs["crop_to_patches"] = False
            video_inputs = image_processor(images=videos, return_tensors="pt", **image_processor_kwargs)
            video_num_patches = video_inputs.pop("num_patches")
            video_pixel_values = video_inputs.pop("pixel_values")
            video_num_patches_indices = np.cumsum(video_num_patches)

        # NOTE: interleaved image and video inputs are not supported
        if len(images) != 0 and image_pixel_values is not None:
            for i in range(len(images)):
                start_index = image_num_patches_indices[i - 1] if i > 0 else 0
                end_index = image_num_patches_indices[i]
                image_video_patches.append(image_pixel_values[start_index:end_index])

        if len(videos) != 0 and video_pixel_values is not None:
            patch_indices_with_prefix = [0] + list(patch_indices)
            for i in range(len(videos)):
                current_patch_index = patch_indices_with_prefix[i]
                end_patch_index = patch_indices_with_prefix[i + 1]
                start_index = video_num_patches_indices[current_patch_index - 1] if i > 0 else 0
                end_index = video_num_patches_indices[end_patch_index - 1]
                image_video_patches.append(video_pixel_values[start_index:end_index])

        if len(images) != 0 or len(videos) != 0:
            mm_inputs["pixel_values"] = torch.cat(image_video_patches, dim=0)

        if len(images) != 0:
            mm_inputs.update({"image_num_patches": image_num_patches})

        if len(videos) != 0:
            mm_inputs.update({"video_patch_indices": patch_indices})
            mm_inputs.update({"video_num_patches": video_num_patches})

        return mm_inputs
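
    # Worked example (assumed patch counts): two images with num_patches == [3, 1]
    # give image_num_patches_indices == [3, 4], so image 0 owns pixel_values[0:3]
    # and image 1 owns pixel_values[3:4]; the same cumulative-sum slicing recovers
    # the per-video patch spans from video_num_patches_indices.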

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["ProcessorMixin"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        image_seqlen = getattr(processor, "image_seq_length") if self.expand_mm_tokens else 1
        messages = deepcopy(messages)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)

        image_pixel_patch_list = mm_inputs.get("image_num_patches")  # number of patches per image
        video_num_patches = mm_inputs.get("video_num_patches")  # patch counts for all video frames
        video_patch_indices = mm_inputs.get("video_patch_indices")  # cumulative number of frames per video

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"<img>{'<IMG_CONTEXT>' * image_seqlen * image_pixel_patch_list[num_image_tokens]}</img>",
                    1,
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                current_patch_index = video_patch_indices[num_video_tokens - 1] if num_video_tokens > 0 else 0
                end_patch_index = video_patch_indices[num_video_tokens]
                num_patches = list(video_num_patches[current_patch_index:end_patch_index])
                video_replaced_prompt = "\n".join(
                    f"Frame{i + 1}: <img>{'<IMG_CONTEXT>' * image_seqlen * num_patches[i]}</img>"
                    for i in range(len(num_patches))
                )
                content = content.replace(VIDEO_PLACEHOLDER, video_replaced_prompt, 1)
                num_video_tokens += 1

            message["content"] = content

        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["ProcessorMixin"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("image_num_patches", None)
        mm_inputs.pop("video_patch_indices", None)
        mm_inputs.pop("video_num_patches", None)
        return mm_inputs


class KimiVLPlugin(BasePlugin):
    @override
    def process_messages(self, messages, images, videos, audios, processor):
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_hws = mm_inputs.get("image_grid_hws", [])
        else:
            image_grid_hws = [None] * len(images)

        num_image_tokens = 0
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        merge_length = math.prod(image_processor.merge_kernel_size)
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_hws[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"<|media_start|>image<|media_content|>{self.image_token * image_seqlen}<|media_end|>",
                    1,
                )
                num_image_tokens += 1

            message["content"] = content

        return messages


@dataclass
class Llama4Plugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                image_height, image_width = mm_inputs["pixel_values"][0].shape[-2:]
                num_patches_per_chunk = int(
                    (image_height // processor.patch_size)
                    * (image_width // processor.patch_size)
                    // processor.downsample_ratio
                )
                aspect_ratios = mm_inputs.pop("aspect_ratios")

        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            if self.expand_mm_tokens:
                placeholder_count = content.count(IMAGE_PLACEHOLDER)
                prompt_splits = content.split(IMAGE_PLACEHOLDER)
                new_content = []
                for local_image_index, split_part in enumerate(prompt_splits):
                    new_content.append(split_part)
                    if local_image_index < placeholder_count:
                        tokens_for_this_image = processor._prompt_split_image(
                            aspect_ratios[num_image_tokens], num_patches_per_chunk
                        )
                        num_image_tokens += 1
                        new_content.append(tokens_for_this_image)

                content = "".join(new_content)
            else:
                content = content.replace(IMAGE_PLACEHOLDER, self.image_token)

            message["content"] = content

        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("aspect_ratios", None)
        return mm_inputs


@dataclass
class LlavaPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0]))
                image_seqlen = (height // processor.patch_size) * (
                    width // processor.patch_size
                ) + processor.num_additional_image_tokens
                if processor.vision_feature_select_strategy == "default":
                    image_seqlen -= 1
        else:
            image_seqlen = 1

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)

            message["content"] = content.replace("{{image}}", self.image_token)

        return messages
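
# Worked example (typical llava-1.5 values): a 336x336 input with patch_size == 14
# yields (336 // 14) ** 2 == 576 patch tokens plus num_additional_image_tokens == 1
# for the CLS token; the "default" vision_feature_select_strategy drops CLS again,
# so each IMAGE_PLACEHOLDER expands to image_seqlen == 576 image tokens.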


@dataclass
class LlavaNextPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                image_sizes = iter(mm_inputs["image_sizes"].tolist())
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0][0]))

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    orig_height, orig_width = next(image_sizes)
                    image_seqlen = processor._get_number_of_features(orig_height, orig_width, height, width)
                    if processor.vision_feature_select_strategy == "default":
                        image_seqlen -= 1
                else:
                    image_seqlen = 1

                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
                num_image_tokens += 1

            message["content"] = content.replace("{{image}}", self.image_token)

        return messages


@dataclass
class LlavaNextVideoPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                image_sizes = iter(mm_inputs["image_sizes"].tolist())
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0][0]))

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    orig_height, orig_width = next(image_sizes)
                    image_seqlen = processor._get_number_of_features(orig_height, orig_width, height, width)
                    if processor.vision_feature_select_strategy == "default":
                        image_seqlen -= 1
                else:
                    image_seqlen = 1

                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)

            message["content"] = content.replace("{{image}}", self.image_token)

        if self.expand_mm_tokens:
            if "pixel_values_videos" in mm_inputs:
                one_video = to_numpy_array(mm_inputs.get("pixel_values_videos")[0])
                height, width = get_image_size(one_video[0])
                num_frames = one_video.shape[0]  # frame dim is always after batch dim
                image_seqlen = (height // processor.patch_size) * (width // processor.patch_size)
                video_seqlen = image_seqlen // 4 * num_frames  # divide by 4 needed for avg pooling layer
        else:
            video_seqlen = 1

        for message in messages:
            content = message["content"]
            while VIDEO_PLACEHOLDER in content:
                content = content.replace(VIDEO_PLACEHOLDER, "{{video}}" * video_seqlen, 1)

            message["content"] = content.replace("{{video}}", self.video_token)

        return messages
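
# Worked example (assumed shapes): 336x336 frames with patch_size == 14 give
# (336 // 14) ** 2 == 576 patches per frame; the 2x2 average pooling keeps
# 576 // 4 == 144 tokens per frame, so an 8-frame clip expands the video
# placeholder to 144 * 8 == 1152 tokens.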


@dataclass
class MiniCPMVPlugin(BasePlugin):
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
        **kwargs,
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            if "valid_image_nums_ls" in kwargs:
                valid_image_nums_ls = kwargs["valid_image_nums_ls"]
                new_images = []
                idx = 0
                for valid_image_nums in valid_image_nums_ls:
                    new_images.append(images[idx : idx + valid_image_nums])
                    idx += valid_image_nums

                images = new_images

            image_inputs = image_processor(
                images, do_pad=True, max_slice_nums=image_processor.max_slice_nums, return_tensors="pt"
            )
            mm_inputs.update(image_inputs)

        if len(videos) != 0:
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )["videos"]
            video_inputs = image_processor(videos, do_pad=True, max_slice_nums=2, return_tensors="pt")
            mm_inputs.update(video_inputs)

        if len(audios) != 0:
            audios = self._regularize_audios(
                audios,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )["audios"]
            if "valid_audio_nums_ls" in kwargs:
                valid_audio_nums_ls = kwargs["valid_audio_nums_ls"]
                audios_ls = []
                idx = 0
                for valid_audio_nums in valid_audio_nums_ls:
                    audios_ls.append(audios[idx : idx + valid_audio_nums])
                    idx += valid_audio_nums
            else:
                audios_ls = [audios]

            audio_features, audio_feature_lens, audio_phs = processor.audio_feature_extract(
                audios_ls,
                chunk_input=True,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )
            audio_feature_lens = [torch.tensor(audio_feature_len) for audio_feature_len in audio_feature_lens]
            mm_inputs.update({"audio_features": audio_features, "audio_feature_lens": audio_feature_lens})
            if kwargs.get("ret_phs", False):
                mm_inputs.update({"audio_phs": audio_phs})

        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        mm_inputs, audio_inputs = {}, {}
        if len(images) != 0 and len(videos) != 0:
            raise ValueError("MiniCPM-V model does not support input images and videos at the same time.")

        if len(videos) != 0:
            max_slice_nums = 2
            use_image_id = False
            mm_inputs = self._get_mm_inputs([], videos, [], processor)
        else:
            max_slice_nums = image_processor.max_slice_nums
            use_image_id = image_processor.use_image_id

        for i, message in enumerate(messages):
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                video_seqlen = len(mm_inputs["pixel_values"][num_video_tokens]) if self.expand_mm_tokens else 1
                content = content.replace(VIDEO_PLACEHOLDER, "{{image}}" * video_seqlen, 1)
                num_video_tokens += 1

            while AUDIO_PLACEHOLDER in content:
                content = content.replace(AUDIO_PLACEHOLDER, "{{audio}}", 1)
                num_audio_tokens += 1

            message["content"] = content.replace("{{image}}", "(<image>./</image>)").replace(
                "{{audio}}", "(<audio>./</audio>)"
            )

        if len(images):
            mm_inputs = self._get_mm_inputs(images, [], [], processor)

        if len(audios):
            audio_inputs = self._get_mm_inputs([], [], audios, processor, ret_phs=True)

        if self.expand_mm_tokens and mm_inputs:
            pattern = "(<image>./</image>)"
            image_sizes = mm_inputs["image_sizes"]
            idx = 0
            for index, message in enumerate(messages):
                text = message["content"]
                image_tags = re.findall(pattern, text)
                text_chunks = text.split(pattern)
                final_text = ""
                for i in range(len(image_tags)):
                    final_text = (
                        final_text
                        + text_chunks[i]
                        + image_processor.get_slice_image_placeholder(
                            image_sizes[0][idx], idx, max_slice_nums, use_image_id
                        )
                    )
                    idx += 1

                final_text += text_chunks[-1]
                messages[index]["content"] = final_text

        if self.expand_mm_tokens and audio_inputs:
            pattern = "(<audio>./</audio>)"
            idx = 0
            for index, message in enumerate(messages):
                text = message["content"]
                audio_tags = re.findall(pattern, text)
                text_chunks = text.split(pattern)
                final_text = ""
                for i in range(len(audio_tags)):
                    audio_placeholder = audio_inputs["audio_phs"][0][idx]
                    final_text = final_text + text_chunks[i] + audio_placeholder
                    idx += 1

                final_text += text_chunks[-1]
                messages[index]["content"] = final_text

        return messages
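
    # Flow sketch (hypothetical message, for illustration only): "<image>What is
    # this?" is first rewritten to "(<image>./</image>)What is this?"; with
    # expand_mm_tokens enabled, each "(<image>./</image>)" tag is then replaced
    # by the output of image_processor.get_slice_image_placeholder(...), which
    # inlines one placeholder block per image slice.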

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        # image bound
        image_bounds_list = []
        valid_image_nums_ls = []
        for i, input_ids in enumerate(batch_ids):
            input_ids_ = torch.tensor(input_ids)
            start_cond = (input_ids_ == processor.tokenizer.im_start_id) | (
                input_ids_ == processor.tokenizer.slice_start_id
            )
            end_cond = (input_ids_ == processor.tokenizer.im_end_id) | (input_ids_ == processor.tokenizer.slice_end_id)
            image_start_tokens = torch.where(start_cond)[0]
            image_start_tokens += 1
            image_end_tokens = torch.where(end_cond)[0]
            valid_image_nums_ls.append(imglens[i])
            image_bounds = torch.hstack(
                [
                    image_start_tokens.unsqueeze(-1),
                    image_end_tokens.unsqueeze(-1),
                ]
            )
            image_bounds_list.append(image_bounds)

        mm_inputs = self._get_mm_inputs(images, videos, [], processor, valid_image_nums_ls=valid_image_nums_ls)
        if "tgt_sizes" not in mm_inputs:
            dummy_data = [torch.empty(0) for _ in range(len(batch_ids))]
            mm_inputs.update({"tgt_sizes": dummy_data, "pixel_values": dummy_data, "image_sizes": dummy_data})

        mm_inputs.update({"image_bound": image_bounds_list})

        if len(audios) > 0:
            # audio bound
            audio_bounds_ls = []
            spk_bounds_ls = []
            valid_audio_nums_ls = []

            for input_ids, audiolen in zip(batch_ids, audlens):
                input_ids_ = torch.tensor(input_ids)
                audio_start_idx = torch.where(input_ids_ == processor.tokenizer.audio_start_id)[0]
                audio_end_idx = torch.where(input_ids_ == processor.tokenizer.audio_end_id)[0]
                assert len(audio_start_idx) == len(audio_end_idx)
                audio_bounds = torch.hstack([(audio_start_idx + 1).unsqueeze(-1), audio_end_idx.unsqueeze(-1)])
                audio_bounds_ls.append(audio_bounds)
                valid_audio_nums_ls.append(audiolen)

                spk_start_idx = torch.where(input_ids_ == processor.tokenizer.spk_start_id)[0]
                spk_end_idx = torch.where(input_ids_ == processor.tokenizer.spk_end_id)[0]
                assert len(spk_start_idx) == len(spk_end_idx)
                spk_bounds = torch.hstack([(spk_start_idx + 1).unsqueeze(-1), spk_end_idx.unsqueeze(-1)])
                spk_bounds_ls.append(spk_bounds)

            audio_inputs = self._get_mm_inputs([], [], audios, processor, valid_audio_nums_ls=valid_audio_nums_ls)
            mm_inputs.update(audio_inputs)
            mm_inputs.update({"audio_bounds": audio_bounds_ls, "spk_bounds": spk_bounds_ls})

        return mm_inputs
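
    # Example (hypothetical token ids): if input_ids contains im_start_id at
    # index 4 and the matching im_end_id at index 7, the stored bound pair is
    # [5, 7]; image features are scattered into positions 5..6, i.e. strictly
    # between the start and end marker tokens.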


@dataclass
class MllamaPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            num_image_tokens += content.count(IMAGE_PLACEHOLDER)
            message["content"] = content.replace(IMAGE_PLACEHOLDER, self.image_token)

        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor, imglens)
        if mm_inputs:
            num_tiles = mm_inputs.pop("num_tiles")
            image_token_id: int = getattr(processor, "image_token_id")
            max_image_tiles: int = getattr(processor.image_processor, "max_image_tiles")
            cross_attention_token_mask = [
                get_cross_attention_token_mask(input_ids, image_token_id) for input_ids in batch_ids
            ]
            mm_inputs["cross_attention_mask"] = torch.from_numpy(
                convert_sparse_cross_attention_mask_to_dense(
                    cross_attention_token_mask,
                    num_tiles=num_tiles,
                    max_num_tiles=max_image_tiles,
                    length=max(len(input_ids) for input_ids in batch_ids),
                )
            )  # shape: (batch_size, length, max_num_images, max_num_tiles)

        return mm_inputs
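
    # Shape sketch (hypothetical batch): 2 sequences padded to length 128, at
    # most 1 image of up to 4 tiles each -> cross_attention_mask has shape
    # (2, 128, 1, 4), where entry [b, t, i, k] marks whether text token t may
    # attend to tile k of image i.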


@dataclass
class PaliGemmaPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "", 1)
                num_image_tokens += 1

            message["content"] = content

        return messages

    @override
    def process_token_ids(
        self,
        input_ids: list[int],
        labels: list[int] | None,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["MMProcessor"],
    ) -> tuple[list[int], list[int] | None]:
        self._validate_input(processor, images, videos, audios)
        num_images = len(images)
        image_seqlen = processor.image_seq_length if self.expand_mm_tokens else 0  # skip mm token
        image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        input_ids = [image_token_id] * num_images * image_seqlen + input_ids
        if labels is not None:
            labels = [IGNORE_INDEX] * num_images * image_seqlen + labels

        return input_ids, labels
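
    # Worked example (hypothetical values): with 1 image and
    # image_seq_length == 256, 256 copies of the image token id are prepended to
    # input_ids, and labels gets 256 leading IGNORE_INDEX entries so the image
    # prefix is excluded from the loss.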

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        seqlens = [len(input_ids) for input_ids in batch_ids]
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs["token_type_ids"] = _get_paligemma_token_type_ids(imglens, seqlens, processor)
        return mm_inputs


@dataclass
class PixtralPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                # BC for transformers < 4.49.0
                if isinstance(mm_inputs["image_sizes"], list):
                    image_sizes = iter(mm_inputs["image_sizes"][0])
                else:
                    image_sizes = iter(mm_inputs["image_sizes"].tolist())

                image_break_token: str = getattr(processor, "image_break_token")
                image_end_token: str = getattr(processor, "image_end_token")

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    patch_size = processor.patch_size * getattr(processor, "spatial_merge_size", 1)
                    height, width = next(image_sizes)
                    num_height_tokens = height // patch_size
                    num_width_tokens = width // patch_size
                    replace_tokens = [[self.image_token] * num_width_tokens + [image_break_token]] * num_height_tokens
                    replace_tokens = [item for sublist in replace_tokens for item in sublist]  # flatten list
                    replace_tokens[-1] = image_end_token
                    replace_str = "".join(replace_tokens)
                else:
                    replace_str = self.image_token
                content = content.replace(IMAGE_PLACEHOLDER, replace_str, 1)

            message["content"] = content

        return messages
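
    # Layout sketch (hypothetical 2x3 patch grid): one image expands to
    #     IMG IMG IMG BREAK
    #     IMG IMG IMG END
    # i.e. every row of num_width_tokens image tokens ends with
    # image_break_token, and the very last break is replaced by image_end_token.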

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        # ref: https://github.com/huggingface/transformers/pull/35122
        # since transformers 4.49.0, `image_sizes` is a mandatory input for the Pixtral vision encoder forward
        # and can be passed to `LlavaForConditionalGeneration` as a parameter.
        if not is_transformers_version_greater_than("4.49.0"):
            mm_inputs.pop("image_sizes", None)
        return mm_inputs


@dataclass
class Qwen2AudioPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        bos_token: str = getattr(processor, "audio_bos_token")
        eos_token: str = getattr(processor, "audio_eos_token")
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs([], [], audios, processor)
            if "feature_attention_mask" in mm_inputs:
                audio_lengths = mm_inputs["feature_attention_mask"].sum(-1).tolist()

        for message in messages:
            content = message["content"]
            while AUDIO_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    audio_length = audio_lengths.pop(0)
                    input_length = (audio_length - 1) // 2 + 1
                    audio_seqlen = (input_length - 2) // 2 + 1
                else:
                    audio_seqlen = 1

                content = content.replace(
                    AUDIO_PLACEHOLDER, f"{bos_token}{self.audio_token * audio_seqlen}{eos_token}", 1
                )

            message["content"] = content

        return messages
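
    # Worked example (hypothetical mask sum): if a feature_attention_mask row
    # sums to audio_length == 100, then
    #     input_length = (100 - 1) // 2 + 1 == 50
    #     audio_seqlen = (50 - 2) // 2 + 1 == 25
    # so AUDIO_PLACEHOLDER expands to bos_token + 25 audio tokens + eos_token.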

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        return self._get_mm_inputs(images, videos, audios, processor)


@dataclass
class Qwen2VLPlugin(BasePlugin):
    vision_bos_token: str = "<|vision_start|>"
    vision_eos_token: str = "<|vision_end|>"

    @override
    def _preprocess_image(self, image: "ImageObject", **kwargs) -> "ImageObject":
        image = super()._preprocess_image(image, **kwargs)
        if min(image.width, image.height) < 28:
            width, height = max(image.width, 28), max(image.height, 28)
            image = image.resize((width, height))

        if image.width / image.height > 200:
            width, height = image.height * 180, image.height
            image = image.resize((width, height))

        if image.height / image.width > 200:
            width, height = image.width, image.width * 180
            image = image.resize((width, height))

        return image
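
    # Examples (hypothetical sizes): a 20x500 image is resized to 28x500 to meet
    # the 28-pixel minimum side, and a 10000x40 image (aspect ratio 250) is
    # resized to 7200x40 so its width / height ratio drops to 180, under the
    # 200 limit.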

    @override
    def _regularize_videos(self, videos: list["VideoInput"], **kwargs) -> "RegularizedVideoOutput":
        results, fps_per_video, durations = [], [], []
        for video in videos:
            frames: list[ImageObject] = []
            if _check_video_is_nested_images(video):
                for frame in video:
                    if not is_valid_image(frame) and not isinstance(frame, dict) and not os.path.exists(frame):
                        raise ValueError("Invalid image found in video frames.")

                frames = video
                fps_per_video.append(kwargs.get("video_fps", 2.0))
                durations.append(len(frames) / kwargs.get("video_fps", 2.0))
            else:
                container = av.open(video, "r")
                video_stream = next(stream for stream in container.streams if stream.type == "video")
                sample_indices = self._get_video_sample_indices(video_stream, **kwargs)
                container.seek(0)
                for frame_idx, frame in enumerate(container.decode(video_stream)):
                    if frame_idx in sample_indices:
                        frames.append(frame.to_image())

                if video_stream.duration is None:
                    fps_per_video.append(kwargs.get("video_fps", 2.0))
                    durations.append(len(frames) / kwargs.get("video_fps", 2.0))
                else:
                    fps_per_video.append(len(sample_indices) / float(video_stream.duration * video_stream.time_base))
                    durations.append(float(video_stream.duration * video_stream.time_base))
            if len(frames) % 2 != 0:
                frames.append(frames[-1])

            frames = self._regularize_images(frames, **kwargs)["images"]
            results.append(frames)

        return {"videos": results, "fps_per_video": fps_per_video, "durations": durations}

    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseVideoProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))

        if len(videos) != 0:
            video_data = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            mm_inputs.update(video_processor(videos=video_data["videos"], return_tensors="pt"))
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            if "second_per_grid_ts" in processor.model_input_names:
                mm_inputs["second_per_grid_ts"] = [temporal_patch_size / fps for fps in video_data["fps_per_video"]]

        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                video_seqlen = video_grid_thw[num_video_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    VIDEO_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_video_tokens += 1

            message["content"] = content

        return messages
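
    # Worked example (hypothetical grid): with merge_size == 2 and
    # image_grid_thw == (1, 36, 54), one IMAGE_PLACEHOLDER expands to
    #     image_seqlen = (1 * 36 * 54) // (2 ** 2) == 486
    # image tokens, wrapped in vision_bos_token / vision_eos_token.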


@dataclass
class Qwen3VLPlugin(Qwen2VLPlugin):
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))

        if len(videos) != 0:
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            video_metadata = [
                {"fps": getattr(processor, "video_fps", 24.0), "duration": duration, "total_num_frames": len(video)}
                for video, duration in zip(videos["videos"], videos["durations"])
            ]
            mm_inputs.update(
                video_processor(
                    videos=videos["videos"],
                    video_metadata=video_metadata,
                    fps=getattr(processor, "video_fps", 2.0),
                    return_metadata=True,
                )
            )
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            if "second_per_grid_ts" in processor.model_input_names:
                mm_inputs["second_per_grid_ts"] = [temporal_patch_size / fps for fps in videos["fps_per_video"]]

        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        video_processor: BaseImageProcessor = getattr(processor, "video_processor")

        image_merge_length: int = getattr(image_processor, "merge_size") ** 2
        video_merge_length: int = getattr(video_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            num_frames = video_grid_thw[0][0] if len(video_grid_thw) > 0 else 0  # assumes a single video for now
            video_metadata = mm_inputs.get("video_metadata", {})

        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            num_frames = 0
            timestamps = [0]

        for idx, message in enumerate(messages):
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = (
                    image_grid_thw[num_image_tokens].prod() // image_merge_length if self.expand_mm_tokens else 1
                )
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    metadata = video_metadata[idx]
                    timestamps = processor._calculate_timestamps(
                        metadata.frames_indices,
                        metadata.fps,
                        video_processor.merge_size,
                    )
                    video_structure = ""
                    for frame_index in range(num_frames):
                        video_seqlen = (
                            video_grid_thw[num_video_tokens][1:].prod() // video_merge_length
                            if self.expand_mm_tokens
                            else 1
                        )
                        timestamp_sec = timestamps[frame_index]
                        frame_structure = (
                            f"<{timestamp_sec:.1f} seconds>"
                            f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}"
                        )
                        video_structure += frame_structure
                else:
                    video_structure = f"{self.vision_bos_token}{self.video_token}{self.vision_eos_token}"

                content = content.replace(VIDEO_PLACEHOLDER, video_structure, 1)
                num_video_tokens += 1

            message["content"] = content

        return messages
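
    # Structure sketch (hypothetical values): for a 2-frame video with
    # timestamps [0.0, 0.5] and 81 video tokens per frame, each
    # VIDEO_PLACEHOLDER becomes
    #     "<0.0 seconds>" + bos + video_token * 81 + eos
    #   + "<0.5 seconds>" + bos + video_token * 81 + eos
    # where bos/eos are vision_bos_token and vision_eos_token.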

@dataclass
class GLM4VPlugin(Qwen2VLPlugin):
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))

        if len(videos) != 0:
            video_data = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            # prepare video metadata
            video_metadata = [
                {"fps": 2, "duration": duration, "total_frames": len(video)}
                for video, duration in zip(video_data["videos"], video_data["durations"])
            ]
            mm_inputs.update(video_processor(images=None, videos=video_data["videos"], video_metadata=video_metadata))

        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")

        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            num_frames = video_grid_thw[0][0] if len(video_grid_thw) > 0 else 0  # assumes a single video for now
            timestamps = mm_inputs.get("timestamps", [])

            if hasattr(timestamps, "tolist"):
                timestamps = timestamps.tolist()

            if not timestamps:
                timestamps_list = []
            elif isinstance(timestamps[0], list):
                timestamps_list = timestamps[0]
            else:
                timestamps_list = timestamps

            unique_timestamps = timestamps_list.copy()
            selected_timestamps = unique_timestamps[:num_frames]
            while len(selected_timestamps) < num_frames:
                selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)

        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            num_frames = 0
            selected_timestamps = [0]

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER, f"<|begin_of_image|>{self.image_token * image_seqlen}<|end_of_image|>", 1
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                video_structure = ""
                for frame_index in range(num_frames):
                    video_seqlen = (
                        video_grid_thw[num_video_tokens][1:].prod() // merge_length if self.expand_mm_tokens else 1
                    )
                    timestamp_sec = selected_timestamps[frame_index]
                    frame_structure = (
                        f"<|begin_of_image|>{self.image_token * video_seqlen}<|end_of_image|>{timestamp_sec}"
                    )
                    video_structure += frame_structure

                if not self.expand_mm_tokens:
                    video_structure = self.video_token

                content = content.replace(VIDEO_PLACEHOLDER, f"<|begin_of_video|>{video_structure}<|end_of_video|>", 1)
                num_video_tokens += 1

            message["content"] = content

        return messages
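
    # Structure sketch (hypothetical values): with 2 frames, 4 tokens per frame
    # and timestamps [0, 1], each VIDEO_PLACEHOLDER becomes
    #     <|begin_of_video|>
    #         <|begin_of_image|> image_token * 4 <|end_of_image|> 0
    #         <|begin_of_image|> image_token * 4 <|end_of_image|> 1
    #     <|end_of_video|>
    # (line breaks added here for readability only).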

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["ProcessorMixin"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("timestamps", None)
        return mm_inputs


@dataclass
class Qwen2OmniPlugin(Qwen2VLPlugin):
    audio_bos_token: str = "<|audio_start|>"
    audio_eos_token: str = "<|audio_end|>"

    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseVideoProcessor = getattr(processor, "video_processor", None)
        feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))
        if len(videos) != 0:
            video_dict = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            mm_inputs.update(video_processor(videos=video_dict["videos"], return_tensors="pt"))
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            mm_inputs["video_second_per_grid"] = torch.tensor(
                [temporal_patch_size / fps for fps in video_dict["fps_per_video"]]
            )

        if len(audios) != 0:
            audios = self._regularize_audios(
                audios,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )["audios"]
            mm_inputs.update(
                feature_extractor(
                    audios,
                    sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
                    return_attention_mask=True,
                    padding="max_length",
                    return_tensors="pt",
                )
            )
            mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask")  # prevent conflicts
        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)

        merge_length = processor.image_processor.merge_size**2
        use_audio_in_video = getattr(processor, "use_audio_in_video", False)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            if "feature_attention_mask" in mm_inputs:
                if processor.__class__.__name__ == "Qwen3OmniMoeProcessor":  # for qwen3omni
                    input_lengths = mm_inputs["feature_attention_mask"].sum(-1)
                    input_lengths_leave = input_lengths % 100
                    feature_lengths = (input_lengths_leave - 1) // 2 + 1
                    audio_lengths = ((feature_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13
                else:
                    input_lengths = (mm_inputs["feature_attention_mask"].sum(-1).numpy() - 1) // 2 + 1
                    audio_lengths = (input_lengths - 2) // 2 + 1
        else:
            mm_inputs = {}
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            audio_lengths = [None] * len(audios)

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1

            if (
                use_audio_in_video and len(audios) and len(videos)
            ):  # use the audio track of the video: handle video and audio tokens together
                if len(videos) != len(audios):
                    raise ValueError(
                        f"Number of videos ({len(videos)}) must match number of audios ({len(audios)}) when using audio in video."
                    )

                while VIDEO_PLACEHOLDER in content:
                    video_pos = content.find(VIDEO_PLACEHOLDER)
                    audio_pos = content.find(AUDIO_PLACEHOLDER, video_pos)
                    if audio_pos == -1 or audio_pos < video_pos:
                        raise ValueError(
                            f"Each {VIDEO_PLACEHOLDER} must be followed by an {AUDIO_PLACEHOLDER} when using audio in video."
                        )

                    audio_t_index = torch.arange(audio_lengths[num_audio_tokens])
                    video_t_index = (
                        torch.arange(video_grid_thw[num_video_tokens][0])
                        .view(-1, 1, 1)
                        .expand(
                            -1,
                            video_grid_thw[num_video_tokens][1] // image_processor.merge_size,
                            video_grid_thw[num_video_tokens][2] // image_processor.merge_size,
                        )
                        .flatten()
                        * mm_inputs["video_second_per_grid"][num_video_tokens]
                        * 25  # FIXME: hardcoded position_id_per_seconds = 25
                    ).long()
                    t_ntoken_per_chunk = 50  # FIXME: hardcoded position_id_per_seconds * seconds_per_chunk = 25 * 2
                    video_chunk_indices = processor.get_chunked_index(video_t_index, t_ntoken_per_chunk)
                    audio_chunk_indices = processor.get_chunked_index(audio_t_index, t_ntoken_per_chunk)
                    placeholder_string = ""
                    placeholder_string += self.vision_bos_token + self.audio_bos_token
                    for j in range(max(len(video_chunk_indices), len(audio_chunk_indices))):
                        video_chunk_index = video_chunk_indices[j] if j < len(video_chunk_indices) else None
                        audio_chunk_index = audio_chunk_indices[j] if j < len(audio_chunk_indices) else None
                        if video_chunk_index is not None:
                            placeholder_string += self.video_token * (video_chunk_index[1] - video_chunk_index[0])

                        if audio_chunk_index is not None:
                            placeholder_string += self.audio_token * (audio_chunk_index[1] - audio_chunk_index[0])

                    placeholder_string += self.audio_eos_token + self.vision_eos_token
                    content = content.replace(VIDEO_PLACEHOLDER, placeholder_string, 1)
                    content = content.replace(AUDIO_PLACEHOLDER, "", 1)
                    num_audio_tokens += 1
                    num_video_tokens += 1
            else:
                while AUDIO_PLACEHOLDER in content:
                    audio_seqlen = audio_lengths[num_audio_tokens] if self.expand_mm_tokens else 1
                    content = content.replace(
                        AUDIO_PLACEHOLDER,
                        f"{self.audio_bos_token}{self.audio_token * audio_seqlen}{self.audio_eos_token}",
                        1,
                    )
                    num_audio_tokens += 1

                while VIDEO_PLACEHOLDER in content:
                    video_seqlen = (
                        video_grid_thw[num_video_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                    )
                    content = content.replace(
                        VIDEO_PLACEHOLDER,
                        f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}",
                        1,
                    )
                    num_video_tokens += 1

            message["content"] = content

        return messages
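
    # Interleaving sketch: with use_audio_in_video, video and audio token
    # indices are bucketed into chunks of t_ntoken_per_chunk == 50 time
    # positions (25 position ids per second * 2 seconds) and emitted
    # alternately, e.g.
    #     vision_bos audio_bos [video chunk 0][audio chunk 0][video chunk 1]... audio_eos vision_eos
    # so temporally co-occurring video and audio tokens stay adjacent.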


@dataclass
class VideoLlavaPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        num_frames = 0
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values_images" in mm_inputs:
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values_images"][0]))
                num_frames = 1

            if "pixel_values_videos" in mm_inputs:
                one_video = to_numpy_array(mm_inputs["pixel_values_videos"][0])
                height, width = get_image_size(one_video[0])
                num_frames = one_video.shape[0]  # frame dim is always after batch dim

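            # video_seqlen is computed before the CLS adjustment, so the "default"
            # vision_feature_select_strategy only trims the image sequence length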
            if "pixel_values_images" in mm_inputs or "pixel_values_videos" in mm_inputs:
                image_seqlen = (height // processor.patch_size) * (
                    width // processor.patch_size
                ) + processor.num_additional_image_tokens
                video_seqlen = image_seqlen * num_frames
                if processor.vision_feature_select_strategy == "default":
                    image_seqlen -= 1
        else:
            image_seqlen, video_seqlen = 1, 1

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                content = content.replace(VIDEO_PLACEHOLDER, "{{video}}" * video_seqlen, 1)
                num_video_tokens += 1

            content = content.replace("{{image}}", self.image_token)
            message["content"] = content.replace("{{video}}", self.video_token)

        return messages


PLUGINS = {
    "base": BasePlugin,
    "ernie_vl": ErnieVLPlugin,
    "gemma3": Gemma3Plugin,
    "glm4v": GLM4VPlugin,
    "gemma3n": Gemma3nPlugin,
    "intern_vl": InternVLPlugin,
    "kimi_vl": KimiVLPlugin,
    "llama4": Llama4Plugin,
    "llava": LlavaPlugin,
    "llava_next": LlavaNextPlugin,
    "llava_next_video": LlavaNextVideoPlugin,
    "minicpm_v": MiniCPMVPlugin,
    "mllama": MllamaPlugin,
    "paligemma": PaliGemmaPlugin,
    "pixtral": PixtralPlugin,
    "qwen2_audio": Qwen2AudioPlugin,
    "qwen2_omni": Qwen2OmniPlugin,
    "qwen2_vl": Qwen2VLPlugin,
    "qwen3_vl": Qwen3VLPlugin,
    "video_llava": VideoLlavaPlugin,
}


def register_mm_plugin(name: str, plugin_class: type["BasePlugin"]) -> None:
    r"""Register a multimodal plugin."""
    if name in PLUGINS:
        raise ValueError(f"Multimodal plugin {name} already exists.")

    PLUGINS[name] = plugin_class
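
# Example (hypothetical names) of adding a custom plugin; `register_mm_plugin`
# must run before `get_mm_plugin` is called with the new name:
#
#     class MyVLPlugin(BasePlugin):
#         ...
#
#     register_mm_plugin("my_vl", MyVLPlugin)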


def get_mm_plugin(
    name: str,
    image_token: str | None = None,
    video_token: str | None = None,
    audio_token: str | None = None,
    **kwargs,
) -> "BasePlugin":
    r"""Get plugin for multimodal inputs."""
    if name not in PLUGINS:
        raise ValueError(f"Multimodal plugin `{name}` not found.")

    return PLUGINS[name](image_token, video_token, audio_token, **kwargs)
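

# Usage sketch (illustrative): the plugin name must be a key of PLUGINS, and the
# token strings depend on the model's chat template, so the values below are
# assumptions rather than canonical defaults:
#
#     plugin = get_mm_plugin("qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>")
#     messages = plugin.process_messages(messages, images, videos, audios, processor)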