Unverified Commit e1020dc5 authored by Mick, committed by GitHub

refactor: simplify MultimodalTokens logic (#7924)

parent 3586b4ce
......@@ -21,7 +21,7 @@ class BaseMultiModalProcessorOutput:
# input_text, with each frame of video/image represented with an image_token
input_text: str
# frames loaded from image and video, in given order
# frames loaded from image, in given order
images: Optional[list[Union[Image.Image, dict]]] = None
# videos
......@@ -44,14 +44,26 @@ class BaseMultiModalProcessorOutput:
@dataclasses.dataclass
class MultimodalSpecialTokens:
image_token: Optional[Union[int, str, List[str]]] = None
video_token: Optional[Union[int, str, List[str]]] = None
audio_token: Optional[Union[int, str, List[str]]] = None
image_token: Optional[Union[str, List[str]]] = None
video_token: Optional[Union[str, List[str]]] = None
audio_token: Optional[Union[str, List[str]]] = None
image_token_id: Optional[int] = None
video_token_id: Optional[int] = None
audio_token_id: Optional[int] = None
image_token_regex: Optional[re.Pattern] = None
video_token_regex: Optional[re.Pattern] = None
audio_token_regex: Optional[re.Pattern] = None
combined_regex: Optional[re.Pattern] = None
def build(self, processor):
self.convert_to_strs(processor)
self.parse_regex()
self.get_combined_regex()
return self
def convert_to_str(self, token: Union[str, int], processor) -> str:
if token is None:
return token
......@@ -60,11 +72,14 @@ class MultimodalSpecialTokens:
return processor.tokenizer.convert_ids_to_tokens([token])[0]
def convert_to_strs(self, processor):
self.image_token = self.convert_to_str(self.image_token, processor)
self.video_token = self.convert_to_str(self.video_token, processor)
self.audio_token = self.convert_to_str(self.audio_token, processor)
def get_modality_of_token(self, token) -> Optional[Modality]:
if not self.image_token:
self.image_token = self.convert_to_str(self.image_token_id, processor)
if not self.video_token:
self.video_token = self.convert_to_str(self.video_token_id, processor)
if not self.audio_token:
self.audio_token = self.convert_to_str(self.audio_token_id, processor)
def get_modality_of_token(self, token: str) -> Optional[Modality]:
"""
:return: the modality associated with the given token, if the token is a special token or matches the multimodal token regex
"""
......@@ -94,7 +109,12 @@ class MultimodalSpecialTokens:
if self.audio_token_regex is None and self.audio_token is not None:
self.audio_token_regex = re.compile(re.escape(self.audio_token))
def combine_regex(self) -> re.Pattern:
def get_combined_regex(self) -> re.Pattern:
"""
Builds, caches, and returns the regex used to split the input string into text segments and multimodal special tokens
"""
if self.combined_regex:
return self.combined_regex
tokens = [
self.image_token_regex,
self.video_token_regex,
......@@ -107,7 +127,8 @@ class MultimodalSpecialTokens:
patterns.append(t.pattern)
flags |= t.flags
combined = "(" + "|".join(f"(?:{p})" for p in patterns) + ")"
return re.compile(combined, flags)
self.combined_regex = re.compile(combined, flags)
return self.combined_regex
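A minimal sketch of how the combined regex splits a prompt; the token strings here are assumptions for illustration, not tied to any particular model:

import re

# Assumed token strings; real values come from MultimodalSpecialTokens.
image_token_regex = re.compile(re.escape("<image>"))
audio_token_regex = re.compile(re.escape("<audio>"))
patterns = [r.pattern for r in (image_token_regex, audio_token_regex)]
combined = re.compile("(" + "|".join(f"(?:{p})" for p in patterns) + ")")
# The capturing group keeps the special tokens in the split result.
print(combined.split("describe <image> and then <audio> please"))
# ['describe ', '<image>', ' and then ', '<audio>', ' please']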
class BaseMultimodalProcessor(ABC):
......@@ -341,9 +362,8 @@ class BaseMultimodalProcessor(ABC):
discard_alpha_channel: if True, discards the alpha channel in the returned images
"""
multimodal_tokens.convert_to_strs(self._processor)
multimodal_tokens.parse_regex()
multimodal_tokens_pattern = multimodal_tokens.combine_regex()
multimodal_tokens_pattern = multimodal_tokens.get_combined_regex()
if isinstance(prompt, list) and return_text:
assert len(prompt) and isinstance(prompt[0], int)
prompt = self._processor.tokenizer.decode(prompt)
......@@ -445,7 +465,6 @@ class BaseMultimodalProcessor(ABC):
return result = [(2,4),(6,7)]
"""
mask = input_ids == mm_token_id
start_positions = (mask & ~torch.roll(mask, 1)).nonzero(as_tuple=True)[0]
end_positions = (mask & ~torch.roll(mask, -1)).nonzero(as_tuple=True)[0]
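A self-contained sketch of the offset computation above, reproducing the documented [(2,4),(6,7)] example with a made-up input_ids tensor and mm_token_id:

import torch

# Contiguous runs of mm_token_id become (start, end) index pairs.
input_ids = torch.tensor([1, 5, 9, 9, 9, 2, 9, 9, 3])
mm_token_id = 9
mask = input_ids == mm_token_id
start_positions = (mask & ~torch.roll(mask, 1)).nonzero(as_tuple=True)[0]
end_positions = (mask & ~torch.roll(mask, -1)).nonzero(as_tuple=True)[0]
print(list(zip(start_positions.tolist(), end_positions.tolist())))  # [(2, 4), (6, 7)]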
......@@ -554,7 +573,9 @@ class BaseMultimodalProcessor(ABC):
return collected_items, input_ids, ret
def process_and_combine_mm_data(
self, base_output: BaseMultiModalProcessorOutput
self,
base_output: BaseMultiModalProcessorOutput,
mm_tokens: MultimodalSpecialTokens,
) -> Tuple[List[MultimodalDataItem], torch.Tensor, dict]:
"""
Process multimodal data and return the combined multimodal items and input_ids.
......@@ -618,22 +639,14 @@ class BaseMultimodalProcessor(ABC):
# Add offsets to all items
for mm_item in all_collected_items:
if mm_item.modality in [Modality.IMAGE, Modality.MULTI_IMAGES]:
mm_item.offsets = self.get_mm_items_offset(
input_ids=input_ids,
mm_token_id=self.IM_TOKEN_ID,
)
elif mm_item.modality == Modality.AUDIO:
mm_item.offsets = self.get_mm_items_offset(
input_ids=input_ids,
mm_token_id=self.AUDIO_TOKEN_ID,
)
elif mm_item.modality == Modality.VIDEO:
mm_item.offsets = self.get_mm_items_offset(
input_ids=input_ids,
mm_token_id=self.VIDEO_TOKEN_ID,
)
else:
raise ValueError(f"Unknown modality: {mm_item.modality}")
mm_item.offsets = self.get_mm_items_offset(
input_ids=input_ids,
mm_token_id={
Modality.IMAGE: mm_tokens.image_token_id,
Modality.MULTI_IMAGES: mm_tokens.image_token_id,
Modality.VIDEO: mm_tokens.video_token_id,
Modality.AUDIO: mm_tokens.audio_token_id,
}.get(mm_item.modality, None),
)
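A stand-alone sketch of the dict lookup that replaces the per-modality if/elif chain; the Modality enum and token ids below are stand-ins for illustration only:

from enum import Enum, auto

class Modality(Enum):  # stand-in for sglang's Modality
    IMAGE = auto()
    MULTI_IMAGES = auto()
    VIDEO = auto()
    AUDIO = auto()

image_token_id, video_token_id, audio_token_id = 101, 102, 103  # assumed ids

def token_id_for(modality):
    # Single and multi-image items share the image token id; unknown modalities map to None.
    return {
        Modality.IMAGE: image_token_id,
        Modality.MULTI_IMAGES: image_token_id,
        Modality.VIDEO: video_token_id,
        Modality.AUDIO: audio_token_id,
    }.get(modality, None)

print(token_id_for(Modality.VIDEO))  # 102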
return all_collected_items, input_ids, ret
......@@ -33,7 +33,9 @@ class DeepseekVL2ImageProcessor(BaseMultimodalProcessor):
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
self.IMAGE_TOKEN = "<image>"
self.mm_tokens = MultimodalSpecialTokens(image_token="<image>").build(
_processor
)
async def process_mm_data_async(
self,
......@@ -47,7 +49,7 @@ class DeepseekVL2ImageProcessor(BaseMultimodalProcessor):
base_output = self.load_mm_data(
input_text,
image_data=image_data,
multimodal_tokens=MultimodalSpecialTokens(image_token=self.IMAGE_TOKEN),
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
)
res = self.process_mm_data(
......
......@@ -4,7 +4,6 @@ from typing import Dict, List, Union
from sglang.srt.managers.multimodal_processor import (
BaseMultimodalProcessor as SGLangBaseProcessor,
)
from sglang.srt.managers.schedule_batch import Modality, MultimodalDataItem
from sglang.srt.models.gemma3_mm import Gemma3ForConditionalGeneration
from sglang.srt.multimodal.processors.base_processor import MultimodalSpecialTokens
......@@ -17,15 +16,17 @@ class Gemma3SGLangImageProcessor(SGLangBaseProcessor):
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
# The single, pre-expanded image token.
self.IMAGE_TOKEN = "<start_of_image>"
# The regex that matches expanded image tokens.
self.IMAGE_TOKEN_REGEX = re.compile(
r"<start_of_image>(?:(?:<image_soft_token>)*<end_of_image>)?"
)
self.IM_START_TOKEN_ID = hf_config.boi_token_index
self.IM_END_TOKEN_ID = hf_config.eoi_token_index
self.IM_TOKEN_ID = hf_config.image_token_index
self.mm_tokens = MultimodalSpecialTokens(
# The single, pre-expanded image token.
image_token="<start_of_image>",
image_token_id=hf_config.image_token_index,
# The regex that matches expanded image tokens.
image_token_regex=re.compile(
r"<start_of_image>(?:(?:<image_soft_token>)*<end_of_image>)?"
),
).build(_processor)
async def process_mm_data_async(
self,
......@@ -39,14 +40,14 @@ class Gemma3SGLangImageProcessor(SGLangBaseProcessor):
base_output = self.load_mm_data(
prompt=input_text,
image_data=image_data,
multimodal_tokens=MultimodalSpecialTokens(
image_token=self.IMAGE_TOKEN, image_token_regex=self.IMAGE_TOKEN_REGEX
),
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
discard_alpha_channel=True,
)
mm_items, input_ids, _ = self.process_and_combine_mm_data(base_output)
mm_items, input_ids, _ = self.process_and_combine_mm_data(
base_output, self.mm_tokens
)
return {
"input_ids": input_ids.tolist(),
"mm_items": mm_items,
......
......@@ -30,23 +30,23 @@ class Gemma3nSGLangProcessor(SGLangBaseProcessor):
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
self.IMAGE_TOKEN = "<image_soft_token>"
self.IMAGE_TOKEN_REGEX = re.compile(
r"<start_of_image>(?:(?:<image_soft_token>)*<end_of_image>)?"
)
self.AUDIO_TOKEN = "<audio_soft_token>"
self.AUDIO_TOKEN_REGEX = re.compile(
r"<start_of_audio>(?:(?:<audio_soft_token>)*<end_of_audio>)?"
)
self.IM_TOKEN_ID = hf_config.image_token_id
self.IM_START_TOKEN_ID = hf_config.boi_token_id
self.IM_END_TOKEN_ID = hf_config.eoi_token_id
self.AUDIO_TOKEN_ID = hf_config.audio_token_id
self.AUDIO_START_TOKEN_ID = hf_config.boa_token_id
self.AUDIO_END_TOKEN_ID = hf_config.eoa_token_id
self.mm_tokens = MultimodalSpecialTokens(
image_token="<image_soft_token>",
image_token_id=hf_config.image_token_id,
image_token_regex=re.compile(
r"<start_of_image>(?:(?:<image_soft_token>)*<end_of_image>)?"
),
audio_token="<audio_soft_token>",
audio_token_id=hf_config.audio_token_id,
audio_token_regex=re.compile(
r"<start_of_audio>(?:(?:<audio_soft_token>)*<end_of_audio>)?"
),
).build(_processor)
async def process_mm_data_async(
self,
......@@ -64,19 +64,17 @@ class Gemma3nSGLangProcessor(SGLangBaseProcessor):
image_data=image_data,
audio_data=audio_data,
max_req_input_len=max_req_input_len,
multimodal_tokens=MultimodalSpecialTokens(
image_token=self.IMAGE_TOKEN,
image_token_regex=self.IMAGE_TOKEN_REGEX,
audio_token=self.AUDIO_TOKEN,
audio_token_regex=self.AUDIO_TOKEN_REGEX,
),
multimodal_tokens=self.mm_tokens,
)
mm_items, input_ids, _ = self.process_and_combine_mm_data(base_output)
mm_items, input_ids, _ = self.process_and_combine_mm_data(
base_output, self.mm_tokens
)
return {
"input_ids": input_ids.tolist(),
"mm_items": mm_items,
"im_token_id": self.IM_TOKEN_ID,
"audio_token_id": self.AUDIO_TOKEN_ID,
# TODO(mick): could we return MultimodalSpecialTokens directly?
"im_token_id": self.mm_tokens.image_token_id,
"audio_token_id": self.mm_tokens.audio_token_id,
}
......@@ -24,7 +24,6 @@ class InternVLImageProcessor(BaseMultimodalProcessor):
self.IMG_CONTEXT_TOKEN = "<IMG_CONTEXT>"
self.IMG_START_TOKEN = "<img>"
self.IMG_END_TOKEN = "</img>"
self.IMG_TOKEN = "<image>"
self.num_image_token = int(
(image_size // patch_size) ** 2 * (hf_config.downsample_ratio**2)
)
......@@ -32,9 +31,10 @@ class InternVLImageProcessor(BaseMultimodalProcessor):
tokenizer = self._processor
self.img_start_token_id = tokenizer.convert_tokens_to_ids(self.IMG_START_TOKEN)
self.img_end_token_id = tokenizer.convert_tokens_to_ids(self.IMG_END_TOKEN)
self.img_context_token_id = tokenizer.convert_tokens_to_ids(
self.IMG_CONTEXT_TOKEN
)
self.mm_tokens = MultimodalSpecialTokens(
image_token="<image>",
image_token_id=tokenizer.convert_tokens_to_ids(self.IMG_CONTEXT_TOKEN),
).build(_image_processor)
@staticmethod
def build_transform(input_size):
......@@ -175,7 +175,7 @@ class InternVLImageProcessor(BaseMultimodalProcessor):
base_output = self.load_mm_data(
prompt=input_text,
image_data=image_data,
multimodal_tokens=MultimodalSpecialTokens(image_token=self.IMG_TOKEN),
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
discard_alpha_channel=True,
)
......@@ -219,7 +219,7 @@ class InternVLImageProcessor(BaseMultimodalProcessor):
input_ids = tokenizer(input_text, return_tensors="pt")["input_ids"].flatten()
image_offsets = self.get_mm_items_offset(
input_ids=input_ids,
mm_token_id=self.img_context_token_id,
mm_token_id=self.mm_tokens.image_token_id,
)
items = [
MultimodalDataItem(
......@@ -234,5 +234,5 @@ class InternVLImageProcessor(BaseMultimodalProcessor):
"mm_items": items,
"im_start_id": self.img_start_token_id,
"im_end_id": self.img_end_token_id,
"im_token_id": self.img_context_token_id,
"im_token_id": self.mm_tokens.image_token_id,
}
......@@ -11,8 +11,12 @@ from sglang.srt.multimodal.processors.base_processor import (
class JanusProImageProcessor(BaseMultimodalProcessor):
models = [MultiModalityCausalLM]
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
def __init__(self, hf_config, server_args, processor):
super().__init__(hf_config, server_args, processor)
self.mm_tokens = MultimodalSpecialTokens(
image_token=processor.image_token
).build(processor)
async def process_mm_data_async(
self,
......@@ -27,9 +31,7 @@ class JanusProImageProcessor(BaseMultimodalProcessor):
base_out = self.load_mm_data(
prompt=input_text,
image_data=image_data,
multimodal_tokens=MultimodalSpecialTokens(
image_token=processor.image_token
),
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
)
......
import re
from typing import Any, Dict, List, Optional, Union
from typing import Dict, List, Union
import torch
from sglang.srt.managers.schedule_batch import Modality, MultimodalDataItem
from sglang.srt.models.kimi_vl import KimiVLForConditionalGeneration
from sglang.srt.multimodal.processors.base_processor import (
BaseMultimodalProcessor as SGLangBaseProcessor,
......@@ -17,9 +14,12 @@ class KimiVLImageProcessor(SGLangBaseProcessor):
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
self.IMAGE_TOKEN = "<|media_pad|>"
self.IMAGE_TOKEN_REGEX = re.compile(r"(?:<\|media_pad\|>)+")
self.IM_TOKEN_ID = _processor.tokenizer.convert_tokens_to_ids(self.IMAGE_TOKEN)
self.mm_tokens = MultimodalSpecialTokens(
image_token="<|media_pad|>",
# TODO: could we convert in MultimodalSpecialTokens?
image_token_id=hf_config.media_placeholder_token_id,
image_token_regex=re.compile(r"(?:<\|media_pad\|>)+"),
).build(_processor)
async def process_mm_data_async(
self,
......@@ -33,16 +33,16 @@ class KimiVLImageProcessor(SGLangBaseProcessor):
base_output = self.load_mm_data(
prompt=input_text,
image_data=image_data,
multimodal_tokens=MultimodalSpecialTokens(
image_token=self.IMAGE_TOKEN, image_token_regex=self.IMAGE_TOKEN_REGEX
),
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
)
mm_items, input_ids, _ = self.process_and_combine_mm_data(base_output)
mm_items, input_ids, _ = self.process_and_combine_mm_data(
base_output, self.mm_tokens
)
return {
"input_ids": input_ids.tolist(),
"mm_items": mm_items,
"im_token_id": self.IM_TOKEN_ID,
"im_token_id": self.mm_tokens.image_token_id,
}
......@@ -17,9 +17,11 @@ class MiniCPMMultimodalProcessor(BaseMultimodalProcessor):
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
self.image_token = "(<image>./</image>)"
self.audio_token = "(<audio>./</audio>)"
self.video_token = "(<video>./</video>)"
self.mm_tokens = MultimodalSpecialTokens(
image_token="(<image>./</image>)",
audio_token="(<audio>./</audio>)",
video_token="(<video>./</video>)",
).build(_processor)
async def process_mm_data_async(
self,
......@@ -35,11 +37,7 @@ class MiniCPMMultimodalProcessor(BaseMultimodalProcessor):
max_req_input_len=max_req_input_len,
audio_data=audio_data,
image_data=image_data,
multimodal_tokens=MultimodalSpecialTokens(
image_token=self.image_token,
video_token=self.video_token,
audio_token=self.audio_token,
),
multimodal_tokens=self.mm_tokens,
)
if base_output is None:
return None
......
......@@ -26,8 +26,8 @@ class Mllama4ImageProcessor(BaseMultimodalProcessor):
self.eoi_token_index = hf_config.eoi_token_index
self.image_token_index = hf_config.image_token_index
self.multimodal_tokens = MultimodalSpecialTokens(
image_token=_processor.image_token
)
image_token=_processor.image_token,
).build(_processor)
async def process_mm_data_async(
self,
......
......@@ -21,7 +21,7 @@ class Phi4MMImageProcessor(BaseMultimodalProcessor):
super().__init__(hf_config, server_args, _processor)
self.multimodal_tokens = MultimodalSpecialTokens(
image_token=_IMAGE_SPECIAL_TOKEN,
)
).build(_processor)
async def process_mm_data_async(
self,
......
......@@ -55,7 +55,7 @@ class PixtralProcessor(BaseMultimodalProcessor):
self.patch_size = self.vision_config.patch_size
self.multimodal_tokens = MultimodalSpecialTokens(
image_token=_processor.image_token
)
).build(_processor)
_processor.tokenizer.add_special_tokens(
{
"pad_token": getattr(hf_config, "pad_token", self.PAD_TOKEN),
......
......@@ -203,16 +203,9 @@ class Qwen2_5VLImageProcessor(SGLangBaseProcessor):
def __init__(self, hf_config, server_args, _processor):
super().__init__(hf_config, server_args, _processor)
# The single, pre-expanded image token.
self.IMAGE_TOKEN = "<|vision_start|><|image_pad|><|vision_end|>"
# The regex that matches expanded image tokens.
self.IMAGE_TOKEN_REGEX = re.compile(
r"<\|vision_start\|>(?:<\|image_pad\|>)+<\|vision_end\|>"
)
self.IM_START_TOKEN_ID = hf_config.vision_start_token_id
self.IM_END_TOKEN_ID = hf_config.vision_end_token_id
self.IM_TOKEN_ID = hf_config.image_token_id
self.VIDEO_TOKEN_ID = hf_config.video_token_id
self.vision_start_token_id = hf_config.vision_start_token_id
self.vision_end_token_id = hf_config.vision_end_token_id
self.NUM_TOKEN_PER_FRAME = 770
......@@ -220,12 +213,14 @@ class Qwen2_5VLImageProcessor(SGLangBaseProcessor):
self.MIN_PIXELS = 4 * 28 * 28
self.MAX_PIXELS = 16384 * 28 * 28
self.MAX_RATIO = 200
# TODO(mick): move all MultimodalSpecialTokens initializations into processor init
self.mm_special_tokens = MultimodalSpecialTokens(
image_token=self.IMAGE_TOKEN,
image_token_regex=self.IMAGE_TOKEN_REGEX,
video_token=self.VIDEO_TOKEN_ID,
)
self.mm_tokens = MultimodalSpecialTokens(
image_token="<|vision_start|><|image_pad|><|vision_end|>",
image_token_id=hf_config.image_token_id,
image_token_regex=re.compile(
r"<\|vision_start\|>(?:<\|image_pad\|>)+<\|vision_end\|>"
),
video_token_id=hf_config.video_token_id,
).build(_processor)
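The expanded-image-token regex above, exercised on a hedged example prompt (assuming the pad token has already been expanded twice):

import re

pattern = re.compile(r"<\|vision_start\|>(?:<\|image_pad\|>)+<\|vision_end\|>")
prompt = "describe <|vision_start|><|image_pad|><|image_pad|><|vision_end|> briefly"
print(pattern.findall(prompt))
# ['<|vision_start|><|image_pad|><|image_pad|><|vision_end|>']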
async def process_mm_data_async(
self,
......@@ -241,7 +236,7 @@ class Qwen2_5VLImageProcessor(SGLangBaseProcessor):
prompt=input_text,
image_data=image_data,
video_data=request_obj.video_data,
multimodal_tokens=self.mm_special_tokens,
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
)
......@@ -255,13 +250,15 @@ class Qwen2_5VLImageProcessor(SGLangBaseProcessor):
await preprocess_video(video) for video in base_output.videos
]
mm_items, input_ids, ret = self.process_and_combine_mm_data(base_output)
mm_items, input_ids, ret = self.process_and_combine_mm_data(
base_output, self.mm_tokens
)
input_ids = input_ids.flatten()
mrope_positions, mrope_position_delta = MRotaryEmbedding.get_rope_index(
spatial_merge_size=self.hf_config.vision_config.spatial_merge_size,
image_token_id=self.IM_TOKEN_ID,
video_token_id=self.VIDEO_TOKEN_ID,
image_token_id=self.mm_tokens.image_token_id,
video_token_id=self.mm_tokens.video_token_id,
vision_start_token_id=self.vision_start_token_id,
model_type=self.hf_config.model_type,
tokens_per_second=getattr(
......@@ -279,8 +276,8 @@ class Qwen2_5VLImageProcessor(SGLangBaseProcessor):
"mm_items": mm_items,
"im_start_id": self.IM_START_TOKEN_ID,
"im_end_id": self.IM_END_TOKEN_ID,
"im_token_id": self.IM_TOKEN_ID,
"video_token_id": self.VIDEO_TOKEN_ID,
"im_token_id": self.mm_tokens.image_token_id,
"video_token_id": self.mm_tokens.video_token_id,
"mrope_positions": mrope_positions,
"mrope_position_delta": mrope_position_delta,
}
from typing import Any, Dict, List, Optional, Type, cast
from typing import Any, Dict, List, Optional, Type
import torch.nn as nn
from transformers.configuration_utils import PretrainedConfig
......@@ -10,7 +10,6 @@ from sglang.srt.managers.io_struct import (
GenerateReqInput,
ImageDataInputItem,
)
from sglang.srt.managers.schedule_batch import Modality, MultimodalDataItem
from sglang.srt.models.vila import VILAForConditionalGeneration
from sglang.srt.multimodal.processors.base_processor import (
BaseMultimodalProcessor,
......@@ -37,8 +36,11 @@ class VILAMultimodalProcessor(BaseMultimodalProcessor):
_processor: VILAProcessor,
) -> None:
super().__init__(hf_config, server_args, _processor)
self.IM_TOKEN_ID = hf_config.image_token_id
self.VIDEO_TOKEN_ID = hf_config.video_token_id
self.mm_tokens = MultimodalSpecialTokens(
image_token=self._processor.tokenizer.image_token,
image_token_id=hf_config.image_token_id,
video_token_id=hf_config.video_token_id,
).build(_processor)
async def process_mm_data_async(
self,
......@@ -50,18 +52,18 @@ class VILAMultimodalProcessor(BaseMultimodalProcessor):
) -> Optional[Dict[str, Any]]:
base_output = self.load_mm_data(
prompt=input_text,
multimodal_tokens=MultimodalSpecialTokens(
image_token=self._processor.tokenizer.image_token
),
multimodal_tokens=self.mm_tokens,
max_req_input_len=max_req_input_len,
image_data=image_data,
)
mm_items, input_ids, _ = self.process_and_combine_mm_data(base_output)
mm_items, input_ids, _ = self.process_and_combine_mm_data(
base_output, self.mm_tokens
)
return {
"input_ids": input_ids.tolist(),
"mm_items": mm_items,
"im_token_id": self.IM_TOKEN_ID,
"video_token_id": self.VIDEO_TOKEN_ID,
"im_token_id": self.mm_tokens.image_token_id,
"video_token_id": self.mm_tokens.video_token_id,
}