import json
import math
import os
from concurrent.futures import ThreadPoolExecutor, as_completed

import torch
import torchaudio
import transformers
from PIL import Image
from tqdm import tqdm

from vita import conversation as conversation_lib
from vita.config import *
from vita.config import AudioFolder, FolderDict
from vita.config.dataset_config import *
from vita.constants import AUDIO_TOKEN_INDEX, GLOBAL_WEIGHTS_PATH, IGNORE_INDEX, IMAGE_TOKEN_INDEX
from vita.util.data_utils_video_audio import DataArguments, LazySupervisedDataset
from vita.util.data_utils_video_audio_neg_patch import find_closest_aspect_ratio
from vita.util.mm_utils import tokenizer_image_audio_token, tokenizer_image_token

# Tokens contributed per image patch and the sample-length threshold used below.
image_token_num = 256
token_thre = 9500

# Dataset mixture to check; later assignments override earlier ones,
# so only the last uncommented line takes effect.
# datasets = NLP + HumanCentric + VideoQA + NaturalQA + VideoCap + OCRCap + NaturalCap
datasets = NaturalCap0 + OCRCap0 + VideoCap0 + NaturalQA0
# datasets = VideoQA + HumanCentric + NLP
# datasets = [SGInternvid0]
datasets = NaturalCap0
datasets = OCRCap0
datasets = VideoCap0 + NaturalQA0 + [TextSFT0]

out_file_name = "debug.json"

parser = transformers.HfArgumentParser(DataArguments)
tokenizer = transformers.AutoTokenizer.from_pretrained(
    f"{GLOBAL_WEIGHTS_PATH}/Mixtral-8x7B_New/mg2hg",
    cache_dir=None,
    model_max_length=8192,
    padding_side="right",
    use_fast=True,
)

long_json = []


def dynamic_preprocess(
    image, min_num=2, max_num=12, image_size=448, use_thumbnail=False, img_mean=0
):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j)
        for n in range(min_num, max_num + 1)
        for i in range(1, n + 1)
        for j in range(1, n + 1)
        if i * j <= max_num and i * j >= min_num
    )
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size
    )

    # expand target_aspect_ratio to even for each side, then count the 2x2-merged blocks
    new_target_aspect_ratio = [e if e % 2 == 0 else e + 1 for e in target_aspect_ratio]
    blocks_big = int(0.5 * new_target_aspect_ratio[0] * 0.5 * new_target_aspect_ratio[1])
    return blocks_big


def get_wav_duration(file_path):
    # duration in seconds = number of samples / sample rate
    waveform, sample_rate = torchaudio.load(file_path)
    duration = waveform.size(1) / sample_rate
    return duration


def process_item(item, tokenizer):
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
    source = item["conversations"]

    conv.messages = []
    modality = "lang"
    for j, sentence in enumerate(source):
        role = roles[sentence["from"]]
        assert role == conv.roles[j % 2], f"{source}"
        conv.append_message(role, sentence["value"])
        # an "<image>" placeholder in any turn marks the sample as an image sample
        if "<image>" in sentence["value"]:
            modality = "image"
        elif "