from llava.model.builder import load_pretrained_model
from llava.mm_utils import get_model_name_from_path, process_images, tokenizer_image_token
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IGNORE_INDEX
from llava.conversation import conv_templates, SeparatorStyle
from PIL import Image
import requests
import copy
import torch
import os
from pathlib import Path

current_dir = str(Path(__file__).resolve().parent)

# Load model
# pretrained = "lmms-lab/llava-onevision-qwen2-0.5b-ov"
pretrained = os.path.join(current_dir, "ckpts", "llava-onevision-qwen2-0.5b-ov")
model_name = "llava_qwen"
device = "cuda"
device_map = "auto"
llava_model_args = {
    "multimodal": True,
}
overwrite_config = {}
overwrite_config["image_aspect_ratio"] = "pad"
llava_model_args["overwrite_config"] = overwrite_config
tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, device_map=device_map, **llava_model_args)

model.eval()

# Load two images
url1 = os.path.join(current_dir, "examples", "llava_v1_5_radar.jpg")
url2 = os.path.join(current_dir, "examples", "llava_logo.png")

# image1 = Image.open(requests.get(url1, stream=True).raw)
# image2 = Image.open(requests.get(url2, stream=True).raw)
image1 = Image.open(url1)
image2 = Image.open(url2)

images = [image1, image2]
image_tensors = process_images(images, image_processor, model.config)
image_tensors = [_image.to(dtype=torch.float16, device=device) for _image in image_tensors]

# Prepare interleaved text-image input
conv_template = "qwen_1_5"
question = f"{DEFAULT_IMAGE_TOKEN} This is the first image. Can you describe what you see?\n\nNow, let's look at another image: {DEFAULT_IMAGE_TOKEN}\nWhat's the difference between these two images?"

conv = copy.deepcopy(conv_templates[conv_template])
conv.append_message(conv.roles[0], question)
conv.append_message(conv.roles[1], None)
prompt_question = conv.get_prompt()

input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
image_sizes = [image.size for image in images]

# Generate response
cont = model.generate(
    input_ids,
    images=image_tensors,
    image_sizes=image_sizes,
    do_sample=False,
    temperature=0,
    max_new_tokens=4096,
)
text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)
print(text_outputs[0])