import argparse
import json
import os
import random
import time

import torch
from internvl.model.internvl_chat import InternVLChatModel
from internvl.train.dataset import build_transform, dynamic_preprocess
from PIL import Image
from tqdm import tqdm
from transformers import AutoTokenizer

# Dataset registry: image root, question file, and generation length limits.
ds_collections = {
    'mmvet': {
        'root': 'data/mm-vet/images',
        'question': 'data/mm-vet/llava-mm-vet.jsonl',
        'metric': None,
        'max_new_tokens': 1000,
        'min_new_tokens': 1,
    }
}


# Note: this collate_fn expects dict-style samples and is only needed when the
# dataset is wrapped in a DataLoader; the evaluation loop below iterates the
# dataset directly with batch size 1 and does not use it.
def collate_fn(batches, tokenizer):
    pixel_values = torch.cat([_['pixel_values'] for _ in batches], dim=0)
    questions = [_['question'] for _ in batches]
    question_ids = [_['question_id'] for _ in batches]
    annotations = [_['annotation'] for _ in batches]
    return pixel_values, questions, question_ids, annotations


class VQADataset(torch.utils.data.Dataset):

    def __init__(self, root, data, prompt, input_size=224, dynamic_image_size=False,
                 use_thumbnail=False, max_num=6):
        self.root = root
        self.data = open(data).readlines()
        self.prompt = prompt
        self.input_size = input_size
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.max_num = max_num
        self.transform = build_transform(is_train=False, input_size=input_size)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        data = json.loads(self.data[idx].strip())
        image, question, question_id, annotation = (
            data['image'], data['text'], data['question_id'], data.get('answer', None))

        image = os.path.join(self.root, image)
        image = Image.open(image).convert('RGB')
        if self.dynamic_image_size:
            # Tile the image into at most `max_num` sub-images (plus an optional
            # thumbnail) for high-resolution input.
            images = dynamic_preprocess(image, image_size=self.input_size,
                                        use_thumbnail=self.use_thumbnail,
                                        max_num=self.max_num)
        else:
            images = [image]
        pixel_values = [self.transform(image) for image in images]
        pixel_values = torch.stack(pixel_values)
        question = question + ' ' + self.prompt
        return question_id, question, pixel_values, annotation


def evaluate_chat_model():
    random.seed(args.seed)
    prompt = ''

    for ds_name in args.datasets:
        dataset = VQADataset(
            root=ds_collections[ds_name]['root'],
            data=ds_collections[ds_name]['question'],
            prompt=prompt,
            input_size=image_size,
            dynamic_image_size=args.dynamic,
            use_thumbnail=use_thumbnail,
            max_num=args.max_num
        )

        outputs = {}
        for _, (question_id, question, pixel_values, annotations) in tqdm(enumerate(dataset)):
            pixel_values = pixel_values.to(torch.bfloat16).cuda()
            generation_config = dict(
                num_beams=args.num_beams,
                max_new_tokens=ds_collections[ds_name]['max_new_tokens'],
                min_new_tokens=ds_collections[ds_name]['min_new_tokens'],
                do_sample=args.temperature > 0,
                temperature=args.temperature,
            )
            pred = model.chat(
                tokenizer=tokenizer,
                pixel_values=pixel_values,
                question=question,
                generation_config=generation_config,
                verbose=True
            )
            outputs[f'v1_{question_id}'] = pred

        print(f'Evaluating {ds_name} ...')
        time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
        results_file = f'{ds_name}_{time_prefix}.json'
        results_file = os.path.join(args.out_dir, results_file)
        with open(results_file, 'w') as f:
            json.dump(outputs, f)
        print('Results saved to {}'.format(results_file))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str, default='')
    parser.add_argument('--datasets', type=str, default='pope')
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--num-workers', type=int, default=1)
    parser.add_argument('--num-beams', type=int, default=5)
    parser.add_argument('--temperature', type=float, default=0.0)
    parser.add_argument('--out-dir', type=str, default='results')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--dynamic', action='store_true')
    parser.add_argument('--max-num', type=int, default=6)
    parser.add_argument('--load-in-8bit', action='store_true')
    parser.add_argument('--load-in-4bit', action='store_true')
    parser.add_argument('--auto', action='store_true')
    args = parser.parse_args()

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    args.datasets = args.datasets.split(',')
    print('datasets:', args.datasets)
    assert args.batch_size == 1, 'Only batch size 1 is supported'

    if args.auto:
        os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    kwargs = {'device_map': 'auto'} if args.auto else {}
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint, trust_remote_code=True, use_fast=False)
    model = InternVLChatModel.from_pretrained(
        args.checkpoint, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16,
        load_in_8bit=args.load_in_8bit, load_in_4bit=args.load_in_4bit, **kwargs).eval()
    # Quantized and auto-device-mapped models are already placed on device(s);
    # only move the model to the GPU explicitly in the plain bf16 case.
    if not args.load_in_8bit and not args.load_in_4bit and not args.auto:
        model = model.cuda()
    image_size = model.config.force_image_size or model.config.vision_config.image_size
    use_thumbnail = model.config.use_thumbnail

    # Fall back to greedy decoding (num_beams=1) for large models or dynamic
    # tiling, where beam search is too memory-hungry.
    total_params = sum(p.numel() for p in model.parameters()) / 1e9
    if total_params > 20 or args.dynamic:
        args.num_beams = 1
        print(f'[test] total_params: {total_params}B, use num_beams: {args.num_beams}')
    else:
        print(f'[test] total_params: {total_params}B')
    print(f'[test] image_size: {image_size}')
    print(f'[test] template: {model.config.template}')
    print(f'[test] dynamic_image_size: {args.dynamic}')
    print(f'[test] use_thumbnail: {use_thumbnail}')

    evaluate_chat_model()
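# Example invocation (the script name and checkpoint path below are
# hypothetical; the flags match the argparse definitions above):
#   python evaluate_mmvet.py --checkpoint ./pretrained/InternVL-Chat-V1-5 \
#       --datasets mmvet --dynamic --max-num 6 --out-dir results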