Unverified Commit 3a232db4 authored by Haodong Duan, committed by GitHub

[Deprecate] Remove multi-modal related stuff (#1072)



* Remove MultiModal

* update index.rst

* update README

* remove mmbench codes

* update news

---------
Co-authored-by: Leymore <zfz-960727@163.com>
parent f1ee11de
from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.Flickr30kCaption',
data_root='data/flickr30k',
ann_file='annotations/dataset_flickr30k.json',
data_prefix='images',
split='val',
pipeline=val_pipeline)
qwen_flickr30k_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_flickr30k_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'),
is_caption_task=True,
)
# evaluation settings
qwen_flickr30k_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
) # noqa
]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.GQA',
data_root='data/gqa',
data_prefix='images',
ann_file='annotations/testdev_balanced_questions.json',
pipeline=val_pipeline)
qwen_gqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_gqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'options', 'category', 'l2-category', 'context',
'index', 'options_dict'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
qwen_mmbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor)
)
# evaluation settings
qwen_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/qwenvl-chat-7b-mmbench.xlsx')
]
from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'options', 'category', 'l2-category', 'context',
'index', 'options_dict'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='/mnt/petrelfs/share_data/yuanyike/cnbench_v010_rolling.tsv',
pipeline=val_pipeline,
sys_prompt='请从以下选项中选择一个正确选项。')
qwen_mmbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor)
)
# evaluation settings
qwen_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/qwenvl-chat-7b-cnbench-v010.xlsx')
]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
qwen_ocrvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_ocrvqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
qwen_okvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_okvqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLChatScienceQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution'
])
]
dataset = dict(type='mmpretrain.ScienceQA',
data_root='./data/scienceqa',
split='val',
split_file='pid_splits.json',
ann_file='problems.json',
image_only=True,
data_prefix=dict(img_path='val'),
pipeline=val_pipeline)
qwen_scienceqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_scienceqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatScienceQAPromptConstructor)
)
# evaluation settings
qwen_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.TextVQA',
data_root='data/textvqa',
ann_file='annotations/TextVQA_0.5.1_val.json',
pipeline=val_pipeline,
data_prefix='images/train_images',
)
qwen_textvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_textvqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VizWiz',
data_root='data/vizwiz/',
data_prefix='Images/val',
ann_file='Annotations/val.json',
pipeline=val_pipeline)
qwen_vizwiz_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_vizwiz_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/coco',
data_prefix='images/val2014',
question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/v2_mscoco_val2014_annotations.json',
pipeline=val_pipeline)
qwen_vqav2_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_vqav2_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor, QwenVLChatVSRPostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VSR',
data_root='data/vsr/',
data_prefix='images/',
ann_file='annotations/test.json',
pipeline=val_pipeline)
qwen_vsr_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_vsr_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor),
post_processor=dict(type=QwenVLChatVSRPostProcessor)
)
# evaluation settings
qwen_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
from mmengine.config import read_base
with read_base():
from .minigpt_4.minigpt_4_7b_mmbench import (minigpt_4_mmbench_dataloader,
minigpt_4_mmbench_evaluator,
minigpt_4_mmbench_load_from,
minigpt_4_mmbench_model)
models = [minigpt_4_mmbench_model]
datasets = [minigpt_4_mmbench_dataloader]
evaluators = [minigpt_4_mmbench_evaluator]
load_froms = [minigpt_4_mmbench_load_from]
num_gpus = 8
num_procs = 8
launcher = 'pytorch'
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMBasePromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.COCOCaption',
data_root='data/coco',
data_prefix=dict(img_path='images'),
ann_file='annotations/coco_karpathy_val.json',
pipeline=val_pipeline)
visualglm_coco_caption_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_coco_caption_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
is_caption_task=True,
prompt_constructor=dict(type=VisualGLMBasePromptConstructor, system_prompt='Describe the image.'),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_coco_caption_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
) # noqa
]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMBasePromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.Flickr30kCaption',
data_root='data/flickr30k',
ann_file='annotations/dataset_flickr30k.json',
data_prefix='images',
split='val',
pipeline=val_pipeline)
visualglm_flickr30k_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_flickr30k_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
is_caption_task=True,
prompt_constructor=dict(type=VisualGLMBasePromptConstructor, system_prompt='Describe the image.'),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_flickr30k_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
) # noqa
]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.GQA',
data_root='data/gqa',
data_prefix='images',
ann_file='annotations/testdev_balanced_questions.json',
pipeline=val_pipeline)
visualglm_gqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_gqa_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMMMBenchPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'options', 'category', 'l2-category', 'context',
'index', 'options_dict'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
visualglm_mmbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_mmbench_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
prompt_constructor=dict(type=VisualGLMMMBenchPromptConstructor),
post_processor=dict(type=VisualGLMBasePostProcessor),
    gen_kwargs=dict(max_new_tokens=50,
                    num_beams=5,
                    do_sample=False,
                    repetition_penalty=1.0,
                    length_penalty=-1.0)
)
# evaluation settings
visualglm_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/visualglm-6b-mmbench.xlsx')
]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
visualglm_ocrvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_ocrvqa_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
visualglm_okvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_okvqa_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMScienceQAPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image'
])
]
dataset = dict(type='mmpretrain.ScienceQA',
data_root='./data/scienceqa',
split='val',
split_file='pid_splits.json',
ann_file='problems.json',
image_only=True,
data_prefix=dict(img_path='val'),
pipeline=val_pipeline)
visualglm_scienceqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_scienceqa_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
prompt_constructor=dict(type=VisualGLMScienceQAPromptConstructor),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.TextVQA',
data_root='data/textvqa',
ann_file='annotations/TextVQA_0.5.1_val.json',
pipeline=val_pipeline,
data_prefix='images/train_images',
)
visualglm_textvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
visualglm_textvqa_model = dict(
type='visualglm',
pretrained_path='/path/to/visualglm', # or Huggingface repo id
prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
post_processor=dict(type=VisualGLMBasePostProcessor)
)
# evaluation settings
visualglm_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
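
For completeness: each per-task config above was meant to be pulled into a top-level evaluation config through mmengine's `read_base`, exactly as the MiniGPT-4 MMBench entry earlier in this diff does. Below is a minimal sketch of the equivalent assembly for the Qwen-VL-Chat MMBench config; the module path `.qwen.qwen_vl_chat_7b_mmbench` is assumed purely for illustration and may not match the removed file layout, and the launcher settings simply mirror the MiniGPT-4 example.

from mmengine.config import read_base

with read_base():
    # hypothetical module path; point it at wherever the Qwen MMBench config lived
    from .qwen.qwen_vl_chat_7b_mmbench import (qwen_mmbench_dataloader,
                                               qwen_mmbench_evaluator,
                                               qwen_model)

models = [qwen_model]
datasets = [qwen_mmbench_dataloader]
evaluators = [qwen_mmbench_evaluator]

# distributed launch settings, copied from the MiniGPT-4 MMBench example above
num_gpus = 8
num_procs = 8
launcher = 'pytorch'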