"...composable_kernel.git" did not exist on "669df2d305f8559fbcda6b554860ba705b44d732"
Unverified commit 3a232db4, authored by Haodong Duan, committed by GitHub

[Deprecate] Remove multi-modal related stuff (#1072)



* Remove MultiModal

* update index.rst

* update README

* remove mmbench codes

* update news

---------
Co-authored-by: Leymore <zfz-960727@163.com>
parent f1ee11de
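
# ---- LLaVA on MMBench: dataloader, model and evaluator settings ----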
from opencompass.multimodal.models.llava import LLaVAMMBenchPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'category', 'l2-category', 'context', 'index',
'options_dict', 'options', 'split'
],
),
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
llava_mmbench_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_mmbench_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAMMBenchPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/llava-7b-mmbench.xlsx')
]
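
# ---- LLaVA on OCR-VQA: dataloader, model and evaluator settings ----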
from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
llava_ocrvqa_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_ocrvqa_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
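
# ---- LLaVA on OK-VQA: dataloader, model and evaluator settings ----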
from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
llava_okvqa_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_okvqa_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
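
# ---- LLaVA on ScienceQA: dataloader, model and evaluator settings ----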
from opencompass.multimodal.models.llava import LLaVAScienceQAPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image'
])
]
dataset = dict(type='mmpretrain.ScienceQA',
data_root='./data/scienceqa',
split='val',
split_file='pid_splits.json',
ann_file='problems.json',
image_only=True,
data_prefix=dict(img_path='val'),
pipeline=val_pipeline)
llava_scienceqa_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_scienceqa_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAScienceQAPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
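
# ---- LLaVA on TextVQA: dataloader, model and evaluator settings ----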
from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.TextVQA',
data_root='data/textvqa',
ann_file='annotations/TextVQA_0.5.1_val.json',
pipeline=val_pipeline,
data_prefix='images/train_images',
)
llava_textvqa_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_textvqa_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
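
# ---- LLaVA on VizWiz: dataloader, model and evaluator settings ----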
from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VizWiz',
data_root='data/vizwiz/',
data_prefix='Images/val',
ann_file='Annotations/val.json',
pipeline=val_pipeline)
llava_vizwiz_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_vizwiz_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
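
# ---- LLaVA on VQAv2: dataloader, model and evaluator settings ----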
from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/coco',
data_prefix='images/val2014',
question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/v2_mscoco_val2014_annotations.json',
pipeline=val_pipeline)
llava_vqav2_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_vqav2_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
post_processor=dict(type=LLaVABasePostProcessor)
) # noqa
# evaluation settings
llava_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
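
# ---- LLaVA on VSR: dataloader, model and evaluator settings ----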
from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVAVSRPostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VSR',
data_root='data/vsr/',
data_prefix='images/',
ann_file='annotations/test.json',
pipeline=val_pipeline)
llava_vsr_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
llava_vsr_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
post_processor=dict(type=LLaVAVSRPostProcessor)
) # noqa
# evaluation settings
llava_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
# MiniGPT-4
### Prepare the environment
```sh
cd opencompass/multimodal/models/minigpt_4
git clone https://github.com/Vision-CAIR/MiniGPT-4.git
```
Then prepare the environment according to this [doc](https://github.com/Vision-CAIR/MiniGPT-4).
### Start evaluation
#### Slurm
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION
```
#### PyTorch
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval
```
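
The removed task file itself is not shown in this diff, so the snippet below is only a minimal, hypothetical sketch of what `configs/multimodal/tasks.py` might have looked like, assuming it gathers the per-benchmark configs via mmengine's `read_base()` into `models`, `datasets`, `evaluators` and `load_froms` lists. The import path, field names and GPU settings are illustrative assumptions, not the removed file's actual contents.

```python
# Hypothetical sketch of configs/multimodal/tasks.py (import path and field
# names are assumptions; adapt to the actual per-benchmark config layout).
from mmengine.config import read_base

with read_base():
    # Pull in one of the MiniGPT-4 MMBench configs defined in this diff.
    from .minigpt_4.minigpt_4_mmbench import (minigpt_4_mmbench_dataloader,
                                              minigpt_4_mmbench_evaluator,
                                              minigpt_4_mmbench_load_from,
                                              minigpt_4_mmbench_model)

models = [minigpt_4_mmbench_model]
datasets = [minigpt_4_mmbench_dataloader]
evaluators = [minigpt_4_mmbench_evaluator]
load_froms = [minigpt_4_mmbench_load_from]

num_gpus = 8
num_procs = 8
launcher = 'pytorch'  # assumed; use the --slurm -p $PARTITION flags for Slurm
```

# ---- MiniGPT-4 on COCO Caption: dataloader, model and evaluator settings ----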
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4COCOCaotionPromptConstructor,
MiniGPT4COCOCaptionPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(384, 384),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.COCOCaption',
data_root='data/coco',
data_prefix=dict(img_path='images'),
ann_file='annotations/coco_karpathy_val.json',
pipeline=val_pipeline)
minigpt_4_coco_caption_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
minigpt_4_coco_caption_model = dict(
type='minigpt-4',
low_resource=False,
img_size=384,
llama_model='/path/to/vicuna_weights_7b/',
is_caption_task=True,
prompt_constructor=dict(type=MiniGPT4COCOCaotionPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4COCOCaptionPostProcessor))
# evaluation settings
minigpt_4_coco_caption_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
) # noqa
]
minigpt_4_coco_caption_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
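
# ---- MiniGPT-4 on Flickr30k Caption: dataloader, model and evaluator settings ----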
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4COCOCaotionPromptConstructor,
MiniGPT4COCOCaptionPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(384, 384),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.Flickr30kCaption',
data_root='data/flickr30k',
ann_file='annotations/dataset_flickr30k.json',
data_prefix='images',
split='val',
pipeline=val_pipeline)
minigpt_4_flickr30k_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_flickr30k_model = dict(
type='minigpt-4',
low_resource=False,
img_size=384,
llama_model='/path/to/vicuna_weights_7b/',
is_caption_task=True,
prompt_constructor=dict(type=MiniGPT4COCOCaotionPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4COCOCaptionPostProcessor))
# evaluation settings
minigpt_4_flickr30k_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
) # noqa
]
minigpt_4_flickr30k_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
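
# ---- MiniGPT-4 on GQA: dataloader, model and evaluator settings ----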
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VQAPromptConstructor,
MiniGPT4VQAPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.GQA',
data_root='data/gqa',
data_prefix='images',
ann_file='annotations/testdev_balanced_questions.json',
pipeline=val_pipeline)
minigpt_4_gqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_gqa_model = dict(type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(
type=MiniGPT4VQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VQAPostProcessor))
# evaluation settings
minigpt_4_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
minigpt_4_gqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
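
# ---- MiniGPT-4 on MMBench: dataloader, model and evaluator settings ----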
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4MMBenchPromptConstructor, MiniGPT4MMBenchPostProcessor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'category', 'l2-category', 'context', 'index',
'options_dict', 'options', 'split'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
minigpt_4_mmbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_mmbench_model = dict(
type='minigpt-4',
low_resource=False,
llama_model='/path/to/vicuna-7b/',
prompt_constructor=dict(type=MiniGPT4MMBenchPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4MMBenchPostProcessor))
# evaluation settings
minigpt_4_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/minigpt-4-7b-mmbench.xlsx')
]
minigpt_4_mmbench_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
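
# ---- MiniGPT-4 on MME: dataloader, model and evaluator settings ----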
from opencompass.multimodal.models.minigpt_4 import (MiniGPT4MMEPostProcessor, MiniGPT4MMEPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'answer', 'task'
])
]
dataset = dict(type='opencompass.MMEDataset',
data_dir='/path/to/MME',
pipeline=val_pipeline)
minigpt_4_mme_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
minigpt_4_model = dict(
type='minigpt-4',
low_resource=False,
llama_model='/path/to/vicuna/',
prompt_constructor=dict(type=MiniGPT4MMEPromptConstructor),
post_processor=dict(type=MiniGPT4MMEPostProcessor))
# evaluation settings
minigpt_4_mme_evaluator = [
dict(type='opencompass.MMEMetric')
]
minigpt_4_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
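
# ---- MiniGPT-4 on OCR-VQA: dataloader, model and evaluator settings ----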
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VQAPromptConstructor,
MiniGPT4VQAPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
minigpt_4_ocr_vqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_ocr_vqa_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VQAPostProcessor))
# evaluation settings
minigpt_4_ocr_vqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
minigpt_4_ocr_vqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
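
# ---- MiniGPT-4 on OK-VQA: dataloader, model and evaluator settings ----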
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VQAPromptConstructor,
MiniGPT4VQAPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
minigpt_4_ok_vqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_ok_vqa_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VQAPostProcessor))
# evaluation settings
minigpt_4_ok_vqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
minigpt_4_ok_vqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
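
# ---- MiniGPT-4 on ScienceQA: dataloader, model and evaluator settings ----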
from opencompass.multimodal.models import (MiniGPT4ScienceQAPromptConstructor,
MiniGPT4ScienceQAPostProcessor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image'
])
]
dataset = dict(type='mmpretrain.ScienceQA',
data_root='./data/scienceqa',
split='val',
split_file='pid_splits.json',
ann_file='problems.json',
image_only=True,
data_prefix=dict(img_path='val'),
pipeline=val_pipeline)
minigpt_4_scienceqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_scienceqa_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4ScienceQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4ScienceQAPostProcessor))
# evaluation settings
minigpt_4_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
minigpt_4_scienceqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
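
# ---- MiniGPT-4 on SEED-Bench: dataloader, model and evaluator settings ----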
from opencompass.multimodal.models.minigpt_4 import MiniGPT4SEEDBenchPromptConstructor # noqa
# dataloader settings
image_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'answer', 'choices', 'data_type', 'question_type_id',
'index', 'data_path', 'question_id'
])
]
video_pipeline = [
dict(type='mmaction.Resize', scale=(224, 224), interpolation='bicubic'),
dict(type='mmaction.CenterCrop', crop_size=224),
dict(type='Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'answer', 'choices', 'data_type', 'question_type_id',
'index', 'data_path', 'question_id'
])
]
dataset = dict(
type='opencompass.SEEDBenchDataset',
ann_file='data/seedbench/SEED-Bench.json',
cc3m_path='data/seedbench/SEED-Bench-image',
sthv2_path='data/seedbench/sthv2/videos',
epic_kitchens_path='data/seedbench/3h91syskeag572hl6tvuovwv4d/videos/test',
breakfast_path='data/seedbench/BreakfastII_15fps_qvga_sync',
image_pipeline=image_pipeline,
video_pipeline=video_pipeline,
only_image=True)
minigpt_4_seedbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_seedbench_model = dict(
type='minigpt-4',
low_resource=False,
llama_model='/path/to/vicuna/',
prompt_constructor=dict(type=MiniGPT4SEEDBenchPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=None,
mode='loss')
# evaluation settings
minigpt_4_seedbench_evaluator = [dict(type='opencompass.SEEDBenchAcc')]
minigpt_4_load_from = '/path/to/prerained_minigpt4_7b.pth'
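
# ---- MiniGPT-4 on TextVQA: dataloader, model and evaluator settings ----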
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VQAPromptConstructor,
MiniGPT4VQAPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.TextVQA',
data_root='data/textvqa',
ann_file='annotations/TextVQA_0.5.1_val.json',
pipeline=val_pipeline,
data_prefix='images/train_images',
)
minigpt_4_textvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_textvqa_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VQAPostProcessor))
# evaluation settings
minigpt_4_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
minigpt_4_textvqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
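
# ---- MiniGPT-4 on VizWiz: dataloader, model and evaluator settings ----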
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VQAPromptConstructor,
MiniGPT4VQAPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VizWiz',
data_root='data/vizwiz/',
data_prefix='Images/val',
ann_file='Annotations/val.json',
pipeline=val_pipeline)
minigpt_4_vizwiz_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_vizwiz_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VQAPostProcessor))
# evaluation settings
minigpt_4_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
minigpt_4_vizwiz_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa