Unverified commit 3a232db4, authored by Haodong Duan, committed by GitHub

[Deprecate] Remove multi-modal related stuff (#1072)



* Remove MultiModal

* update index.rst

* update README

* remove mmbench codes

* update news

---------
Co-authored-by: Leymore <zfz-960727@163.com>
parent f1ee11de
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VQAPromptConstructor,
MiniGPT4VQAPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/coco',
data_prefix='images/val2014',
question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/v2_mscoco_val2014_annotations.json',
pipeline=val_pipeline)
minigpt_4_vqav2_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_vqav2_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VQAPostProcessor))
# evaluation settings
minigpt_4_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
minigpt_4_vqav2_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
from opencompass.multimodal.models.minigpt_4 import (
MiniGPT4VSRPromptConstructor,
MiniGPT4VSRPostProcessor,
)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VSR',
data_root='data/vsr/',
data_prefix='images/',
ann_file='annotations/test.json',
pipeline=val_pipeline)
minigpt_4_vsr_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler',
shuffle=False))
# model settings
minigpt_4_vsr_model = dict(
type='minigpt-4',
low_resource=False,
img_size=224,
max_length=10,
llama_model='/path/to/vicuna_weights_7b/',
prompt_constructor=dict(type=MiniGPT4VSRPromptConstructor,
image_prompt='###Human: <Img><ImageHere></Img>',
reply_prompt='###Assistant:'),
post_processor=dict(type=MiniGPT4VSRPostProcessor))
# evaluation settings
minigpt_4_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
minigpt_4_vsr_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa
# mPLUG-Owl
### Prepare the environment
```sh
cd opencompass/multimodal/models/mplug_owl
git clone https://github.com/X-PLUG/mPLUG-Owl.git
```
### Start evaluation
#### Slurm
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION
```
#### PyTorch
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval
```
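The config above is launched through `configs/multimodal/tasks.py`, which is not shown in this diff. The sketch below only illustrates how the `mplug_owl_mmbench_*` variables defined in the config that follows could be aggregated into an evaluation task; the relative import path and the runner-side variable names (`models`, `datasets`, `evaluators`) are assumptions, not the actual file contents.

```python
# Hypothetical tasks.py entry (sketch only): collect the mPLUG-Owl MMBench
# pieces defined in the config below into lists for the multimodal runner.
from mmengine.config import read_base

with read_base():
    # Illustrative module path; adjust to the actual config location.
    from .mplug_owl.mplug_owl_7b_mmbench import (
        mplug_owl_mmbench_dataloader, mplug_owl_mmbench_evaluator,
        mplug_owl_mmbench_model)

models = [mplug_owl_mmbench_model]          # model settings
datasets = [mplug_owl_mmbench_dataloader]   # dataloader settings
evaluators = [mplug_owl_mmbench_evaluator]  # evaluation settings
```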
from opencompass.multimodal.models.mplug_owl import (
MplugOwlMMBenchPostProcessor, MplugOwlMMBenchPromptConstructor)
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(224, 224),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(
type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'answer', 'category', 'l2-category', 'context',
'index', 'options_dict', 'options'
],
),
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
mplug_owl_mmbench_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False),
)
# model settings
mplug_owl_mmbench_model = dict(
type='mplug_owl-7b',
model_path='/mplug-owl-llama-7b-ft',
prompt_constructor=dict(type=MplugOwlMMBenchPromptConstructor),
post_processor=dict(type=MplugOwlMMBenchPostProcessor)
) # noqa
# evaluation settings
mplug_owl_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/mplug_owl-7b-mmagibench-v0.1.0.xlsx')
]
# OpenFlamingo
### Prepare the environment
Install [MMPretrain](https://github.com/open-mmlab/mmpretrain) according to this [doc](https://mmpretrain.readthedocs.io/en/latest/get_started.html#installation)
### Start evaluation
#### Slurm
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION
```
#### PyTorch
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval
```
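Before launching, it can help to load one of the OpenFlamingo configs below with MMEngine and inspect the assembled dicts. This is a sketch only: the config path is illustrative, and it assumes a checkout in which the `opencompass.multimodal` package (removed by this commit) is still importable so the config's imports resolve.

```python
# Sanity-check an OpenFlamingo config before launching (sketch; the path
# below is illustrative and must match your local config layout).
from mmengine.config import Config

cfg = Config.fromfile('configs/multimodal/openflamingo/openflamingo_mmbench.py')
print(cfg.openflamingo_mmbench_dataloader['batch_size'])   # 1 in the config below
print(cfg.openflamingo_mmbench_model['type'])              # 'openflamingo'
print(cfg.openflamingo_mmbench_evaluator[0]['type'])       # 'opencompass.DumpResults'
```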
from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.COCOCaption',
data_root='data/coco',
data_prefix=dict(img_path='images'),
ann_file='annotations/coco_karpathy_val.json',
pipeline=val_pipeline)
openflamingo_coco_caption_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_coco_caption_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='caption',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor)
)
# evaluation settings
openflamingo_coco_caption_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
) # noqa
]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.Flickr30kCaption',
data_root='data/flickr30k',
ann_file='annotations/dataset_flickr30k.json',
data_prefix='images',
split='val',
pipeline=val_pipeline)
openflamingo_flickr30k_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_flickr30k_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='caption',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor)
)
# evaluation settings
openflamingo_flickr30k_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
) # noqa
]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.GQA',
data_root='data/gqa',
data_prefix='images',
ann_file='annotations/testdev_balanced_questions.json',
pipeline=val_pipeline)
openflamingo_gqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_gqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoMMBenchPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.PILToNumpy'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'options', 'category', 'l2-category', 'index',
'context', 'options_dict'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
openflamingo_mmbench_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_mmbench_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoMMBenchPromptConstructor)
)
# evaluation settings
openflamingo_mmbench_evaluator = [
dict(
type='opencompass.DumpResults',
save_path= # noqa: E251
'work_dirs/9b-flamingo/9b-flamingo-mmbench.xlsx')
]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
openflamingo_ocrvqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# model settings
openflamingo_ocrvqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
openflamingo_okvqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_okvqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoScienceQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution'
])
]
dataset = dict(type='mmpretrain.ScienceQA',
data_root='./data/scienceqa',
split='val',
split_file='pid_splits.json',
ann_file='problems.json',
image_only=True,
data_prefix=dict(img_path='val'),
pipeline=val_pipeline)
openflamingo_scienceqa_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_scienceqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoScienceQAPromptConstructor)
)
# evaluation settings
openflamingo_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.TextVQA',
data_root='data/textvqa',
ann_file='annotations/TextVQA_0.5.1_val.json',
pipeline=val_pipeline,
data_prefix='images/train_images',
)
openflamingo_textvqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_textvqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VizWiz',
data_root='data/vizwiz/',
data_prefix='Images/val',
ann_file='Annotations/val.json',
pipeline=val_pipeline)
openflamingo_vizwiz_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_vizwiz_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/coco',
data_prefix='images/val2014',
question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/v2_mscoco_val2014_annotations.json',
pipeline=val_pipeline)
openflamingo_vqav2_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_vqav2_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor, OpenFlamingoVSRPostProcessor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VSR',
data_root='data/vsr/',
data_prefix='images/',
ann_file='annotations/test.json',
pipeline=val_pipeline)
openflamingo_vsr_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_vsr_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor, shot_prompt=('The cat is behind the laptop. Short Answer:yes<|endofchunk|>' # noqa: E501
'The cow is ahead of the person. Short Answer:no<|endofchunk|>')),
post_processor=dict(type=OpenFlamingoVSRPostProcessor)
)
# evaluation settings
openflamingo_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
# OTTER: Multi-modal In-context Instruction Tuning.
### Prepare the environment
```sh
pip install otter_ai
```
### Start evaluation
#### Slurm
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION
```
#### PyTorch
```sh
cd $root
python run.py configs/multimodal/tasks.py --mm-eval
```
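The OTTER config below wires in the prompt constructor and post-processor as `dict(type=..., **kwargs)` entries. As a rough illustration of that pattern (not the exact OpenCompass builder), the `type` key holds a class object and the remaining keys become its constructor arguments:

```python
# Illustration of the `type`-dict pattern used by prompt_constructor /
# post_processor below (not the exact OpenCompass implementation).
def build_from_cfg(cfg: dict):
    cfg = dict(cfg)        # copy so the original config dict is not mutated
    cls = cfg.pop('type')  # e.g. OTTERMMBenchPromptConstructor
    return cls(**cfg)      # remaining keys become keyword arguments

# Example (requires the OTTER model wrappers to be importable):
# prompt_constructor = build_from_cfg(
#     dict(type=OTTERMMBenchPromptConstructor, model_label='GPT', user_label='User'))
```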
# dataloader settings
from opencompass.multimodal.models.otter import (
OTTERMMBenchPromptConstructor, OTTERMMBenchPostProcessor)
val_pipeline = [
dict(type="mmpretrain.torchvision/Resize", size=(224, 224), interpolation=3),
dict(type="mmpretrain.torchvision/ToTensor"),
dict(
type="mmpretrain.torchvision/Normalize",
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
dict(
type="mmpretrain.PackInputs",
algorithm_keys=["question", "answer", "options", "category", "l2-category", "context", "index", "options_dict"],
),
]
dataset = dict(
type="opencompass.MMBenchDataset", data_file="/path/to/mmbench/mmbench_test_20230712.tsv", pipeline=val_pipeline
)
otter_9b_mmbench_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type="pseudo_collate"),
sampler=dict(type="DefaultSampler", shuffle=False),
)
# model settings
otter_9b_mmbench_model = dict(
type="otter-9b",
model_path="/path/to/OTTER-Image-MPT7B/", # noqa
load_bit="bf16",
prompt_constructor=dict(type=OTTERMMBenchPromptConstructor,
model_label='GPT',
user_label='User'),
post_processor=dict(type=OTTERMMBenchPostProcessor)
)
# evaluation settings
otter_9b_mmbench_evaluator = [dict(type="opencompass.DumpResults", save_path="work_dirs/otter-9b-mmbench.xlsx")]
from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor, QwenVLBasePostProcessor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'options', 'category', 'l2-category', 'context',
'index', 'options_dict'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
qwen_mmbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_model = dict(
type='qwen-vl-base',
    pretrained_path='Qwen/Qwen-VL',  # HuggingFace repo id or local path
    prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor),
post_processor=dict(type=QwenVLBasePostProcessor)
)
# evaluation settings
qwen_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/qwenvl-base-7b-mmbench.xlsx')
]
from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.COCOCaption',
data_root='data/coco',
data_prefix=dict(img_path='images'),
ann_file='annotations/coco_karpathy_val.json',
pipeline=val_pipeline)
qwen_coco_caption_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_coco_caption_model = dict(
type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # HuggingFace repo id or local path
prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'),
is_caption_task=True,
)
# evaluation settings
qwen_coco_caption_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
) # noqa
]