"git@developer.sourcefind.cn:wangsen/mineru.git" did not exist on "2e87e649edd73133a871722ca34b55459ee9ffa8"
Unverified Commit bd50bad8 authored by Yike Yuan, committed by GitHub

[Feat] Support mm models on public dataset and fix several issues. (#412)



* [Feat] Add public dataset support for visualglm, qwenvl, and flamingo

* [Fix] MMBench related changes.

* [Fix] Openflamingo inference.

* [Fix] Hide ckpt path.

* [Fix] Pre-commit.

---------
Co-authored-by: Haodong Duan <dhd.efz@gmail.com>
parent 7c2726c2
@@ -24,7 +24,7 @@ dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
mmbench_dataloader = dict(
llava_mmbench_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
@@ -33,7 +33,7 @@ mmbench_dataloader = dict(
)
# model settings
llava_model = dict(
llava_mmbench_model = dict(
type='llava',
model_path='/path/to/llava',
prompt_constructor=dict(type=LLaVAMMBenchPromptConstructor),
@@ -41,7 +41,7 @@ llava_model = dict(
) # noqa
# evaluation settings
mmbench_evaluator = [
llava_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/llava-7b-mmbench.xlsx')
]
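The `llava_mmbench_` prefix matters because these per-model fragments are imported side by side into one top-level multimodal config, where unprefixed names such as `mmbench_dataloader` would collide across models. A minimal sketch of that aggregation, assuming mmengine-style `read_base()` imports; the relative module paths and the aggregate list names are illustrative assumptions, not part of this commit:

```python
from mmengine.config import read_base

with read_base():
    # Import the model-prefixed symbols from the per-model config fragments.
    # The module paths below are assumed for illustration.
    from .llava.llava_7b_mmbench import (llava_mmbench_dataloader,
                                         llava_mmbench_evaluator,
                                         llava_mmbench_model)
    from .mplug_owl.mplug_owl_7b_mmbench import (
        mplug_owl_mmbench_dataloader, mplug_owl_mmbench_evaluator,
        mplug_owl_mmbench_model)

# With distinct prefixes the two models can share one evaluation config.
models = [llava_mmbench_model, mplug_owl_mmbench_model]
datasets = [llava_mmbench_dataloader, mplug_owl_mmbench_dataloader]
evaluators = [llava_mmbench_evaluator, mplug_owl_mmbench_evaluator]
```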
@@ -35,8 +35,8 @@ mplug_owl_mmbench_dataloader = dict(
# model settings
mplug_owl_mmbench_model = dict(
type='mplug_owl_7b',
model_path='/mplug-owl-llama-7b-ft/',
type='mplug_owl-7b',
model_path='/mplug-owl-llama-7b-ft',
prompt_constructor=dict(type=MplugOwlMMBenchPromptConstructor),
post_processor=dict(type=MplugOwlMMBenchPostProcessor)
) # noqa
@@ -46,5 +46,3 @@ mplug_owl_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/mplug_owl-7b-mmagibench-v0.1.0.xlsx')
]
mplug_owl_mmbench_load_from = None
\ No newline at end of file
from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.COCOCaption',
data_root='data/coco',
data_prefix=dict(img_path='images'),
ann_file='annotations/coco_karpathy_val.json',
pipeline=val_pipeline)
openflamingo_coco_caption_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_coco_caption_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='caption',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor)
)
# evaluation settings
openflamingo_coco_caption_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
) # noqa
]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.Flickr30kCaption',
data_root='data/flickr30k',
ann_file='annotations/dataset_flickr30k.json',
data_prefix='images',
split='val',
pipeline=val_pipeline)
openflamingo_flickr30k_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_flickr30k_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='caption',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor)
)
# evaluation settings
openflamingo_flickr30k_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
) # noqa
]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.GQA',
data_root='data/gqa',
data_prefix='images',
ann_file='annotations/testdev_balanced_questions.json',
pipeline=val_pipeline)
openflamingo_gqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_gqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoMMBenchPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.PILToNumpy'),
@@ -17,7 +19,7 @@ dataset = dict(type='opencompass.MMBenchDataset',
data_file='data/mmbench/mmbench_test_20230712.tsv',
pipeline=val_pipeline)
openflamingo_dataloader = dict(
openflamingo_mmbench_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
@@ -27,7 +29,7 @@ openflamingo_dataloader = dict(
)
# model settings
openflamingo_model = dict(
openflamingo_mmbench_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
@@ -59,11 +61,13 @@ openflamingo_model = dict(
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoMMBenchPromptConstructor)
)
# evaluation settings
openflamingo_evaluator = [
openflamingo_mmbench_evaluator = [
dict(
type='opencompass.DumpResults',
save_path= # noqa: E251
......
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
openflamingo_ocrvqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# model settings
openflamingo_ocrvqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
openflamingo_okvqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_okvqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoScienceQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution'
])
]
dataset = dict(type='mmpretrain.ScienceQA',
data_root='./data/scienceqa',
split='val',
split_file='pid_splits.json',
ann_file='problems.json',
image_only=True,
data_prefix=dict(img_path='val'),
pipeline=val_pipeline)
openflamingo_scienceqa_dataloader = dict(
batch_size=1,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_scienceqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoScienceQAPromptConstructor)
)
# evaluation settings
openflamingo_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.TextVQA',
data_root='data/textvqa',
ann_file='annotations/TextVQA_0.5.1_val.json',
pipeline=val_pipeline,
data_prefix='images/train_images',
)
openflamingo_textvqa_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_textvqa_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VizWiz',
data_root='data/vizwiz/',
data_prefix='Images/val',
ann_file='Annotations/val.json',
pipeline=val_pipeline)
openflamingo_vizwiz_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_vizwiz_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/coco',
data_prefix='images/val2014',
question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/v2_mscoco_val2014_annotations.json',
pipeline=val_pipeline)
openflamingo_vqav2_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_vqav2_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)
# evaluation settings
openflamingo_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor, OpenFlamingoVSRPostProcessor
# dataloader settings
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmpretrain.ResizeEdge',
scale=224,
interpolation='bicubic',
backend='pillow'),
dict(type='CenterCrop', crop_size=(224, 224)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.VSR',
data_root='data/vsr/',
data_prefix='images/',
ann_file='annotations/test.json',
pipeline=val_pipeline)
openflamingo_vsr_dataloader = dict(
batch_size=8,
num_workers=4,
dataset=dataset,
sampler=dict(type='DefaultSampler', shuffle=False),
collate_fn=dict(type='default_collate'),
persistent_workers=True,
)
# model settings
openflamingo_vsr_model = dict(
type='openflamingo',
data_preprocessor=dict(
type='mmpretrain.MultiModalDataPreprocessor',
mean=[122.770938, 116.7460125, 104.09373615],
std=[68.5005327, 66.6321579, 70.32316305],
to_rgb=True,
),
tokenizer=dict(type='mmpretrain.LlamaTokenizer',
name_or_path='decapoda-research/llama-7b-hf'),
vision_encoder=dict(
type='mmpretrain.VisionTransformer',
arch='l',
patch_size=14,
pre_norm=True,
norm_cfg=dict(type='LN', eps=1e-5),
layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
final_norm=False,
out_type='raw',
pretrained= # noqa: E251
'/path/to/vision/encoder', # noqa
),
lang_encoder=dict(
base=dict(type='mmpretrain.AutoModelForCausalLM',
name_or_path=
'decapoda-research/llama-7b-hf',
local_files_only=True),
adapter=dict(type='mmpretrain.FlamingoLMAdapter',
vis_hidden_size=1024,
cross_attn_every_n_layers=4,
use_media_placement_augmentation=False),
),
task='vqa',
generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor, shot_prompt=('The cat is behind the laptop. Short Answer:yes<|endofchunk|>' # noqa: E501
'The cow is ahead of the person. Short Answer:no<|endofchunk|>')),
post_processor=dict(type=OpenFlamingoVSRPostProcessor)
)
# evaluation settings
openflamingo_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
openflamingo_load_from = '/path/to/pretrained/weights' # noqa
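The VSR config is the only OpenFlamingo config here that pairs a few-shot `shot_prompt` with a dedicated post-processor: the model generates free text after the `Short Answer:` cue, while the evaluator scores against the ground-truth `yes`/`no` labels. A hypothetical illustration of that kind of normalization (a sketch only, not the actual `OpenFlamingoVSRPostProcessor`):

```python
# Sketch only: shows the normalization a VSR post-processor performs;
# this is not the actual OpenFlamingoVSRPostProcessor implementation.
def normalize_vsr_answer(generated: str) -> str:
    text = generated.split('Short Answer:')[-1]   # keep text after the cue
    text = text.split('<|endofchunk|>')[0]        # drop the few-shot delimiter
    first_word = text.strip().lower().split()[0] if text.strip() else ''
    return 'yes' if first_word.startswith('yes') else 'no'


print(normalize_vsr_answer('Short Answer:yes<|endofchunk|>'))  # -> yes
```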
@@ -3,12 +3,9 @@
### Prepare the environment
```sh
cd opencompass/multimodal/models/otter
git clone https://github.com/Luodian/Otter.git
pip install otter_ai
```
Then create a new conda environment and prepare the environment according to this [doc](https://github.com/Luodian/Otter)
### Start evaluation
#### Slurm
......
from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.COCOCaption',
data_root='data/coco',
data_prefix=dict(img_path='images'),
ann_file='annotations/coco_karpathy_val.json',
pipeline=val_pipeline)
qwen_coco_caption_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_coco_caption_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'),
is_caption_task=True,
)
# evaluation settings
qwen_coco_caption_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
) # noqa
]
from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]
dataset = dict(type='mmpretrain.Flickr30kCaption',
data_root='data/flickr30k',
ann_file='annotations/dataset_flickr30k.json',
data_prefix='images',
split='val',
pipeline=val_pipeline)
qwen_flickr30k_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_flickr30k_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'),
is_caption_task=True,
)
# evaluation settings
qwen_flickr30k_evaluator = [
dict(
type='mmpretrain.COCOCaption',
ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
) # noqa
]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.GQA',
data_root='data/gqa',
data_prefix='images',
ann_file='annotations/testdev_balanced_questions.json',
pipeline=val_pipeline)
qwen_gqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_gqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(type='mmpretrain.PackInputs',
algorithm_keys=[
'question', 'options', 'category', 'l2-category', 'context',
'index', 'options_dict'
])
]
dataset = dict(type='opencompass.MMBenchDataset',
data_file='/mnt/petrelfs/share_data/yuanyike/cnbench_v010_rolling.tsv',
pipeline=val_pipeline,
sys_prompt='请从以下选项中选择一个正确选项。')
qwen_mmbench_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor)
)
# evaluation settings
qwen_mmbench_evaluator = [
dict(type='opencompass.DumpResults',
save_path='work_dirs/qwenvl-chat-7b-cnbench-v010.xlsx')
]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(type='mmpretrain.OCRVQA',
data_root='data/ocrvqa',
ann_file='annotations/dataset.json',
split='test',
data_prefix='images',
pipeline=val_pipeline)
qwen_ocrvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_ocrvqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor
# dataloader settings
val_pipeline = [
dict(type='mmpretrain.LoadImageFromFile'),
dict(type='mmpretrain.ToPIL', to_rgb=True),
dict(type='mmpretrain.torchvision/Resize',
size=(448, 448),
interpolation=3),
dict(type='mmpretrain.torchvision/ToTensor'),
dict(type='mmpretrain.torchvision/Normalize',
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)),
dict(
type='mmpretrain.PackInputs',
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
meta_keys=['question_id', 'image_id'],
)
]
dataset = dict(
type='mmpretrain.COCOVQA',
data_root='data/okvqa',
question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
ann_file='annotations/mscoco_val2014_annotations.json',
pipeline=val_pipeline,
data_prefix='images/val2014',
)
qwen_okvqa_dataloader = dict(batch_size=1,
num_workers=4,
dataset=dataset,
collate_fn=dict(type='pseudo_collate'),
sampler=dict(type='DefaultSampler', shuffle=False))
# model settings
qwen_okvqa_model = dict(
type='qwen-vl-chat',
pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id
prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)
# evaluation settings
qwen_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]