Commit c289ecc0 authored by xinghao

Initial commit
# --- Wrapper config: re-exports XCOPA_datasets from XCOPA_ppl_54058d.py ---
from mmengine.config import read_base

with read_base():
    from .XCOPA_ppl_54058d import XCOPA_datasets  # noqa: F401, F403


# --- XCOPA_ppl_54058d.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import XCOPADataset

XCOPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
    # evaluation runs on the dataset's 'train' split
    test_split='train')

XCOPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Both PPL candidates share the same 'Premise:' context so that the
        # perplexity comparison differs only in the answer choice.
        template={
            0: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice1}.',
            1: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice2}.',
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

XCOPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

XCOPA_datasets = [
    dict(
        type=XCOPADataset,
        path='xcopa',
        reader_cfg=XCOPA_reader_cfg,
        infer_cfg=XCOPA_infer_cfg,
        eval_cfg=XCOPA_eval_cfg)
]
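
# A minimal sketch of the selection rule the numbered templates above feed
# into: the PPL inferencer fills each template, scores the completed prompt,
# and predicts the key with the lowest perplexity. `get_ppl` here is a
# hypothetical stand-in for the real model call, not an OpenCompass API.
def pick_label_by_ppl(get_ppl, example, templates):
    # Lower perplexity means the completed prompt reads as more natural to
    # the model, so that answer choice wins.
    scores = {
        label: get_ppl(tmpl.format(**example))
        for label, tmpl in templates.items()
    }
    return min(scores, key=scores.get)

# e.g. pick_label_by_ppl(my_ppl_fn, row,
#                        XCOPA_infer_cfg['prompt_template']['template'])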

# --- Wrapper config: re-exports XLSum_datasets from XLSum_gen_2bb71c.py ---
from mmengine.config import read_base

with read_base():
    from .XLSum_gen_2bb71c import XLSum_datasets  # noqa: F401, F403


# --- XLSum_gen_2bb71c.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import RougeEvaluator
from opencompass.datasets import XLSUMDataset, Xsum_postprocess

XLSum_reader_cfg = dict(input_columns=['text'], output_column='summary')

XLSum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template='Document:{text}\n'
        'Based on the previous text, provide a brief single summary:'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

XLSum_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_postprocessor=dict(type=Xsum_postprocess),
)

XLSum_datasets = [
    dict(
        type=XLSUMDataset,
        path='csebuetnlp/xlsum',
        reader_cfg=XLSum_reader_cfg,
        infer_cfg=XLSum_infer_cfg,
        eval_cfg=XLSum_eval_cfg)
]
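
# A sketch of what a first-line postprocessor such as Xsum_postprocess
# plausibly does before ROUGE scoring (assumed behaviour, not copied from
# upstream): keep only the first line of the generation, since the prompt
# asks for a single brief summary and trailing chatter would hurt the match.
def first_line_postprocess(text: str) -> str:
    # Trim whitespace, then cut at the first newline so any follow-on
    # explanation or second summary is not scored.
    return text.strip().split('\n')[0].strip()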

# --- Wrapper config: re-exports Xsum_datasets from Xsum_gen_31397e.py ---
from mmengine.config import read_base

with read_base():
    from .Xsum_gen_31397e import Xsum_datasets  # noqa: F401, F403


# --- Xsum_gen_31397e.py (chat-style template) ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import RougeEvaluator
from opencompass.datasets import XsumDataset, Xsum_postprocess

Xsum_reader_cfg = dict(input_columns=['dialogue'], output_column='summary')

Xsum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Chat-style template: a single HUMAN turn; the model's reply is
        # evaluated as the BOT role below.
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                'Document:{dialogue}\nBased on the previous text, provide a brief single summary:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

Xsum_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT',
    # Reference the postprocessor function directly rather than the bare
    # string 'Xsum', matching the import above.
    pred_postprocessor=dict(type=Xsum_postprocess),
)

Xsum_datasets = [
    dict(
        type=XsumDataset,
        abbr='Xsum',
        path='opencompass/xsum',
        reader_cfg=Xsum_reader_cfg,
        infer_cfg=Xsum_infer_cfg,
        eval_cfg=Xsum_eval_cfg,
    )
]

# --- Second Xsum config variant (plain string template; file name not shown
# in this commit view) ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import RougeEvaluator
from opencompass.datasets import XsumDataset, Xsum_postprocess

Xsum_reader_cfg = dict(input_columns=['dialogue'], output_column='summary')

Xsum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template='Document:{dialogue}\n'
        'Based on the previous text, provide a brief single summary:'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

Xsum_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_postprocessor=dict(type=Xsum_postprocess),
)

Xsum_datasets = [
    dict(
        type=XsumDataset,
        abbr='Xsum',
        path='opencompass/xsum',
        reader_cfg=Xsum_reader_cfg,
        infer_cfg=Xsum_infer_cfg,
        eval_cfg=Xsum_eval_cfg)
]

# --- Aggregate config: collects all AdvGLUE subsets into `datasets` ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_sst2.adv_glue_sst2_gen import adv_sst2_datasets
    from .adv_glue_qqp.adv_glue_qqp_gen import adv_qqp_datasets
    from .adv_glue_rte.adv_glue_rte_gen import adv_rte_datasets
    from .adv_glue_qnli.adv_glue_qnli_gen import adv_qnli_datasets
    from .adv_glue_mnli.adv_glue_mnli_gen import adv_mnli_datasets
    from .adv_glue_mnli_mm.adv_glue_mnli_mm_gen import adv_mnli_mm_datasets

# Concatenate every list whose name ends with '_datasets' into one flat list.
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
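
# A self-contained sketch of the aggregation idiom above: `sum` with `[]` as
# its start value concatenates, in definition order, every list bound to a
# name ending in '_datasets'. The toy entries below are illustrative only.
def collect_datasets(namespace):
    # Mirrors `sum((v for k, v in locals().items() ...), [])` but takes the
    # namespace explicitly so the sketch can be tested in isolation.
    return sum(
        (v for k, v in namespace.items() if k.endswith('_datasets')), [])

assert collect_datasets({
    'adv_a_datasets': [1],
    'adv_b_datasets': [2],
    'other': [3],  # not collected: name lacks the '_datasets' suffix
}) == [1, 2]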

# --- Wrapper config: re-exports adv_mnli_datasets from adv_glue_mnli_gen_bd8ef0.py ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_mnli_gen_bd8ef0 import adv_mnli_datasets  # noqa: F401, F403


# --- adv_glue_mnli_gen_bd8ef0.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvMnliDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_mnli_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'], output_column='label_option')

adv_mnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes', 'B. maybe' or 'C. no'.
premise: {premise}
hypothesis: {hypothesis}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_mnli_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
)

adv_mnli_datasets = [
    dict(
        abbr='adv_mnli',
        type=AdvMnliDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_mnli_reader_cfg,
        infer_cfg=adv_mnli_infer_cfg,
        eval_cfg=adv_mnli_eval_cfg,
    )
]
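
# A simplified sketch of the option-extraction step first_option_postprocess
# is used for here (the upstream helper copes with many more answer
# phrasings): return the first standalone option letter found in the
# generation, e.g. 'The answer is B. maybe.' -> 'B'.
import re

def extract_first_option(text: str, options: str = 'ABC') -> str:
    # \b keeps the match to a standalone letter, so 'Answer: C. no' -> 'C'
    # while letters inside words are ignored.
    match = re.search(rf'\b([{options}])\b', text)
    return match.group(1) if match else ''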

# --- Wrapper config: re-exports adv_mnli_mm_datasets from adv_glue_mnli_mm_gen_bd8ef0.py ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_mnli_mm_gen_bd8ef0 import adv_mnli_mm_datasets  # noqa: F401, F403


# --- adv_glue_mnli_mm_gen_bd8ef0.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvMnliMMDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_mnli_mm_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'], output_column='label_option')

adv_mnli_mm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes', 'B. maybe' or 'C. no'.
premise: {premise}
hypothesis: {hypothesis}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_mnli_mm_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
)

adv_mnli_mm_datasets = [
    dict(
        abbr='adv_mnli_mm',
        type=AdvMnliMMDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_mnli_mm_reader_cfg,
        infer_cfg=adv_mnli_mm_infer_cfg,
        eval_cfg=adv_mnli_mm_eval_cfg,
    )
]

# --- Wrapper config: re-exports adv_qnli_datasets from adv_glue_qnli_gen_0b7326.py ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_qnli_gen_0b7326 import adv_qnli_datasets  # noqa: F401, F403


# --- adv_glue_qnli_gen_0b7326.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvQnliDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_qnli_reader_cfg = dict(
    input_columns=['question', 'sentence'], output_column='label_option')

adv_qnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the sentence answers the question. The answer should be exactly 'A. yes' or 'B. no'.
question: {question}
sentence: {sentence}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_qnli_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_qnli_datasets = [
    dict(
        abbr='adv_qnli',
        type=AdvQnliDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_qnli_reader_cfg,
        infer_cfg=adv_qnli_infer_cfg,
        eval_cfg=adv_qnli_eval_cfg,
    )
]

# --- Wrapper config: re-exports adv_qqp_datasets from adv_glue_qqp_gen_cdc277.py ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_qqp_gen_cdc277 import adv_qqp_datasets  # noqa: F401, F403


# --- adv_glue_qqp_gen_cdc277.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvQqpDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_qqp_reader_cfg = dict(
    input_columns=['question1', 'question2'], output_column='label_option')

adv_qqp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether Question 1 has the same meaning as Question 2. The answer should be exactly 'A. no' or 'B. yes'.
Question 1: {question1}
Question 2: {question2}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_qqp_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_qqp_datasets = [
    dict(
        abbr='adv_qqp',
        type=AdvQqpDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_qqp_reader_cfg,
        infer_cfg=adv_qqp_infer_cfg,
        eval_cfg=adv_qqp_eval_cfg,
    )
]

# --- Wrapper config: re-exports adv_rte_datasets from adv_glue_rte_gen_8cc547.py ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_rte_gen_8cc547 import adv_rte_datasets  # noqa: F401, F403


# --- adv_glue_rte_gen_8cc547.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvRteDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_rte_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'], output_column='label_option')

adv_rte_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                # In GLUE RTE, sentence1 is the premise and sentence2 the
                # hypothesis.
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes' or 'B. no'.
premise: {sentence1}
hypothesis: {sentence2}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_rte_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_rte_datasets = [
    dict(
        abbr='adv_rte',
        type=AdvRteDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_rte_reader_cfg,
        infer_cfg=adv_rte_infer_cfg,
        eval_cfg=adv_rte_eval_cfg,
    )
]

# --- Wrapper config: re-exports adv_sst2_datasets from adv_glue_sst2_gen_ee8d3b.py ---
from mmengine.config import read_base

with read_base():
    from .adv_glue_sst2_gen_ee8d3b import adv_sst2_datasets  # noqa: F401, F403


# --- adv_glue_sst2_gen_ee8d3b.py ---
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvSst2Dataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_sst2_reader_cfg = dict(
    input_columns=['sentence'], output_column='label_option')

adv_sst2_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """For the given sentence, label the sentiment of the sentence as positive or negative. The answer should be exactly 'A. negative' or 'B. positive'.
sentence: {sentence}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_sst2_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_sst2_datasets = [
    dict(
        abbr='adv_sst2',
        type=AdvSst2Dataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_sst2_reader_cfg,
        infer_cfg=adv_sst2_infer_cfg,
        eval_cfg=adv_sst2_eval_cfg,
    )
]
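
# A hedged usage sketch: a top-level OpenCompass entry config would typically
# pull in the aggregate AdvGLUE list via read_base and pair it with a model
# config. The module paths below follow the usual configs/ layout but are
# assumptions; this commit does not show them.
from mmengine.config import read_base

with read_base():
    from .datasets.adv_glue.adv_glue import datasets  # hypothetical path
    from .models.hf_internlm_7b import models  # hypothetical model config

# Then run the evaluation with: python run.py <path to this entry config>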