Commit 7d346000 authored by gaotongxiao

initial commit
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CslDataset
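# FewCLUE CSL: judge whether the listed keywords genuinely describe the paper
# abstract. Scored zero-shot by perplexity: each label key below maps to a
# full candidate prompt (0 = abstract only, 1 = abstract + keywords).
# A minimal sketch of the selection rule (not the actual PPLInferencer API):
#   scores = {label: ppl(render(template[label], example)) for label in (0, 1)}
#   pred = min(scores, key=scores.get)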
csl_reader_cfg = dict(
input_columns=["abst", "keywords"], output_column='label')
csl_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[dict(role="HUMAN", prompt="摘要:{abst}")]),
            1:
            dict(round=[
                dict(role="HUMAN", prompt="摘要:{abst}\n关键词:{keywords}")
            ]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
csl_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
csl_datasets = [
dict(
type=CslDataset,
path='json',
abbr='csl_dev',
data_files='./data/FewCLUE/csl/dev_few_all.json',
split='train',
reader_cfg=csl_reader_cfg,
infer_cfg=csl_infer_cfg,
eval_cfg=csl_eval_cfg),
dict(
type=CslDataset,
path='json',
abbr='csl_test',
data_files='./data/FewCLUE/csl/test_public.json',
split='train',
reader_cfg=csl_reader_cfg,
infer_cfg=csl_infer_cfg,
eval_cfg=csl_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
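# FewCLUE EPRSTMT: binary sentiment classification of e-commerce reviews.
# Each label is verbalized inside its template ("消极" = negative,
# "积极" = positive), and the lower-perplexity completion wins.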
eprstmt_reader_cfg = dict(
input_columns=['sentence'], output_column='label', test_split='train')
eprstmt_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'Negative': ' 内容: "{sentence}"。情绪分类:消极。',
'Positive': ' 内容: "{sentence}"。情绪分类:积极。',
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
eprstmt_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
eprstmt_datasets = [
dict(
type=HFDataset,
abbr='eprstmt-dev',
path='json',
data_files='./data/FewCLUE/eprstmt/dev_few_all.json',
split='train',
reader_cfg=eprstmt_reader_cfg,
infer_cfg=eprstmt_infer_cfg,
eval_cfg=eprstmt_eval_cfg),
dict(
type=HFDataset,
abbr='eprstmt-test',
path='json',
data_files='./data/FewCLUE/eprstmt/test_public.json',
split='train',
reader_cfg=eprstmt_reader_cfg,
infer_cfg=eprstmt_infer_cfg,
eval_cfg=eprstmt_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
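# FewCLUE OCNLI-FC: three-way natural language inference. Each label is
# rendered as a HUMAN question plus a one-word BOT answer ("对" = entailment,
# "错" = contradiction, "可能" = neutral), and PPLInferencer picks the
# lowest-perplexity rendering.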
ocnli_fc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
ocnli_fc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
dict(round=[
dict(
role="HUMAN",
prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
dict(role="BOT", prompt="错")
]),
'entailment':
dict(round=[
dict(
role="HUMAN",
prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
dict(role="BOT", prompt="对")
]),
'neutral':
dict(round=[
dict(
role="HUMAN", prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
dict(role="BOT", prompt="可能")
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
ocnli_fc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
ocnli_fc_datasets = [
dict(
type=HFDataset,
abbr='ocnli_fc-dev',
path='json',
split='train',
data_files='./data/FewCLUE/ocnli/dev_few_all.json',
reader_cfg=ocnli_fc_reader_cfg,
infer_cfg=ocnli_fc_infer_cfg,
eval_cfg=ocnli_fc_eval_cfg),
dict(
type=HFDataset,
abbr='ocnli_fc-test',
path='json',
split='train',
data_files='./data/FewCLUE/ocnli/test_public.json',
reader_cfg=ocnli_fc_reader_cfg,
infer_cfg=ocnli_fc_infer_cfg,
eval_cfg=ocnli_fc_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
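# Plain-string variant of the OCNLI-FC PPL config above: the same three
# verbalized answers, but rendered as single prompt strings instead of
# HUMAN/BOT rounds.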
ocnli_fc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
ocnli_fc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
'阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
'entailment': '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
'neutral': '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
ocnli_fc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
ocnli_fc_datasets = [
dict(
type=HFDataset,
abbr='ocnli_fc-dev',
path='json',
split='train',
data_files='./data/FewCLUE/ocnli/dev_few_all.json',
reader_cfg=ocnli_fc_reader_cfg,
infer_cfg=ocnli_fc_infer_cfg,
eval_cfg=ocnli_fc_eval_cfg),
dict(
type=HFDataset,
abbr='ocnli_fc-test',
path='json',
split='train',
data_files='./data/FewCLUE/ocnli/test_public.json',
reader_cfg=ocnli_fc_reader_cfg,
infer_cfg=ocnli_fc_infer_cfg,
eval_cfg=ocnli_fc_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset_V2
tnews_reader_cfg = dict(
input_columns="sentence",
output_column="label_desc2",
)
tnews_labels = [
"农业新闻", # news_agriculture
"旅游新闻", # news_travel
"游戏新闻", # news_game
"科技类别公司新闻", # news_tech
"体育类别新闻", # news_sports
"初升高教育新闻", # news_edu
"娱乐圈新闻", # news_entertainment
"投资资讯", # news_finance
"军事类别常识", # news_military
"车辆新闻", # news_car
"楼市新闻", # news_house
"环球不含中国类别新闻", # news_world
"书籍文化历史类别新闻", # news_culture
"故事类别新闻", # news_story
"股票市场类别新闻", # news_stock
]
_tnews_options_list_str = "\n".join(f'{chr(ord("A") + i)}. {tnews_labels[i]}'
for i in range(len(tnews_labels)))
_tnews_options_range_str = ",".join(f'“{chr(ord("A") + i)}”'
for i in range(len(tnews_labels)))
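# The two helpers above expand to a 15-way multiple-choice menu, letters A-O:
#   _tnews_options_list_str == "A. 农业新闻\nB. 旅游新闻\n...\nO. 股票市场类别新闻"
#   _tnews_options_range_str == "“A”,“B”,...,“O”"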
tnews_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
prompt=
f"{{sentence}}\n请判断上述内容属于什么新闻?\n{_tnews_options_list_str}\n请从{_tnews_options_range_str}中进行选择。\n答:",
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
tnews_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type="first-capital"),
)
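# Generation-based scoring: the model answers in free text, the
# "first-capital" postprocessor keeps only the first capital letter of the
# prediction (e.g. "B. 旅游新闻" -> "B"), and AccEvaluator compares it with
# the gold option letter (as prepared by TNewsDataset_V2).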
tnews_datasets = [
dict(
abbr="tnews-dev",
type=TNewsDataset_V2,
path="./data/FewCLUE/tnews/dev_few_all.json",
reader_cfg=tnews_reader_cfg,
infer_cfg=tnews_infer_cfg,
eval_cfg=tnews_eval_cfg,
),
dict(
abbr="tnews-test",
type=TNewsDataset_V2,
path="./data/FewCLUE/tnews/test_public.json",
reader_cfg=tnews_reader_cfg,
infer_cfg=tnews_infer_cfg,
eval_cfg=tnews_eval_cfg,
),
]
del _tnews_options_list_str, _tnews_options_range_str  # keep helpers out of the exported config
from mmengine.config import read_base
with read_base():
from .FewCLUE_tnews_ppl_784b9e import tnews_datasets # noqa: F401, F403
from mmengine.config import read_base
with read_base():
from .GaokaoBench_mixed_f2038e import GaokaoBench_datasets # noqa: F401, F403
from mmengine.config import read_base
with read_base():
from .PJExam_gen_785c37 import PJExam_datasets # noqa: F401, F403
from mmengine.config import read_base
with read_base():
from .SuperGLUE_AX_g_gen_7a5dee import AX_g_datasets # noqa: F401, F403
from mmengine.config import read_base
with read_base():
from .SuperGLUE_BoolQ_gen_8525d1 import BoolQ_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BoolQDataset
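# SuperGLUE BoolQ: yes/no questions over a passage, scored by perplexity
# (label 0 -> "No.", label 1 -> "Yes."). path="json" with split="train" is
# used because HuggingFace's generic json loader exposes the file under a
# single 'train' split.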
BoolQ_reader_cfg = dict(
input_columns=["question", "passage"],
output_column="answer",
test_split="train")
BoolQ_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(role="HUMAN", prompt="{passage}\nQuestion: {question}"),
dict(role="BOT", prompt="No."),
]),
1:
dict(round=[
dict(role="HUMAN", prompt="{passage}\nQuestion: {question}"),
dict(role="BOT", prompt="Yes."),
]),
},
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer),
)
BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
BoolQ_datasets = [
dict(
type=BoolQDataset,
abbr="BoolQ",
path="json",
data_files="./data/SuperGLUE/BoolQ/val.jsonl",
split="train",
reader_cfg=BoolQ_reader_cfg,
infer_cfg=BoolQ_infer_cfg,
eval_cfg=BoolQ_eval_cfg,
)
]
from mmengine.config import read_base
with read_base():
from .SuperGLUE_CB_ppl_32adbb import CB_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
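# SuperGLUE CB (CommitmentBank): three-way entailment. Each label key is
# verbalized as the BOT's one-word answer and ranked by perplexity.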
CB_reader_cfg = dict(
input_columns=["premise", "hypothesis"],
output_column="label",
)
CB_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
"contradiction":
dict(round=[
dict(
role="HUMAN",
prompt=
"{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"
),
dict(role="BOT", prompt="Contradiction"),
]),
"entailment":
dict(round=[
dict(
role="HUMAN",
prompt=
"{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"
),
dict(role="BOT", prompt="Entailment"),
]),
"neutral":
dict(round=[
dict(
role="HUMAN",
prompt=
"{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"
),
dict(role="BOT", prompt="Neutral"),
]),
},
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer),
)
CB_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
CB_datasets = [
dict(
type=HFDataset,
abbr="CB",
path="json",
split="train",
data_files="./data/SuperGLUE/CB/val.jsonl",
reader_cfg=CB_reader_cfg,
infer_cfg=CB_infer_cfg,
eval_cfg=CB_eval_cfg,
)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
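# SuperGLUE COPA: causal reasoning. The "question" field is either "cause"
# or "effect", so the prompt reads "What may be the cause/effect?"; the two
# candidate continuations are ranked by perplexity.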
COPA_reader_cfg = dict(
input_columns=["question", "premise", "choice1", "choice2"],
output_column="label",
test_split="train")
COPA_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(
role="HUMAN",
prompt="{premise}\nQuestion: What may be the {question}?\nAnswer:"),
dict(role="BOT", prompt="{choice1}"),
]),
1:
dict(round=[
dict(
role="HUMAN",
prompt="{premise}\nQuestion: What may be the {question}?\nAnswer:"),
dict(role="BOT", prompt="{choice2}"),
]),
},
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer),
)
COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
COPA_datasets = [
dict(
type=HFDataset,
abbr="COPA",
path="json",
data_files="./data/SuperGLUE/COPA/val.jsonl",
split="train",
reader_cfg=COPA_reader_cfg,
infer_cfg=COPA_infer_cfg,
eval_cfg=COPA_eval_cfg,
)
]
from mmengine.config import read_base
with read_base():
from .SuperGLUE_MultiRC_ppl_83a304 import MultiRC_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDataset_V2
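# SuperGLUE RTE, generation-based: the model must answer the two-way
# multiple choice (A. Yes / B. No) in free text; see the eval_cfg below for
# how the answer letter is extracted from the BOT response.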
RTE_reader_cfg = dict(
input_columns=["hypothesis", "premise"],
output_column="label",
)
RTE_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
prompt=
"{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:"
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
RTE_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type="first-capital"),
)
RTE_datasets = [
dict(
abbr="RTE",
        type=AXDataset_V2,  # RTE shares the same format as AX
path="./data/SuperGLUE/RTE/val.jsonl",
reader_cfg=RTE_reader_cfg,
infer_cfg=RTE_infer_cfg,
eval_cfg=RTE_eval_cfg,
)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
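# SuperGLUE RTE, perplexity-based variant: the same entailment question as
# the generation config above, with the "Yes"/"No" answers scored as BOT
# completions instead of parsed from generated text.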
RTE_reader_cfg = dict(
input_columns=["hypothesis", "premise"],
output_column="label",
test_split="train")
RTE_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
"entailment":
dict(round=[
dict(
role="HUMAN",
prompt=
"{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"
),
dict(role="BOT", prompt="Yes"),
]),
"not_entailment":
dict(round=[
dict(
role="HUMAN",
prompt=
"{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"
),
dict(role="BOT", prompt="No"),
])
},
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer),
)
RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
RTE_datasets = [
dict(
type=HFDataset,
abbr="RTE",
path="json",
data_files="./data/SuperGLUE/RTE/val.jsonl",
split="train",
reader_cfg=RTE_reader_cfg,
infer_cfg=RTE_infer_cfg,
eval_cfg=RTE_eval_cfg,
)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset
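# SuperGLUE ReCoRD: cloze-style QA over a news passage. The model generates
# the entity that the ____ placeholder stands for; the 'ReCoRD'
# postprocessor cleans the generated text, and EMEvaluator checks exact
# match against the reference answers.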
ReCoRD_reader_cfg = dict(
input_columns=['question', 'text'], output_column='answers')
ReCoRD_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
        template=
        "Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result? Give me the entity name:"),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
ReCoRD_eval_cfg = dict(
evaluator=dict(type=EMEvaluator), pred_postprocessor=dict(type='ReCoRD'))
ReCoRD_datasets = [
dict(
type=ReCoRDDataset,
abbr='ReCoRD',
path='./data/SuperGLUE/ReCoRD/val.jsonl',
reader_cfg=ReCoRD_reader_cfg,
infer_cfg=ReCoRD_infer_cfg,
eval_cfg=ReCoRD_eval_cfg)
]