Commit c94cc943 authored by Leymore, committed by gaotong

Add release contribution

parent e6b5bdcb
from mmengine.config import read_base
with read_base():
from .ARC_c_gen_3f3039 import ARC_c_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset
ARC_c_reader_cfg = dict(
input_columns=["question", "textA", "textB", "textC", "textD"],
output_column="answerKey")
ARC_c_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role="HUMAN",
prompt=
"Question: {question}\nA. {textA}\nB. {textB}\nC. {textC}\nD. {textD}\nAnswer:"
)
], ),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
ARC_c_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type="first-capital"),
)
ARC_c_datasets = [
dict(
abbr="ARC-c",
type=ARCDataset,
path="./data/ARC/ARC-c/ARC-Challenge-Dev.jsonl",
reader_cfg=ARC_c_reader_cfg,
infer_cfg=ARC_c_infer_cfg,
eval_cfg=ARC_c_eval_cfg,
)
]
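# ---------------------------------------------------------------------------
# Illustrative note (a sketch, not part of the committed file): a dataset
# list such as ARC_c_datasets above is normally consumed from a top-level
# evaluation config through mmengine's read_base(), e.g.
#
#     from mmengine.config import read_base
#     with read_base():
#         from .datasets.ARC_c.ARC_c_gen import ARC_c_datasets
#     datasets = [*ARC_c_datasets]
#
# The module path `.datasets.ARC_c.ARC_c_gen` is an assumption inferred from
# the read_base imports used elsewhere in this commit.
# ---------------------------------------------------------------------------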
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset
ARC_e_reader_cfg = dict(
input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
output_column='answerKey')
ARC_e_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
"A":
dict(
round=[
dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
dict(role="BOT", prompt="{textA}")
], ),
"B":
dict(
round=[
dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
dict(role="BOT", prompt="{textB}")
], ),
"C":
dict(
round=[
dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
dict(role="BOT", prompt="{textC}")
], ),
"D":
dict(
round=[
dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
dict(role="BOT", prompt="{textD}")
], ),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
ARC_e_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
ARC_e_datasets = [
dict(
type=ARCDataset,
abbr='ARC-e',
path='./data/ARC/ARC-e/ARC-Easy-Dev.jsonl',
reader_cfg=ARC_e_reader_cfg,
infer_cfg=ARC_e_infer_cfg,
eval_cfg=ARC_e_eval_cfg)
]
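# Unlike the generation-style ARC-c config above, this ARC-e config renders
# one prompt per answer option and lets PPLInferencer keep the label whose
# completion the model finds most likely. The helper below is a minimal,
# self-contained sketch of that selection rule, not OpenCompass's code;
# `score_fn` is a hypothetical stand-in for a model's average negative
# log-likelihood over the rendered text (lower is better).
def pick_label_by_ppl(templates, fields, score_fn):
    # Render one candidate string per label, score each, keep the cheapest.
    rendered = {label: tpl.format(**fields) for label, tpl in templates.items()}
    return min(rendered, key=lambda label: score_fn(rendered[label]))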
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset
CMRC_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
CMRC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template="文章:{context}\n根据上文,回答如下问题: {question}\n答:"),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
CMRC_eval_cfg = dict(evaluator=dict(type=EMEvaluator))
CMRC_datasets = [
dict(
type=CMRCDataset,
abbr='CMRC_dev',
path='./data/CLUE/CMRC/dev.json',
reader_cfg=CMRC_reader_cfg,
infer_cfg=CMRC_infer_cfg,
eval_cfg=CMRC_eval_cfg),
]
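# CMRC is free-form extractive QA, hence EMEvaluator instead of AccEvaluator.
# A minimal illustration of what an exact-match metric computes, assuming
# whitespace-insensitive comparison against any reference answer; the real
# EMEvaluator may normalize differently.
def exact_match(prediction, references):
    norm = lambda s: "".join(s.split())
    return float(any(norm(prediction) == norm(ref) for ref in references))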
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
afqmc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
afqmc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(
role="HUMAN",
prompt=
"语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?"
),
dict(role="BOT", prompt="不完全一致")
]),
1:
dict(round=[
dict(
role="HUMAN",
prompt=
"语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?"
),
dict(role="BOT", prompt="完全一致")
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
afqmc_datasets = [
dict(
type=HFDataset,
abbr='afqmc-dev',
path='json',
data_files='./data/CLUE/AFQMC/dev.json',
split='train',
reader_cfg=afqmc_reader_cfg,
infer_cfg=afqmc_infer_cfg,
eval_cfg=afqmc_eval_cfg),
]
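# The HFDataset entry above (path='json', data_files=..., split='train')
# mirrors the signature of Hugging Face's datasets.load_dataset, which
# OpenCompass presumably forwards to. As a standalone illustration, the same
# dev split can be inspected directly (requires the `datasets` package):
if __name__ == "__main__":
    from datasets import load_dataset
    afqmc_dev = load_dataset(
        "json", data_files="./data/CLUE/AFQMC/dev.json", split="train")
    print(afqmc_dev[0])  # expects sentence1 / sentence2 / label fields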
from mmengine.config import read_base
with read_base():
from .CLUE_cmnli_ppl_1c652a import cmnli_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
cmnli_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
cmnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
dict(round=[
dict(
role="HUMAN",
prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
dict(role="BOT", prompt="错")
]),
'entailment':
dict(round=[
dict(
role="HUMAN",
prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
dict(role="BOT", prompt="对")
]),
'neutral':
dict(round=[
dict(
role="HUMAN", prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
dict(role="BOT", prompt="可能")
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
cmnli_datasets = [
dict(
type=HFDataset,
abbr='cmnli',
path='json',
split='train',
data_files='./data/CLUE/cmnli/cmnli_public/dev.json',
reader_cfg=cmnli_reader_cfg,
infer_cfg=cmnli_infer_cfg,
eval_cfg=cmnli_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
cmnli_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
cmnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
'阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
'entailment': '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
'neutral': '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
cmnli_datasets = [
dict(
type=HFDataset,
abbr='cmnli',
path='json',
split='train',
data_files='./data/CLUE/cmnli/cmnli_public/dev.json',
reader_cfg=cmnli_reader_cfg,
infer_cfg=cmnli_infer_cfg,
eval_cfg=cmnli_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import cmnliDataset_V2
ocnli_reader_cfg = dict(
input_columns=["sentence1", "sentence2"],
output_column="label",
)
# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
prompt=
"阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:"
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
ocnli_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type="first-capital"),
)
ocnli_datasets = [
dict(
abbr="ocnli",
        type=cmnliDataset_V2,  # OCNLI shares the same format as cmnli
path="./data/CLUE/OCNLI/dev.json",
reader_cfg=ocnli_reader_cfg,
infer_cfg=ocnli_infer_cfg,
eval_cfg=ocnli_eval_cfg,
)
]
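# The gen-style configs in this commit (ARC-c, OCNLI, CSL, EPRSTMT) all rely
# on a "first-capital" pred_postprocessor to map free-form model output back
# to an option letter. A plausible minimal re-implementation, assuming the
# rule is simply "first capital letter wins"; the actual OpenCompass
# post-processor may differ in detail:
import re

def first_capital(text):
    match = re.search(r"[A-Z]", text)
    return match.group(0) if match else ""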
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
bustm_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
bustm_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0: "{sentence1}。\n{sentence2}。\n两句话说的毫不相关。",
1: "{sentence1}。\n{sentence2}。\n两句话说的一个意思。"
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
bustm_datasets = [
dict(
type=HFDataset,
abbr='bustm-dev',
path='json',
data_files='./data/FewCLUE/bustm/dev_few_all.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg),
dict(
type=HFDataset,
abbr='bustm-test',
path='json',
data_files='./data/FewCLUE/bustm/test_public.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg)
]
from mmengine.config import read_base
with read_base():
from .FewCLUE_chid_gen_686c63 import chid_datasets # noqa: F401, F403
from mmengine.config import read_base
with read_base():
from .FewCLUE_cluewsc_ppl_2a9e61 import cluewsc_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CslDataset_V2
csl_reader_cfg = dict(
input_columns=["abst", "keywords"],
output_column="label",
)
csl_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
prompt=
"摘要:{abst}\n关键词:{keywords}\n上述关键词出现在学术期刊中是否恰当?\nA. 否\nB. 是\n请从”A“,”B“中进行选择。\n答:"
)
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
csl_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type="first-capital"),
)
csl_datasets = [
dict(
abbr="csl_dev",
type=CslDataset_V2,
path="./data/FewCLUE/csl/dev_few_all.json",
reader_cfg=csl_reader_cfg,
infer_cfg=csl_infer_cfg,
eval_cfg=csl_eval_cfg,
),
dict(
abbr="csl_test",
type=CslDataset_V2,
path="./data/FewCLUE/csl/test_public.json",
reader_cfg=csl_reader_cfg,
infer_cfg=csl_infer_cfg,
eval_cfg=csl_eval_cfg,
),
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import eprstmtDataset_V2
eprstmt_reader_cfg = dict(
input_columns=["sentence"], output_column="label", test_split="train")
eprstmt_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
prompt=
'内容: "{sentence}"。请对上述内容进行情绪分类。\nA. 积极\nB. 消极\n请从”A“,”B“中进行选择。\n答:'
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
eprstmt_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type="first-capital"),
)
eprstmt_datasets = [
dict(
abbr="eprstmt-dev",
type=eprstmtDataset_V2,
path="./data/FewCLUE/eprstmt/dev_few_all.json",
reader_cfg=eprstmt_reader_cfg,
infer_cfg=eprstmt_infer_cfg,
eval_cfg=eprstmt_eval_cfg,
),
dict(
abbr="eprstmt-test",
type=eprstmtDataset_V2,
path="./data/FewCLUE/eprstmt/test_public.json",
reader_cfg=eprstmt_reader_cfg,
infer_cfg=eprstmt_infer_cfg,
eval_cfg=eprstmt_eval_cfg,
),
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
eprstmt_reader_cfg = dict(
input_columns=['sentence'], output_column='label', test_split='train')
eprstmt_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'Negative':
dict(round=[
dict(role='HUMAN', prompt='内容: "{sentence}"。情绪分类:'),
dict(role='BOT', prompt='消极。')
]),
'Positive':
dict(round=[
dict(role='HUMAN', prompt='内容: "{sentence}"。情绪分类:'),
dict(role='BOT', prompt='积极。')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
eprstmt_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
eprstmt_datasets = [
dict(
type=HFDataset,
abbr='eprstmt-dev',
path='json',
data_files='./data/FewCLUE/eprstmt/dev_few_all.json',
split='train',
reader_cfg=eprstmt_reader_cfg,
infer_cfg=eprstmt_infer_cfg,
eval_cfg=eprstmt_eval_cfg),
dict(
type=HFDataset,
abbr='eprstmt-test',
path='json',
data_files='./data/FewCLUE/eprstmt/test_public.json',
split='train',
reader_cfg=eprstmt_reader_cfg,
infer_cfg=eprstmt_infer_cfg,
eval_cfg=eprstmt_eval_cfg)
]
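# Both the gen- and ppl-style EPRSTMT configs share AccEvaluator. For
# reference, accuracy here is just the percentage of predictions equal to
# the gold labels; a minimal sketch, not OpenCompass's implementation:
def accuracy(predictions, references):
    assert len(predictions) == len(references)
    correct = sum(p == r for p, r in zip(predictions, references))
    return 100.0 * correct / max(len(references), 1)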
from mmengine.config import read_base
with read_base():
from .FewCLUE_tnews_gen_8d59ba import tnews_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import PJExamDataset, PJExamEvaluator
PJExam_datasets = []
for _name in [
'gk-2022-v1', 'gk-2022-v1-math', 'gk-2023-v1', 'gk-2023-v1-math',
'gk-2023-v2', 'gk-2023-v2-math', 'zk-2022-v1'
]:
_hint = "请你做一道</major>选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】A<eoa>\n完整的题目回答的格式如下:\n【解析】...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:\n"
    _reader_cfg = {
        "input_columns": ['question'],
        "output_column": 'std_ans',
    }
_infer_cfg = {
"ice_template": {
"type": PromptTemplate,
"template": {
"round": [{
"role": "HUMAN",
"prompt": _hint + "{question}",
}]
},
"ice_token": "</E>"
},
"retriever": {
"type": ZeroRetriever
},
"inferencer": {
"type": GenInferencer,
"max_out_len": 1024,
}
}
_eval_cfg = {
"evaluator": {
"type": PJExamEvaluator
},
"pred_role": "BOT",
"ds_column": "eval_infos"
}
_dataset = {
"type": PJExamDataset,
"abbr": "PJExamDataset-" + _name,
"path": './data/PJExam',
"name": _name,
"reader_cfg": _reader_cfg,
"infer_cfg": _infer_cfg,
"eval_cfg": _eval_cfg,
}
PJExam_datasets.append(_dataset)
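# Delete the loop temporaries so they do not leak into the exported config
# namespace (mmengine configs pick up every module-level variable).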
del _name, _hint, _reader_cfg, _infer_cfg, _eval_cfg, _dataset
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
AX_b_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
AX_b_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'entailment': '{sentence1}?entailment, {sentence2}',
'not_entailment': '{sentence1}?not_entailment, {sentence2}'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
AX_b_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
AX_b_datasets = [
dict(
type=HFDataset,
abbr='AX_b',
path='json',
data_files='./data/SuperGLUE/AX-b/AX-b.jsonl',
split='train',
reader_cfg=AX_b_reader_cfg,
infer_cfg=AX_b_infer_cfg,
eval_cfg=AX_b_eval_cfg)
]
from mmengine.config import read_base
with read_base():
from .SuperGLUE_AX_g_ppl_8d9bf9 import AX_g_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
AX_g_reader_cfg = dict(
input_columns=['hypothesis', 'premise'],
output_column='label',
test_split='train')
AX_g_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'entailment': '{premise}?entailment, {hypothesis}',
'not_entailment': '{premise}?not_entailment, {hypothesis}'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
AX_g_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
AX_g_datasets = [
dict(
type=HFDataset,
abbr='AX_g',
path='json',
data_files='./data/SuperGLUE/AX-g/AX-g.jsonl',
split='train',
reader_cfg=AX_g_reader_cfg,
infer_cfg=AX_g_infer_cfg,
eval_cfg=AX_g_eval_cfg)
]
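# ---------------------------------------------------------------------------
# Illustrative note (a sketch, not part of the committed files): once merged,
# any of these dataset lists can be pulled into an evaluation config and run
# through OpenCompass's entry point, e.g. `python run.py configs/eval.py`;
# the config filename here is a placeholder.
# ---------------------------------------------------------------------------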