Commit c289ecc0 authored by xinghao

Initial commit

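# --- CLUE C3, PPL variant (plain string templates) ---
# Multiple-choice reading comprehension: each of the four candidate answers is rendered as a
# '文章/问题/答案' (passage/question/answer) completion and the option scored best by
# perplexity is taken as the prediction.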
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset
C3_reader_cfg = dict(
input_columns=[
'question', 'content', 'choice0', 'choice1', 'choice2', 'choice3',
'choices'
],
output_column='label')
C3_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0: '文章:{content}\n问题:{question}\n答案:{choice0}',
1: '文章:{content}\n问题:{question}\n答案:{choice1}',
2: '文章:{content}\n问题:{question}\n答案:{choice2}',
3: '文章:{content}\n问题:{question}\n答案:{choice3}'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
C3_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
C3_datasets = [
dict(
type=C3Dataset,
abbr='C3',
path='./data/CLUE/C3/dev_0.json',
reader_cfg=C3_reader_cfg,
infer_cfg=C3_infer_cfg,
eval_cfg=C3_eval_cfg)
]
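
# --- CLUE C3, PPL variant (chat-format prompts) ---
# Same reader as above, but each candidate answer is expressed as a HUMAN/BOT round, built
# with a dict comprehension over the four choices.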
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset
C3_reader_cfg = dict(
input_columns=[
'question', 'content', 'choice0', 'choice1', 'choice2', 'choice3',
'choices'
],
output_column='label')
C3_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
i: dict(round=[
dict(role='HUMAN', prompt='文章:{content}\n问题:{question}'),
dict(role='BOT', prompt=f'答案:{{choice{i}}}')
])
for i in range(4)
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
C3_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
C3_datasets = [
dict(
type=C3Dataset,
abbr='C3',
path='./data/CLUE/C3/dev_0.json',
reader_cfg=C3_reader_cfg,
infer_cfg=C3_infer_cfg,
eval_cfg=C3_eval_cfg)
]
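
# --- CLUE_CMRC_gen entry point ---
# Thin wrapper that re-exports CMRC_datasets from a pinned prompt variant (presumably the
# config that follows).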
from mmengine.config import read_base
with read_base():
from .CLUE_CMRC_gen_1bd3c8 import CMRC_datasets # noqa: F401, F403
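
# --- CLUE CMRC, gen variant with answer postprocessing ---
# Extractive QA: the prompt asks for as brief an answer as possible, phrased to start with
# '答案是' ('the answer is'); predictions are passed through cmrc_postprocess before
# exact-match scoring.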
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset, cmrc_postprocess
CMRC_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
CMRC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt='根据文章回答问题。你的答案应该尽可能简练,请以 ‘答案是’ 开头的句式作答。\n文章:{context}\n问:{question}\n答:'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
CMRC_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=cmrc_postprocess),
)
CMRC_datasets = [
dict(
type=CMRCDataset,
abbr='CMRC_dev',
path='opencompass/cmrc_dev',
reader_cfg=CMRC_reader_cfg,
infer_cfg=CMRC_infer_cfg,
eval_cfg=CMRC_eval_cfg),
]
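
# --- CLUE CMRC, gen variant (two-turn chat prompt) ---
# HUMAN supplies the passage and question, the BOT turn opens with '答:' ('Answer:');
# exact-match scoring, no postprocessor.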
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset
CMRC_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
CMRC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(role='HUMAN', prompt='文章:{context}\n根据上文,回答如下问题:{question}'),
dict(role='BOT', prompt='答:'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
CMRC_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
)
CMRC_datasets = [
dict(
type=CMRCDataset,
abbr='CMRC_dev',
path='opencompass/cmrc_dev',
reader_cfg=CMRC_reader_cfg,
infer_cfg=CMRC_infer_cfg,
eval_cfg=CMRC_eval_cfg),
]
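
# --- CLUE CMRC, gen variant (plain string completion prompt) ---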
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset
CMRC_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
CMRC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template='文章:{context}\n根据上文,回答如下问题: {question}\n答:'),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
CMRC_eval_cfg = dict(evaluator=dict(type=EMEvaluator), )
CMRC_datasets = [
dict(
type=CMRCDataset,
abbr='CMRC_dev',
path='opencompass/cmrc_dev',
reader_cfg=CMRC_reader_cfg,
infer_cfg=CMRC_infer_cfg,
eval_cfg=CMRC_eval_cfg),
]
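
# --- CLUE CMRC, gen variant (single HUMAN turn ending in '答:') ---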
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset
CMRC_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
CMRC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt='文章:{context}\n根据上文,回答如下问题:\n{question}\n答:'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
CMRC_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
)
CMRC_datasets = [
dict(
type=CMRCDataset,
abbr='CMRC_dev',
path='opencompass/cmrc_dev',
reader_cfg=CMRC_reader_cfg,
infer_cfg=CMRC_infer_cfg,
eval_cfg=CMRC_eval_cfg),
]
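
# --- CLUE_DRCD_gen entry point ---
# Re-exports DRCD_datasets from a pinned prompt variant (presumably the config that follows).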
from mmengine.config import read_base
with read_base():
from .CLUE_DRCD_gen_1bd3c8 import DRCD_datasets # noqa: F401, F403
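
# --- DRCD, gen variant with answer postprocessing ---
# Traditional-Chinese extractive QA using the same '答案是' instruction prompt as CMRC above;
# predictions go through drcd_postprocess before exact-match scoring.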
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import DRCDDataset, drcd_postprocess
DRCD_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
DRCD_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt='根据文章回答问题。你的答案应该尽可能简练,请以 ‘答案是’ 开头的句式作答。\n文章:{context}\n问:{question}\n答:'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
DRCD_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=drcd_postprocess),
)
DRCD_datasets = [
dict(
type=DRCDDataset,
abbr='DRCD_dev',
path='opencompass/drcd_dev',
reader_cfg=DRCD_reader_cfg,
infer_cfg=DRCD_infer_cfg,
eval_cfg=DRCD_eval_cfg),
]
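
# --- DRCD, gen variant (two-turn chat prompt with a trailing '答:' BOT turn) ---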
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import DRCDDataset
DRCD_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
DRCD_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(role='HUMAN', prompt='文章:{context}\n根据上文,回答如下问题:{question}'),
dict(role='BOT', prompt='答:'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
DRCD_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
)
DRCD_datasets = [
dict(
type=DRCDDataset,
abbr='DRCD_dev',
path='opencompass/drcd_dev',
reader_cfg=DRCD_reader_cfg,
infer_cfg=DRCD_infer_cfg,
eval_cfg=DRCD_eval_cfg),
]
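
# --- DRCD, gen variant (plain string completion prompt) ---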
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import DRCDDataset
DRCD_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
DRCD_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template='文章:{context}\n根据上文,回答如下问题: {question}\n答:'),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
DRCD_eval_cfg = dict(evaluator=dict(type=EMEvaluator), )
DRCD_datasets = [
dict(
type=DRCDDataset,
abbr='DRCD_dev',
path='opencompass/drcd_dev',
reader_cfg=DRCD_reader_cfg,
infer_cfg=DRCD_infer_cfg,
eval_cfg=DRCD_eval_cfg),
]
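
# --- DRCD, gen variant (single HUMAN turn ending in '答:') ---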
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import DRCDDataset
DRCD_reader_cfg = dict(
input_columns=['question', 'context'], output_column='answers')
DRCD_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt='文章:{context}\n根据上文,回答如下问题:\n{question}\n答:'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
DRCD_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
)
DRCD_datasets = [
dict(
type=DRCDDataset,
abbr='DRCD_dev',
path='opencompass/drcd_dev',
reader_cfg=DRCD_reader_cfg,
infer_cfg=DRCD_infer_cfg,
eval_cfg=DRCD_eval_cfg),
]
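
# --- CLUE_afqmc_gen entry point ---
# Re-exports afqmc_datasets from a pinned prompt variant (presumably the config that follows).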
from mmengine.config import read_base
with read_base():
from .CLUE_afqmc_gen_901306 import afqmc_datasets # noqa: F401, F403
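
# --- AFQMC, gen variant (A/B choice) ---
# Question matching over Ant Financial queries: the model answers 'A. 不完全一致' (not fully
# equivalent) or 'B. 完全一致' (fully equivalent); first_capital_postprocess extracts the
# letter for accuracy scoring.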
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AFQMCDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess
afqmc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
afqmc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?\nA. 不完全一致\nB. 完全一致\n请从“A”,“B”中进行选择。\n答:',
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
afqmc_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
afqmc_datasets = [
dict(
abbr='afqmc-dev',
type=AFQMCDatasetV2,
path='opencompass/afqmc-dev',
reader_cfg=afqmc_reader_cfg,
infer_cfg=afqmc_infer_cfg,
eval_cfg=afqmc_eval_cfg,
),
]
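
# --- CLUE_afqmc_ppl entry point ---
# Re-exports afqmc_datasets from a pinned PPL prompt variant (presumably the config that
# follows).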
from mmengine.config import read_base
with read_base():
from .CLUE_afqmc_ppl_6507d7 import afqmc_datasets # noqa: F401, F403
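
# --- AFQMC, PPL variant ('不同'/'相似' completions) ---
# Labels 0/1 are scored as the BOT completions '不同' (different) and '相似' (similar) to the
# same HUMAN question.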
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
afqmc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
afqmc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(
role='HUMAN', prompt='“{sentence1}”与“{sentence2}”不同还是相似?'),
dict(role='BOT', prompt='不同。')
]),
1:
dict(round=[
dict(
role='HUMAN', prompt='“{sentence1}”与“{sentence2}”不同还是相似?'),
dict(role='BOT', prompt='相似')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
afqmc_datasets = [
dict(
type=HFDataset,
abbr='afqmc-dev',
path='json',
data_files='./data/CLUE/AFQMC/dev.json',
split='train',
reader_cfg=afqmc_reader_cfg,
infer_cfg=afqmc_infer_cfg,
eval_cfg=afqmc_eval_cfg),
]
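
# --- AFQMC, PPL variant (full equivalence question) ---
# The HUMAN turn repeats the full question; labels 0/1 are scored as '不完全一致' (not fully
# equivalent) vs. '完全一致' (fully equivalent).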
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
afqmc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
afqmc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?'
),
dict(role='BOT', prompt='不完全一致')
]),
1:
dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?'
),
dict(role='BOT', prompt='完全一致')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
afqmc_datasets = [
dict(
type=HFDataset,
abbr='afqmc-dev',
path='json',
data_files='./data/CLUE/AFQMC/dev.json',
split='train',
reader_cfg=afqmc_reader_cfg,
infer_cfg=afqmc_infer_cfg,
eval_cfg=afqmc_eval_cfg),
]
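
# --- AFQMC, PPL variant (plain string templates ending in '不同。'/'相似。') ---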
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
afqmc_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
afqmc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0: '{sentence1},{sentence2}不同。',
1: '{sentence1},{sentence2}相似。'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
afqmc_datasets = [
dict(
type=HFDataset,
abbr='afqmc-dev',
path='json',
data_files='./data/CLUE/AFQMC/dev.json',
split='train',
reader_cfg=afqmc_reader_cfg,
infer_cfg=afqmc_infer_cfg,
eval_cfg=afqmc_eval_cfg),
]
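
# --- CLUE_cmnli_gen entry point ---
# Re-exports cmnli_datasets from a pinned prompt variant (presumably the config that follows).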
from mmengine.config import read_base
with read_base():
from .CLUE_cmnli_gen_1abf97 import cmnli_datasets # noqa: F401, F403
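
# --- CMNLI, gen variant (A/B/C choice) ---
# Chinese NLI: the model picks 'A. 蕴含' (entailment), 'B. 矛盾' (contradiction) or 'C. 无关'
# (unrelated); first_capital_postprocess extracts the letter for accuracy scoring.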
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CMNLIDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess
cmnli_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
cmnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?\nA. 蕴含\nB. 矛盾\nC. 无关\n请从“A”,“B”,“C”中进行选择。\n答:'
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
cmnli_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
cmnli_datasets = [
dict(
abbr='cmnli',
type=CMNLIDatasetV2,
path='opencompass/cmnli-dev',
reader_cfg=cmnli_reader_cfg,
infer_cfg=cmnli_infer_cfg,
eval_cfg=cmnli_eval_cfg,
)
]
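
These dataset configs are not run on their own; an OpenCompass evaluation config gathers them through read_base and pairs them with model configs. The sketch below is only an illustration of that wiring, not part of the commit: the relative import paths assume the usual configs/datasets/<task>/ layout, and the hf_internlm_7b model config stands in for whatever model config is actually available, so adjust both to the repository's layout.

from mmengine.config import read_base

with read_base():
    # dataset configs added in this commit (paths are an assumption about the layout)
    from .datasets.CLUE_afqmc.CLUE_afqmc_gen import afqmc_datasets  # noqa: F401, F403
    from .datasets.CLUE_cmnli.CLUE_cmnli_gen import cmnli_datasets  # noqa: F401, F403
    # example model config; substitute any model config the installation provides
    from .models.hf_internlm.hf_internlm_7b import models  # noqa: F401, F403

# every dataset listed here is evaluated with every model imported above,
# launched with: python run.py <path_to_this_config>.py
datasets = [*afqmc_datasets, *cmnli_datasets]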