"library/vscode:/vscode.git/clone" did not exist on "81ffce2d5d7c0c46358e2705904b70ae5484f5d5"
Unverified commit aa2dd2b5 authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
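
The diff below is a purely mechanical style fix: every double-quoted string literal in these dataset configs is rewritten to a single-quoted one, with no change to prompt text, paths, or any other config value. The commit title only says "Add config lints", so the exact tooling is an assumption here; a quote-style check of this kind (for example flake8-quotes, or pre-commit's double-quote-string-fixer hook) is what typically drives such a sweep. As an illustration only, not code from this commit, a conservative fixer of that sort can be written with the standard tokenize module:

# Illustrative sketch, not taken from this commit: rewrite simple double-quoted
# string literals to single quotes, the style the configs below are moving to.
# Prefixed (f"..."), triple-quoted, and quote-containing strings are left alone.
import io
import tokenize


def prefer_single_quotes(source: str) -> str:
    tokens = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        s = tok.string
        if (tok.type == tokenize.STRING and s.startswith('"')
                and not s.startswith('"""') and "'" not in s):
            # Same length before and after, so token positions stay valid.
            tok = tok._replace(string="'" + s[1:-1] + "'")
        tokens.append(tok)
    return tokenize.untokenize(tokens)


print(prefer_single_quotes('pred_role="BOT",\n'), end='')  # pred_role='BOT',

Every hunk that follows is this same quote swap applied across the CLUE, FewCLUE, ChemBench, and related dataset configs.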
@@ -6,18 +6,18 @@ from opencompass.datasets import AFQMCDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 afqmc_reader_cfg = dict(
-    input_columns=["sentence1", "sentence2"],
-    output_column="label",
-    test_split="train")
+    input_columns=['sentence1', 'sentence2'],
+    output_column='label',
+    test_split='train')
 
 afqmc_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?\nA. 不完全一致\nB. 完全一致\n请从“A”,“B”中进行选择。\n答:",
+                '语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?\nA. 不完全一致\nB. 完全一致\n请从“A”,“B”中进行选择。\n答:',
             ),
         ]),
     ),
@@ -27,15 +27,15 @@ afqmc_infer_cfg = dict(
 
 afqmc_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 afqmc_datasets = [
     dict(
-        abbr="afqmc-dev",
+        abbr='afqmc-dev',
         type=AFQMCDataset_V2,
-        path="./data/CLUE/AFQMC/dev.json",
+        path='./data/CLUE/AFQMC/dev.json',
         reader_cfg=afqmc_reader_cfg,
         infer_cfg=afqmc_infer_cfg,
         eval_cfg=afqmc_eval_cfg,
@@ -16,14 +16,14 @@ afqmc_infer_cfg = dict(
             0:
             dict(round=[
                 dict(
-                    role="HUMAN", prompt="“{sentence1}”与“{sentence2}”不同还是相似?"),
-                dict(role="BOT", prompt="不同。")
+                    role='HUMAN', prompt='“{sentence1}”与“{sentence2}”不同还是相似?'),
+                dict(role='BOT', prompt='不同。')
             ]),
             1:
             dict(round=[
                 dict(
-                    role="HUMAN", prompt="“{sentence1}”与“{sentence2}”不同还是相似?"),
-                dict(role="BOT", prompt="相似")
+                    role='HUMAN', prompt='“{sentence1}”与“{sentence2}”不同还是相似?'),
+                dict(role='BOT', prompt='相似')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -16,20 +16,20 @@ afqmc_infer_cfg = dict(
             0:
             dict(round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?"
+                    '语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?'
                 ),
-                dict(role="BOT", prompt="不完全一致")
+                dict(role='BOT', prompt='不完全一致')
             ]),
             1:
             dict(round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?"
+                    '语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?'
                 ),
-                dict(role="BOT", prompt="完全一致")
+                dict(role='BOT', prompt='完全一致')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -13,8 +13,8 @@ afqmc_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template={
-            0: "{sentence1},{sentence2}不同。",
-            1: "{sentence1},{sentence2}相似。"
+            0: '{sentence1},{sentence2}不同。',
+            1: '{sentence1},{sentence2}相似。'
         }),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLInferencer))
@@ -6,18 +6,18 @@ from opencompass.datasets import cmnliDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 cmnli_reader_cfg = dict(
-    input_columns=["sentence1", "sentence2"],
-    output_column="label",
-    test_split="train")
+    input_columns=['sentence1', 'sentence2'],
+    output_column='label',
+    test_split='train')
 
 cmnli_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?\nA. 蕴含\nB. 矛盾\nC. 无关\n请从“A”,“B”,“C”中进行选择。\n答:"
+                '语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?\nA. 蕴含\nB. 矛盾\nC. 无关\n请从“A”,“B”,“C”中进行选择。\n答:'
             ),
         ]),
     ),
@@ -27,15 +27,15 @@ cmnli_infer_cfg = dict(
 
 cmnli_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 cmnli_datasets = [
     dict(
-        abbr="cmnli",
+        abbr='cmnli',
         type=cmnliDataset_V2,
-        path="./data/CLUE/cmnli/cmnli_public/dev.json",
+        path='./data/CLUE/cmnli/cmnli_public/dev.json',
         reader_cfg=cmnli_reader_cfg,
         infer_cfg=cmnli_infer_cfg,
         eval_cfg=cmnli_eval_cfg,
@@ -6,18 +6,18 @@ from opencompass.datasets import cmnliDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 cmnli_reader_cfg = dict(
-    input_columns=["sentence1", "sentence2"],
-    output_column="label",
-    test_split="train")
+    input_columns=['sentence1', 'sentence2'],
+    output_column='label',
+    test_split='train')
 
 cmnli_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:"
+                '阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:'
             ),
         ]),
     ),
@@ -27,15 +27,15 @@ cmnli_infer_cfg = dict(
 
 cmnli_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 cmnli_datasets = [
     dict(
-        abbr="cmnli",
+        abbr='cmnli',
         type=cmnliDataset_V2,
-        path="./data/CLUE/cmnli/cmnli_public/dev.json",
+        path='./data/CLUE/cmnli/cmnli_public/dev.json',
         reader_cfg=cmnli_reader_cfg,
         infer_cfg=cmnli_infer_cfg,
         eval_cfg=cmnli_eval_cfg,
@@ -25,7 +25,7 @@ cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
 
 cmnli_datasets = [
     dict(
-        abbr="cmnli",
+        abbr='cmnli',
         type=cmnliDataset,
         path='./data/CLUE/cmnli/cmnli_public/dev.json',
         reader_cfg=cmnli_reader_cfg,
@@ -16,22 +16,22 @@ cmnli_infer_cfg = dict(
             'contradiction':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
-                dict(role="BOT", prompt="错")
+                    role='HUMAN',
+                    prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
+                dict(role='BOT', prompt='错')
             ]),
             'entailment':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
-                dict(role="BOT", prompt="对")
+                    role='HUMAN',
+                    prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
+                dict(role='BOT', prompt='对')
             ]),
             'neutral':
             dict(round=[
                 dict(
-                    role="HUMAN", prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
-                dict(role="BOT", prompt="可能")
+                    role='HUMAN', prompt='如果{sentence1}为真,那么{sentence2}也为真吗?'),
+                dict(role='BOT', prompt='可能')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -41,7 +41,7 @@ cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
 
 cmnli_datasets = [
     dict(
-        abbr="cmnli",
+        abbr='cmnli',
         type=cmnliDataset,
         path='./data/CLUE/cmnli/cmnli_public/dev.json',
         reader_cfg=cmnli_reader_cfg,
@@ -16,26 +16,26 @@ cmnli_infer_cfg = dict(
             'contradiction':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?"
+                    role='HUMAN',
+                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
                 ),
-                dict(role="BOT", prompt="矛盾")
+                dict(role='BOT', prompt='矛盾')
             ]),
             'entailment':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?"
+                    role='HUMAN',
+                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
                 ),
-                dict(role="BOT", prompt="蕴含")
+                dict(role='BOT', prompt='蕴含')
             ]),
             'neutral':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?"
+                    role='HUMAN',
+                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
                 ),
-                dict(role="BOT", prompt="无关")
+                dict(role='BOT', prompt='无关')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -45,7 +45,7 @@ cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
 
 cmnli_datasets = [
     dict(
-        abbr="cmnli",
+        abbr='cmnli',
         type=cmnliDataset,
         path='./data/CLUE/cmnli/cmnli_public/dev.json',
         reader_cfg=cmnli_reader_cfg,
@@ -6,8 +6,8 @@ from opencompass.datasets import cmnliDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 ocnli_reader_cfg = dict(
-    input_columns=["sentence1", "sentence2"],
-    output_column="label",
+    input_columns=['sentence1', 'sentence2'],
+    output_column='label',
 )
 
 # TODO: two prompt templates for ocnli
@@ -16,9 +16,9 @@ ocnli_infer_cfg = dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:"
+                '阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:'
             ),
         ]),
     ),
@@ -28,15 +28,15 @@ ocnli_infer_cfg = dict(
 
 ocnli_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 ocnli_datasets = [
     dict(
-        abbr="ocnli",
+        abbr='ocnli',
         type=cmnliDataset_V2,  # ocnli share the same format with cmnli
-        path="./data/CLUE/OCNLI/dev.json",
+        path='./data/CLUE/OCNLI/dev.json',
         reader_cfg=ocnli_reader_cfg,
         infer_cfg=ocnli_infer_cfg,
         eval_cfg=ocnli_eval_cfg,
@@ -6,8 +6,8 @@ from opencompass.datasets import cmnliDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 ocnli_reader_cfg = dict(
-    input_columns=["sentence1", "sentence2"],
-    output_column="label",
+    input_columns=['sentence1', 'sentence2'],
+    output_column='label',
 )
 
 # TODO: two prompt templates for ocnli
@@ -16,9 +16,9 @@ ocnli_infer_cfg = dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?\nA. 蕴含\n B. 矛盾\n C. 无关\n请从“A”,“B”,“C”中进行选择。\n答:"
+                '语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?\nA. 蕴含\n B. 矛盾\n C. 无关\n请从“A”,“B”,“C”中进行选择。\n答:'
             ),
         ]),
     ),
@@ -28,15 +28,15 @@ ocnli_infer_cfg = dict(
 
 ocnli_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
    pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 ocnli_datasets = [
     dict(
-        abbr="ocnli",
+        abbr='ocnli',
         type=cmnliDataset_V2,  # ocnli share the same format with cmnli
-        path="./data/CLUE/OCNLI/dev.json",
+        path='./data/CLUE/OCNLI/dev.json',
         reader_cfg=ocnli_reader_cfg,
         infer_cfg=ocnli_infer_cfg,
         eval_cfg=ocnli_eval_cfg,
@@ -15,22 +15,22 @@ ocnli_infer_cfg = dict(
             'contradiction':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
-                dict(role="BOT", prompt="错")
+                    role='HUMAN',
+                    prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
+                dict(role='BOT', prompt='错')
             ]),
             'entailment':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
-                dict(role="BOT", prompt="对")
+                    role='HUMAN',
+                    prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
+                dict(role='BOT', prompt='对')
             ]),
             'neutral':
             dict(round=[
                 dict(
-                    role="HUMAN", prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
-                dict(role="BOT", prompt="可能")
+                    role='HUMAN', prompt='如果{sentence1}为真,那么{sentence2}也为真吗?'),
+                dict(role='BOT', prompt='可能')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -15,26 +15,26 @@ ocnli_infer_cfg = dict(
             'contradiction':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?"
+                    role='HUMAN',
+                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
                 ),
-                dict(role="BOT", prompt="矛盾")
+                dict(role='BOT', prompt='矛盾')
             ]),
             'entailment':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?"
+                    role='HUMAN',
+                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
                 ),
-                dict(role="BOT", prompt="蕴含")
+                dict(role='BOT', prompt='蕴含')
             ]),
             'neutral':
             dict(round=[
                 dict(
-                    role="HUMAN",
-                    prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?"
+                    role='HUMAN',
+                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
                 ),
-                dict(role="BOT", prompt="无关")
+                dict(role='BOT', prompt='无关')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -7,8 +7,8 @@ from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 chembench_reader_cfg = dict(
-    input_columns=["input", "A", "B", "C", "D"],
-    output_column="target",
+    input_columns=['input', 'A', 'B', 'C', 'D'],
+    output_column='target',
     train_split='dev')
 
 chembench_all_sets = [
@@ -34,26 +34,26 @@ for _name in chembench_all_sets:
             type=PromptTemplate,
             template=dict(round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    f"{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: "
+                    f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
                 ),
-                dict(role="BOT", prompt="{target}\n")
+                dict(role='BOT', prompt='{target}\n')
             ]),
         ),
         prompt_template=dict(
             type=PromptTemplate,
             template=dict(
-                begin="</E>",
+                begin='</E>',
                 round=[
                     dict(
-                        role="HUMAN",
+                        role='HUMAN',
                         prompt=
-                        f"{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: "
+                        f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
                     ),
                 ],
             ),
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
         inferencer=dict(type=GenInferencer),
@@ -65,9 +65,9 @@ for _name in chembench_all_sets:
 
     chembench_datasets.append(
         dict(
-            abbr=f"ChemBench_{_name}",
+            abbr=f'ChemBench_{_name}',
             type=ChemBenchDataset,
-            path="./data/ChemBench/",
+            path='./data/ChemBench/',
             name=_name,
             reader_cfg=chembench_reader_cfg,
             infer_cfg=chembench_infer_cfg,
@@ -6,18 +6,18 @@ from opencompass.datasets import AFQMCDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 bustm_reader_cfg = dict(
-    input_columns=["sentence1", "sentence2"],
-    output_column="label",
-    test_split="train")
+    input_columns=['sentence1', 'sentence2'],
+    output_column='label',
+    test_split='train')
 
 bustm_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?\nA. 无关\nB. 相关\n请从“A”,“B”中进行选择。\n答:",
+                '语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?\nA. 无关\nB. 相关\n请从“A”,“B”中进行选择。\n答:',
             ),
         ]),
     ),
@@ -27,23 +27,23 @@ bustm_infer_cfg = dict(
 
 bustm_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 bustm_datasets = [
     dict(
-        abbr="bustm-dev",
+        abbr='bustm-dev',
         type=AFQMCDataset_V2,  # bustm share the same format with AFQMC
-        path="./data/FewCLUE/bustm/dev_few_all.json",
+        path='./data/FewCLUE/bustm/dev_few_all.json',
         reader_cfg=bustm_reader_cfg,
         infer_cfg=bustm_infer_cfg,
         eval_cfg=bustm_eval_cfg,
     ),
     dict(
-        abbr="bustm-test",
+        abbr='bustm-test',
         type=AFQMCDataset_V2,  # bustm share the same format with AFQMC
-        path="./data/FewCLUE/bustm/test_public.json",
+        path='./data/FewCLUE/bustm/test_public.json',
         reader_cfg=bustm_reader_cfg,
         infer_cfg=bustm_infer_cfg,
         eval_cfg=bustm_eval_cfg,
@@ -19,11 +19,11 @@ bustm_infer_cfg = dict(
                     dict(
                         role='SYSTEM',
                         fallback_role='HUMAN',
-                        prompt="请判断以下两句话说的是否是一个意思:")
+                        prompt='请判断以下两句话说的是否是一个意思:')
                 ],
                 round=[
-                    dict(role="HUMAN", prompt="{sentence1},{sentence2}"),
-                    dict(role="BOT", prompt="两句话说的毫不相关。")
+                    dict(role='HUMAN', prompt='{sentence1},{sentence2}'),
+                    dict(role='BOT', prompt='两句话说的毫不相关。')
                 ]),
             1:
             dict(
@@ -31,11 +31,11 @@ bustm_infer_cfg = dict(
                     dict(
                         role='SYSTEM',
                         fallback_role='HUMAN',
-                        prompt="请判断以下两句话说的是否是一个意思:")
+                        prompt='请判断以下两句话说的是否是一个意思:')
                 ],
                 round=[
-                    dict(role="HUMAN", prompt="{sentence1},{sentence2}"),
-                    dict(role="BOT", prompt="两句话说是的一个意思。")
+                    dict(role='HUMAN', prompt='{sentence1},{sentence2}'),
+                    dict(role='BOT', prompt='两句话说是的一个意思。')
                 ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -13,8 +13,8 @@ bustm_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template={
-            0: "{sentence1}。\n{sentence2}。\n两句话说的毫不相关。",
-            1: "{sentence1}。\n{sentence2}。\n两句话说的一个意思。"
+            0: '{sentence1}。\n{sentence2}。\n两句话说的毫不相关。',
+            1: '{sentence1}。\n{sentence2}。\n两句话说的一个意思。'
         }),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLInferencer))
@@ -16,20 +16,20 @@ bustm_infer_cfg = dict(
             0:
             dict(round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?"
+                    '语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?'
                 ),
-                dict(role="BOT", prompt="两句话说的毫不相关。")
+                dict(role='BOT', prompt='两句话说的毫不相关。')
             ]),
             1:
             dict(round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?"
+                    '语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?'
                 ),
-                dict(role="BOT", prompt="两句话说是的一个意思。")
+                dict(role='BOT', prompt='两句话说是的一个意思。')
             ]),
         }),
     retriever=dict(type=ZeroRetriever),
@@ -6,8 +6,8 @@ from opencompass.datasets import CHIDDataset_V2
 from opencompass.utils.text_postprocessors import first_capital_postprocess
 
 chid_reader_cfg = dict(
-    input_columns=["content","A","B","C","D","E","F","G"],
-    output_column="answer",
+    input_columns=['content','A','B','C','D','E','F','G'],
+    output_column='answer',
 )
 
 chid_infer_cfg = dict(
@@ -16,9 +16,9 @@ chid_infer_cfg = dict(
         template=dict(
             round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "{content}\n请选择______处所填的词\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nF. {F}\nG. {G}\n请从”A“,”B“,”C“,”D“,”E“,”F“,”G“中进行选择。答:",
+                    '{content}\n请选择______处所填的词\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nF. {F}\nG. {G}\n请从”A“,”B“,”C“,”D“,”E“,”F“,”G“中进行选择。答:',
                 ),
             ])),
     retriever=dict(type=ZeroRetriever),
@@ -27,23 +27,23 @@ chid_infer_cfg = dict(
 
 chid_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
 
 chid_datasets = [
     dict(
-        abbr="chid-dev",
+        abbr='chid-dev',
         type=CHIDDataset_V2,
-        path="./data/FewCLUE/chid/dev_few_all.json",
+        path='./data/FewCLUE/chid/dev_few_all.json',
         reader_cfg=chid_reader_cfg,
         infer_cfg=chid_infer_cfg,
         eval_cfg=chid_eval_cfg,
     ),
     dict(
-        abbr="chid-test",
+        abbr='chid-test',
        type=CHIDDataset_V2,
-        path="./data/FewCLUE/chid/test_public.json",
+        path='./data/FewCLUE/chid/test_public.json',
         reader_cfg=chid_reader_cfg,
         infer_cfg=chid_infer_cfg,
         eval_cfg=chid_eval_cfg,
@@ -13,8 +13,8 @@ chid_infer_cfg = dict(
         template={
             i: dict(
                 round=[
-                    dict(role="HUMAN", prompt=f"以下句子是否通顺?\n{{content{i}}}"),
-                    dict(role="BOT", prompt="这个句子是通顺的。"),
+                    dict(role='HUMAN', prompt=f'以下句子是否通顺?\n{{content{i}}}'),
+                    dict(role='BOT', prompt='这个句子是通顺的。'),
                 ], )
             for i in range(7)
         }),