Unverified Commit aa2dd2b5 authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
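The hunks below replace double-quoted strings with single quotes across the dataset configs to satisfy the new config lint. For illustration only, a quote-style check of this kind can be sketched in a few lines of Python; this is a hypothetical helper written for this page, not the actual lint configuration added in #892 (which is not visible in this extract).

```python
# Minimal sketch of a quote-style check in the spirit of this commit: it flags
# double-quoted string literals so they can be rewritten with single quotes.
# Hypothetical helper for illustration; it is not the lint added in #892.
import sys
import tokenize


def find_double_quoted_strings(path):
    """Yield (line, col, token_text) for single-line double-quoted literals."""
    with tokenize.open(path) as f:
        for tok in tokenize.generate_tokens(f.readline):
            if tok.type != tokenize.STRING:
                continue
            # Ignore string prefixes (r, b, u, f) when inspecting the quote char.
            body = tok.string.lstrip('rbufRBUF')
            # Leave triple-quoted strings (docstrings, long prompts) alone.
            if body.startswith('"') and not body.startswith('"""'):
                yield (*tok.start, tok.string)


if __name__ == '__main__':
    for filename in sys.argv[1:]:
        for line, col, text in find_double_quoted_strings(filename):
            print(f'{filename}:{line}:{col}: use single quotes: {text}')
```

Run against one or more config files, it prints the file, line, and column of each offending literal together with the literal itself.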
@@ -3,22 +3,22 @@ from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import TACODataset, TACOEvaluator
TACO_reader_cfg = dict(input_columns=["question", "starter"], output_column="problem_id", train_split='test')
TACO_reader_cfg = dict(input_columns=['question', 'starter'], output_column='problem_id', train_split='test')
TACO_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template="Please write a python program to address the following QUESTION. Your ANSWER should be in a code block format like this: ```python # Write your code here ```. \nQUESTION:\n{question} {starter}\nANSWER:\n"),
template='Please write a python program to address the following QUESTION. Your ANSWER should be in a code block format like this: ```python # Write your code here ```. \nQUESTION:\n{question} {starter}\nANSWER:\n'),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
TACO_eval_cfg = dict(evaluator=dict(type=TACOEvaluator), pred_role="BOT")
TACO_eval_cfg = dict(evaluator=dict(type=TACOEvaluator), pred_role='BOT')
TACO_datasets = [
dict(
type=TACODataset,
abbr="TACO",
abbr='TACO',
path='./data/BAAI-TACO',
num_repeats = 1,
reader_cfg=TACO_reader_cfg,
......
@@ -5,23 +5,23 @@ from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset
teval_subject_mapping = {
"instruct": ["instruct_v1"],
"plan": ["plan_json_v1", "plan_str_v1"],
"review": ["review_str_v1"],
"reason_retrieve_understand": ["reason_retrieve_understand_json_v1"],
"reason": ["reason_str_v1"],
"retrieve": ["retrieve_str_v1"],
"understand": ["understand_str_v1"],
'instruct': ['instruct_v1'],
'plan': ['plan_json_v1', 'plan_str_v1'],
'review': ['review_str_v1'],
'reason_retrieve_understand': ['reason_retrieve_understand_json_v1'],
'reason': ['reason_str_v1'],
'retrieve': ['retrieve_str_v1'],
'understand': ['understand_str_v1'],
}
teval_reader_cfg = dict(input_columns=["prompt"], output_column="ground_truth")
teval_reader_cfg = dict(input_columns=['prompt'], output_column='ground_truth')
teval_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role="HUMAN", prompt="{prompt}"),
dict(role='HUMAN', prompt='{prompt}'),
],
),
),
@@ -41,9 +41,9 @@ for _name in teval_all_sets:
for subset in teval_subject_mapping[_name]:
teval_datasets.append(
dict(
abbr="teval-" + subset,
abbr='teval-' + subset,
type=TEvalDataset,
path="./data/teval/EN",
path='./data/teval/EN',
name=subset,
reader_cfg=teval_reader_cfg,
infer_cfg=teval_infer_cfg,
......
@@ -5,23 +5,23 @@ from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset
teval_subject_mapping = {
"instruct_zh": ["instruct_v1_zh"],
"plan_zh": ["plan_json_v1_zh", "plan_str_v1_zh"],
"review_zh": ["review_str_v1_zh"],
"reason_retrieve_understand_zh": ["reason_retrieve_understand_json_v1_zh"],
"reason_zh": ["reason_str_v1_zh"],
"retrieve_zh": ["retrieve_str_v1_zh"],
"understand_zh": ["understand_str_v1_zh"],
'instruct_zh': ['instruct_v1_zh'],
'plan_zh': ['plan_json_v1_zh', 'plan_str_v1_zh'],
'review_zh': ['review_str_v1_zh'],
'reason_retrieve_understand_zh': ['reason_retrieve_understand_json_v1_zh'],
'reason_zh': ['reason_str_v1_zh'],
'retrieve_zh': ['retrieve_str_v1_zh'],
'understand_zh': ['understand_str_v1_zh'],
}
teval_reader_cfg = dict(input_columns=["prompt"], output_column="ground_truth")
teval_reader_cfg = dict(input_columns=['prompt'], output_column='ground_truth')
teval_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role="HUMAN", prompt="{prompt}"),
dict(role='HUMAN', prompt='{prompt}'),
],
),
),
@@ -41,9 +41,9 @@ for _name in teval_all_sets:
for subset in teval_subject_mapping[_name]:
teval_datasets.append(
dict(
abbr="teval-" + subset,
abbr='teval-' + subset,
type=TEvalDataset,
path="./data/teval/ZH",
path='./data/teval/ZH',
name=subset,
reader_cfg=teval_reader_cfg,
infer_cfg=teval_infer_cfg,
......
@@ -37,19 +37,19 @@ for k in [0, 1, 5]:
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
begin='</E>',
round=[
dict(role='HUMAN', prompt='Answer the question, your answer should be as simple as possible, start your answer with the prompt \'The answer is \'.\nQ: {question}?'),
dict(role='BOT', prompt='A:'),
]
),
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
inferencer=dict(type=GenInferencer, max_out_len=50),
)
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role="BOT")
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')
triviaqa_datasets.append(
dict(
......
@@ -27,13 +27,13 @@ for k in [1]:
prompt_template=dict(
type=PromptTemplate,
template='</E>Q: {question}\nA: ',
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=["Q:", "\n"]),
inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Q:', '\n']),
)
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role="BOT")
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')
triviaqa_datasets.append(
dict(
......
@@ -37,19 +37,19 @@ for k in [1]:
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
begin='</E>',
round=[
dict(role='HUMAN', prompt='Q: {question}'),
dict(role='BOT', prompt='A:'),
]
),
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=["Q:", "\n"]),
inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Q:', '\n']),
)
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role="BOT")
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')
triviaqa_datasets.append(
dict(
......
@@ -37,19 +37,19 @@ for k in [0, 1, 5, 25]:
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
begin='</E>',
round=[
dict(role='HUMAN', prompt='Q: {question}'),
dict(role='BOT', prompt='A:'),
]
),
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=["Q:", "\n"]),
inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Q:', '\n']),
)
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role="BOT")
triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')
triviaqa_datasets.append(
dict(
......
@@ -12,7 +12,7 @@ triviaqarc_reader_cfg = dict(
triviaqarc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template="{evidence}\nAnswer these questions:\nQ: {question}?\nA:"),
template='{evidence}\nAnswer these questions:\nQ: {question}?\nA:'),
retriever=dict(type=ZeroRetriever),
inferencer=dict(
type=GenInferencer, max_out_len=50, max_seq_len=8192, batch_size=4))
......
@@ -13,7 +13,7 @@ truthfulqa_reader_cfg = dict(
truthfulqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[dict(role="HUMAN", prompt="{question}")])),
template=dict(round=[dict(role='HUMAN', prompt='{question}')])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
......
@@ -5,24 +5,24 @@ from opencompass.datasets import TydiQADataset, TydiQAEvaluator
# All configs are for TydiQA Goldp task
tydiqa_reader_cfg = dict(
input_columns=["passage_text", "question_text"],
output_column="answer"
input_columns=['passage_text', 'question_text'],
output_column='answer'
)
langs = ['arabic', 'bengali', 'english', 'finnish', 'indonesian', 'japanese', 'korean', 'russian', 'swahili', 'telugu', 'thai']
prefixs_prompt = {
"english": ("Answer the following question based on the information in the given passage.", "Passage:", "Question:", "Answer:"),
"arabic": ("أجب على السؤال التالي بناءً على المعلومات في المقطع المعطى.", "المقطع:", "السؤال:", "الإجابة:"),
"bengali": ("প্রদত্ত অধ্যায়ের তথ্যের উপর ভিত্তি করে নিম্নলিখিত প্রশ্নের উত্তর দিন।", "অধ্যায়:", "প্রশ্ন:", "উত্তর:"),
"finnish": ("Vastaa seuraavaan kysymykseen annetun kappaleen tiedon perusteella.", "Kappale:", "Kysymys:", "Vastaus:"),
"indonesian": ("Jawab pertanyaan berikut berdasarkan informasi di bagian yang diberikan.", "Bagian:", "Pertanyaan:", "Jawaban:"),
"korean": ("주어진 문단의 정보에 기반하여 다음 질문에 답하십시오.", "문단:", "질문:", "답변:"),
"japanese":("文脈に基づいて質問に答えてください。","ぶんしょう:","しつもん:", "かいとう:"),
"russian": ("Ответьте на следующий вопрос на основе информации в данном отрывке.", "Отрывок:", "Вопрос:", "Ответ:"),
"swahili": ("Jibu swali lifuatalo kulingana na habari kwenye kifungu kilichotolewa.", "Kifungu:", "Swali:", "Jibu:"),
"telugu": ("ఇచ్చిన పేరాలోని సమాచారం ఆధారంగా కింది ప్రశ్నకు సమాధానం ఇవ్వండి.", "పేరా:", "ప్రశ్న:", "సమాధానం:"),
"thai":("ตอบคำถามต่อไปนี้โดยอิงตามข้อมูลในตอนข้อความที่กำหนด:", "ตอนข้อความ:", "คำถาม:", "คำตอบ:")
'english': ('Answer the following question based on the information in the given passage.', 'Passage:', 'Question:', 'Answer:'),
'arabic': ('أجب على السؤال التالي بناءً على المعلومات في المقطع المعطى.', 'المقطع:', 'السؤال:', 'الإجابة:'),
'bengali': ('প্রদত্ত অধ্যায়ের তথ্যের উপর ভিত্তি করে নিম্নলিখিত প্রশ্নের উত্তর দিন।', 'অধ্যায়:', 'প্রশ্ন:', 'উত্তর:'),
'finnish': ('Vastaa seuraavaan kysymykseen annetun kappaleen tiedon perusteella.', 'Kappale:', 'Kysymys:', 'Vastaus:'),
'indonesian': ('Jawab pertanyaan berikut berdasarkan informasi di bagian yang diberikan.', 'Bagian:', 'Pertanyaan:', 'Jawaban:'),
'korean': ('주어진 문단의 정보에 기반하여 다음 질문에 답하십시오.', '문단:', '질문:', '답변:'),
'japanese':('文脈に基づいて質問に答えてください。','ぶんしょう:','しつもん:', 'かいとう:'),
'russian': ('Ответьте на следующий вопрос на основе информации в данном отрывке.', 'Отрывок:', 'Вопрос:', 'Ответ:'),
'swahili': ('Jibu swali lifuatalo kulingana na habari kwenye kifungu kilichotolewa.', 'Kifungu:', 'Swali:', 'Jibu:'),
'telugu': ('ఇచ్చిన పేరాలోని సమాచారం ఆధారంగా కింది ప్రశ్నకు సమాధానం ఇవ్వండి.', 'పేరా:', 'ప్రశ్న:', 'సమాధానం:'),
'thai':('ตอบคำถามต่อไปนี้โดยอิงตามข้อมูลในตอนข้อความที่กำหนด:', 'ตอนข้อความ:', 'คำถาม:', 'คำตอบ:')
}
tydiqa_datasets = []
@@ -31,7 +31,7 @@ for _lang in langs:
tydiqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=f"{_hint[0]}\n\n</E>{_hint[1]}{{passage_text}}\n{_hint[2]} {{question_text}}\n{_hint[3]} {{answer}}" ,
template=f'{_hint[0]}\n\n</E>{_hint[1]}{{passage_text}}\n{_hint[2]} {{question_text}}\n{_hint[3]} {{answer}}' ,
ice_token='</E>'
),
retriever=dict(type=ZeroRetriever),
......
@@ -7,11 +7,11 @@ from opencompass.utils.text_postprocessors import first_option_postprocess
single_choice_prompts = {
"single_choice_cn": "以下是一道单项选择题,请你根据你了解的知识给出正确的答案选项。\n下面是你要回答的题目:\n{question}\n答案选项:",
'single_choice_cn': '以下是一道单项选择题,请你根据你了解的知识给出正确的答案选项。\n下面是你要回答的题目:\n{question}\n答案选项:',
}
wikibench_sets = {
"wiki": ["single_choice_cn"],
'wiki': ['single_choice_cn'],
}
do_circular = True
@@ -24,31 +24,31 @@ for _split in list(wikibench_sets.keys()):
ice_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
begin='</E>',
round=[
dict(role="HUMAN", prompt=single_choice_prompts[_name]),
dict(role="BOT", prompt="{answer}"),
dict(role='HUMAN', prompt=single_choice_prompts[_name]),
dict(role='BOT', prompt='{answer}'),
],
),
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
wikibench_eval_cfg = dict(
evaluator=dict(type=CircularEvaluator if do_circular else AccEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options="ABCD"),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
)
wikibench_datasets.append(
dict(
type=WikiBenchDataset,
path=f"./data/WikiBench/{_name}.jsonl",
name="circular_" + _name if do_circular else _name,
abbr="wikibench-" + _split + "-" + _name + "circular" if do_circular else "",
path=f'./data/WikiBench/{_name}.jsonl',
name='circular_' + _name if do_circular else _name,
abbr='wikibench-' + _split + '-' + _name + 'circular' if do_circular else '',
reader_cfg=dict(
input_columns=["question"],
output_column="answer",
input_columns=['question'],
output_column='answer',
),
infer_cfg=wikibench_infer_cfg,
eval_cfg=wikibench_eval_cfg,
......
@@ -17,21 +17,21 @@ wikitext_infer_cfg = dict(
wikitext_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )
wikitext_103_raw_datasets = []
for _split in ["validation", "test"]:
for _split in ['validation', 'test']:
wikitext_reader_cfg = dict(
input_columns=["text"],
input_columns=['text'],
output_column=None,
train_split="train",
train_split='train',
test_split=_split,
)
wikitext_103_raw_datasets.append(
dict(
abbr=f"wikitext-103-raw-{_split}",
abbr=f'wikitext-103-raw-{_split}',
type=HFDataset,
path="wikitext",
name="wikitext-103-raw-v1",
path='wikitext',
name='wikitext-103-raw-v1',
reader_cfg=wikitext_reader_cfg,
infer_cfg=wikitext_infer_cfg,
eval_cfg=wikitext_eval_cfg,
......
@@ -17,21 +17,21 @@ wikitext_infer_cfg = dict(
wikitext_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )
wikitext_2_raw_datasets = []
for _split in ["validation", "test"]:
for _split in ['validation', 'test']:
wikitext_reader_cfg = dict(
input_columns=["text"],
input_columns=['text'],
output_column=None,
train_split="train",
train_split='train',
test_split=_split,
)
wikitext_2_raw_datasets.append(
dict(
abbr=f"wikitext-2-raw-{_split}",
abbr=f'wikitext-2-raw-{_split}',
type=HFDataset,
path="wikitext",
name="wikitext-2-raw-v1",
path='wikitext',
name='wikitext-2-raw-v1',
reader_cfg=wikitext_reader_cfg,
infer_cfg=wikitext_infer_cfg,
eval_cfg=wikitext_eval_cfg,
......
@@ -16,7 +16,7 @@ winograd_infer_cfg = dict(
template={
i: dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
f"{{prompt}} Q: In the previous text, what does '{{pronoun}}' refer to? A: {{opt{i+1}}}"
), # noqa
......
@@ -6,8 +6,8 @@ from opencompass.datasets import winograndeDataset_V2
from opencompass.utils.text_postprocessors import first_option_postprocess
winogrande_reader_cfg = dict(
input_columns=["opt1", "opt2"],
output_column="answer",
input_columns=['opt1', 'opt2'],
output_column='answer',
)
winogrande_infer_cfg = dict(
@@ -15,9 +15,9 @@ winogrande_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}\nAnswer:"
'Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}\nAnswer:'
),
]),
),
@@ -27,13 +27,13 @@ winogrande_infer_cfg = dict(
winogrande_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)
winogrande_datasets = [
dict(
abbr="winogrande",
abbr='winogrande',
type=winograndeDataset_V2,
path='./data/winogrande',
reader_cfg=winogrande_reader_cfg,
......
@@ -6,23 +6,23 @@ from opencompass.datasets import winograndeDataset_V3
from opencompass.utils.text_postprocessors import first_option_postprocess
winogrande_reader_cfg = dict(
input_columns=["opt1", "opt2"],
output_column="answer",
train_split="train_xs",
test_split="dev",
input_columns=['opt1', 'opt2'],
output_column='answer',
train_split='train_xs',
test_split='dev',
)
winogrande_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
begin='</E>',
round=[
dict(role="HUMAN", prompt="Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}\nAnswer:"),
dict(role="BOT", prompt="{answer}"),
dict(role='HUMAN', prompt='Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}\nAnswer:'),
dict(role='BOT', prompt='{answer}'),
]
),
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=[0, 2, 4, 6, 8]),
inferencer=dict(type=GenInferencer),
@@ -30,15 +30,15 @@ winogrande_infer_cfg = dict(
winogrande_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type=first_option_postprocess, options="AB"),
pred_role='BOT',
pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)
winogrande_datasets = [
dict(
abbr="winogrande",
abbr='winogrande',
type=winograndeDataset_V3,
path="./data/winogrande",
path='./data/winogrande',
reader_cfg=winogrande_reader_cfg,
infer_cfg=winogrande_infer_cfg,
eval_cfg=winogrande_eval_cfg,
......
@@ -6,23 +6,23 @@ from opencompass.datasets import winograndeDataset_V3
from opencompass.utils.text_postprocessors import first_option_postprocess
winogrande_reader_cfg = dict(
input_columns=["prompt", "only_option1", "only_option2"],
output_column="answer",
train_split="train_xs",
test_split="dev",
input_columns=['prompt', 'only_option1', 'only_option2'],
output_column='answer',
train_split='train_xs',
test_split='dev',
)
winogrande_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
begin='</E>',
round=[
dict(role="HUMAN", prompt="Question: {prompt}\nA. {only_option1}\nB. {only_option2}\nAnswer:"),
dict(role="BOT", prompt="{answer}"),
dict(role='HUMAN', prompt='Question: {prompt}\nA. {only_option1}\nB. {only_option2}\nAnswer:'),
dict(role='BOT', prompt='{answer}'),
]
),
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=[0, 2, 4, 6, 8]),
inferencer=dict(type=GenInferencer),
@@ -30,15 +30,15 @@ winogrande_infer_cfg = dict(
winogrande_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_postprocessor=dict(type=first_option_postprocess, options="AB"),
pred_role='BOT',
pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)
winogrande_datasets = [
dict(
abbr="winogrande",
abbr='winogrande',
type=winograndeDataset_V3,
path="./data/winogrande",
path='./data/winogrande',
reader_cfg=winogrande_reader_cfg,
infer_cfg=winogrande_infer_cfg,
eval_cfg=winogrande_eval_cfg,
......
@@ -7,11 +7,11 @@ from opencompass.datasets import winograndeDataset_V3
winogrande_reader_cfg = dict(
input_columns=['opt1', 'opt2'],
output_column='answer',
train_split="train_xs",
test_split="dev",
train_split='train_xs',
test_split='dev',
)
question_and_options = "Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}"
question_and_options = 'Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}'
winogrande_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
@@ -20,7 +20,7 @@ winogrande_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={'A': '</E>{opt1}', 'B': '</E>{opt2}'},
ice_token="</E>",
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=[0, 2, 4, 6, 8]),
inferencer=dict(type=LLInferencer),
......
@@ -6,8 +6,8 @@ from opencompass.datasets import winograndeDataset_V2
from opencompass.utils.text_postprocessors import first_option_postprocess
winogrande_reader_cfg = dict(
input_columns=["prompt", "only_option1", "only_option2"],
output_column="answer",
input_columns=['prompt', 'only_option1', 'only_option2'],
output_column='answer',
)
winogrande_infer_cfg = dict(
@@ -15,7 +15,7 @@ winogrande_infer_cfg = dict(
type=PromptTemplate,
template=dict(
round=[
dict(role="HUMAN", prompt="Question: {prompt}\nA. {only_option1}\nB. {only_option2}\nAnswer:"),
dict(role='HUMAN', prompt='Question: {prompt}\nA. {only_option1}\nB. {only_option2}\nAnswer:'),
]
),
),
@@ -25,13 +25,13 @@ winogrande_infer_cfg = dict(
winogrande_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)
winogrande_datasets = [
dict(
abbr="winogrande",
abbr='winogrande',
type=winograndeDataset_V2,
path='./data/winogrande',
reader_cfg=winogrande_reader_cfg,
......