Unverified commit aa2dd2b5, authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
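
The lint configuration this commit adds is not visible in this excerpt. As a rough sketch of the kind of check that enforces the style applied throughout the diff below — every `-`/`+` pair is the same mechanical rewrite of a double-quoted string literal to single quotes, with behavior unchanged — a flake8-quotes setup might look like this (the tool, file name, and options here are illustrative assumptions, not taken from the repo):

# .flake8 -- hypothetical lint config (flake8-quotes plugin assumed);
# enforces the single-quote style for inline strings, matching the
# "..." -> '...' rewrites in this diff.
[flake8]
inline-quotes = single
multiline-quotes = double
docstring-quotes = double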
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(1000, 5000, 1000))
 document_depth_percent_intervals = 20
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 # ----------English Version----------
 base_path = './data/needlebench'
......
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(1000, 5000, 1000))
 document_depth_percent_intervals = 20
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 base_path = './data/needlebench'
 file_list = ['PaulGrahamEssays.jsonl']
......
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(1000, 5000, 1000))
 document_depth_percent_intervals = 20
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 base_path = './data/needlebench'
 file_list = ['PaulGrahamEssays.jsonl']
......
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(5000, 9000, 1000))
 document_depth_percent_intervals = 20
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 # ----------English Version----------
 base_path = './data/needlebench'
......
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(5000, 9000, 1000))
 document_depth_percent_intervals = 20
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 base_path = './data/needlebench'
 file_list = ['PaulGrahamEssays.jsonl']
......
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(5000, 9000, 1000))
 document_depth_percent_intervals_list = [1, 5, 10, 15, 20]
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 base_path = './data/needlebench'
 file_list = ['PaulGrahamEssays.jsonl']
......
@@ -16,7 +16,7 @@ def generate_linear_space(start, end, num):
     if num == 1:
         return [start]
     elif num < 1:
-        raise ValueError("num must be at least 1.")
+        raise ValueError('num must be at least 1.')
     step = (end - start) / (num - 1)
     return [start + step * i for i in range(num)]
@@ -54,7 +54,7 @@ needlebench_eval_cfg = dict(

 context_lengths = list(range(5000, 9000, 1000))
 document_depth_percent_intervals = 20
-document_depth_percent_interval_type = "linear"
+document_depth_percent_interval_type = 'linear'

 base_path = './data/needlebench'
 file_list = ['PaulGrahamEssays.jsonl']
......
@@ -36,19 +36,19 @@ for k in [0, 1, 5]:
         prompt_template=dict(
             type=PromptTemplate,
             template=dict(
-                begin="</E>",
+                begin='</E>',
                 round=[
                     dict(role='HUMAN', prompt='Answer the question, your answer should be as simple as possible, start your answer with the prompt \'The answer is \'.\nQ: {question}?'),
                     dict(role='BOT', prompt='A:'),
                 ]
             ),
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
         inferencer=dict(type=GenInferencer, max_out_len=50),
     )

-    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

     nq_datasets.append(
         dict(
......
@@ -9,12 +9,12 @@ nq_reader_cfg = dict(
 nq_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
-        template="Answer these questions:\nQ: {question}?\nA:{answer}",
+        template='Answer these questions:\nQ: {question}?\nA:{answer}',
     ),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=GenInferencer))

-nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

 nq_datasets = [
     dict(
......
@@ -16,7 +16,7 @@ nq_infer_cfg = dict(
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=GenInferencer))

-nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

 nq_datasets = [
     dict(
......
@@ -17,7 +17,7 @@ nq_infer_cfg = dict(
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=GenInferencer))

-nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

 nq_datasets = [
     dict(
......
@@ -17,7 +17,7 @@ nq_infer_cfg = dict(
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=GenInferencer))

-nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

 nq_datasets = [
     dict(
......
@@ -36,19 +36,19 @@ for k in [1]:
         prompt_template=dict(
             type=PromptTemplate,
             template=dict(
-                begin="</E>",
+                begin='</E>',
                 round=[
                     dict(role='HUMAN', prompt='Q: {question}?'),
                     dict(role='BOT', prompt='A:'),
                 ]
             ),
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
-        inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=["Q:", "\n"]),
+        inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Q:', '\n']),
     )

-    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

     nq_datasets.append(
         dict(
......
@@ -26,13 +26,13 @@ for k in [1]:
         prompt_template=dict(
             type=PromptTemplate,
             template='</E>Q: {question}\nA: ',
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
-        inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=["Q:", "\n"]),
+        inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Q:', '\n']),
     )

-    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

     nq_datasets.append(
         dict(
......
@@ -36,19 +36,19 @@ for k in [0, 1, 5, 25]:
         prompt_template=dict(
             type=PromptTemplate,
             template=dict(
-                begin="</E>",
+                begin='</E>',
                 round=[
                     dict(role='HUMAN', prompt='Q: {question}?'),
                     dict(role='BOT', prompt='A:'),
                 ]
             ),
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
-        inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=["Q:", "\n"]),
+        inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Q:', '\n']),
     )

-    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")
+    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

     nq_datasets.append(
         dict(
......
@@ -4,7 +4,7 @@ from opencompass.openicl.icl_inferencer import GenInferencer
 from opencompass.datasets import NaturalQuestionDataset_CN, NQEvaluator_CN

 nqcn_reader_cfg = dict(
-    input_columns=["question"], output_column="answer", train_split="test"
+    input_columns=['question'], output_column='answer', train_split='test'
 )

 nqcn_infer_cfg = dict(
@@ -12,7 +12,7 @@ nqcn_infer_cfg = dict(
         type=PromptTemplate,
         template=dict(
             round=[
-                dict(role="HUMAN", prompt="问题: {question}?\n答案是:"),
+                dict(role='HUMAN', prompt='问题: {question}?\n答案是:'),
             ],
         ),
     ),
@@ -20,13 +20,13 @@ nqcn_infer_cfg = dict(
     inferencer=dict(type=GenInferencer),
 )

-nqcn_eval_cfg = dict(evaluator=dict(type=NQEvaluator_CN), pred_role="BOT")
+nqcn_eval_cfg = dict(evaluator=dict(type=NQEvaluator_CN), pred_role='BOT')

 nqcn_datasets = [
     dict(
-        abbr="nq_cn",
+        abbr='nq_cn',
         type=NaturalQuestionDataset_CN,
-        path="./data/nq_cn",
+        path='./data/nq_cn',
         reader_cfg=nqcn_reader_cfg,
         infer_cfg=nqcn_infer_cfg,
         eval_cfg=nqcn_eval_cfg,
......
@@ -6,36 +6,36 @@ from opencompass.datasets import OBQADataset
 from opencompass.utils.text_postprocessors import first_option_postprocess

 _input_columns = [
-    ["question_stem", "A", "B", "C", "D"],
-    ["question_stem", "A", "B", "C", "D", "fact1"],
+    ['question_stem', 'A', 'B', 'C', 'D'],
+    ['question_stem', 'A', 'B', 'C', 'D', 'fact1'],
 ]
 _template = [
     dict(
         round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "Question: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:"
+                'Question: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:'
             ),
         ], ),
     dict(
         round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "Given the fact: {fact1}\nQuestion: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:",
+                'Given the fact: {fact1}\nQuestion: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:',
             ),
         ], ),
 ]

 obqa_datasets = [
     dict(
-        abbr="openbookqa",
+        abbr='openbookqa',
         type=OBQADataset,
         path='./data/openbookqa/Main/test.jsonl',
     ),
     dict(
-        abbr="openbookqa_fact",
+        abbr='openbookqa_fact',
         type=OBQADataset,
         path='./data/openbookqa/Additional/test_complete.jsonl',
     ),
@@ -43,7 +43,7 @@ obqa_datasets = [

 for _i in range(2):
     obqa_reader_cfg = dict(
-        input_columns=_input_columns[_i], output_column="answerKey")
+        input_columns=_input_columns[_i], output_column='answerKey')
     obqa_infer_cfg = dict(
         prompt_template=dict(type=PromptTemplate, template=_template[_i]),
         retriever=dict(type=ZeroRetriever),
@@ -51,10 +51,10 @@ for _i in range(2):
     )
     obqa_eval_cfg = dict(
         evaluator=dict(type=AccEvaluator),
-        pred_role="BOT",
+        pred_role='BOT',
         pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
     )

-    obqa_datasets[_i]["reader_cfg"] = obqa_reader_cfg
-    obqa_datasets[_i]["infer_cfg"] = obqa_infer_cfg
-    obqa_datasets[_i]["eval_cfg"] = obqa_eval_cfg
+    obqa_datasets[_i]['reader_cfg'] = obqa_reader_cfg
+    obqa_datasets[_i]['infer_cfg'] = obqa_infer_cfg
+    obqa_datasets[_i]['eval_cfg'] = obqa_eval_cfg
@@ -9,32 +9,32 @@ _input_columns = [
     ['question_stem', 'A', 'B', 'C', 'D', 'fact1'],
 ]
 _template = [{
-    'A': "{question_stem} {A}",
-    'B': "{question_stem} {B}",
-    'C': "{question_stem} {C}",
-    'D': "{question_stem} {D}",
+    'A': '{question_stem} {A}',
+    'B': '{question_stem} {B}',
+    'C': '{question_stem} {C}',
+    'D': '{question_stem} {D}',
 }, {
-    'A': "Given the fact {fact1}, we know that {question_stem} {A}",
-    'B': "Given the fact {fact1}, we know that {question_stem} {B}",
-    'C': "Given the fact {fact1}, we know that {question_stem} {C}",
-    'D': "Given the fact {fact1}, we know that {question_stem} {D}",
+    'A': 'Given the fact {fact1}, we know that {question_stem} {A}',
+    'B': 'Given the fact {fact1}, we know that {question_stem} {B}',
+    'C': 'Given the fact {fact1}, we know that {question_stem} {C}',
+    'D': 'Given the fact {fact1}, we know that {question_stem} {D}',
 }]

 obqa_datasets = [
     dict(
-        abbr="openbookqa",
+        abbr='openbookqa',
         type=OBQADataset,
         path='./data/openbookqa/Main/test.jsonl',
     ),
     dict(
-        abbr="openbookqa_fact",
+        abbr='openbookqa_fact',
         type=OBQADataset,
         path='./data/openbookqa/Additional/test_complete.jsonl',
     ),
 ]

 for _i in range(2):
     obqa_reader_cfg = dict(
-        input_columns=_input_columns[_i], output_column="answerKey")
+        input_columns=_input_columns[_i], output_column='answerKey')
     obqa_infer_cfg = dict(
         prompt_template=dict(
             type=PromptTemplate,
@@ -44,6 +44,6 @@ for _i in range(2):
     )
     obqa_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

-    obqa_datasets[_i]["reader_cfg"] = obqa_reader_cfg
-    obqa_datasets[_i]["infer_cfg"] = obqa_infer_cfg
-    obqa_datasets[_i]["eval_cfg"] = obqa_eval_cfg
+    obqa_datasets[_i]['reader_cfg'] = obqa_reader_cfg
+    obqa_datasets[_i]['infer_cfg'] = obqa_infer_cfg
+    obqa_datasets[_i]['eval_cfg'] = obqa_eval_cfg
@@ -6,7 +6,7 @@ from opencompass.datasets import OBQADataset_V2
 obqa_reader_cfg = dict(
     input_columns=['question_stem', 'A', 'B', 'C', 'D', 'fact1'],
-    output_column="answerKey"
+    output_column='answerKey'
 )

 obqa_infer_cfg = dict(
     prompt_template=dict(
@@ -15,10 +15,10 @@ obqa_infer_cfg = dict(
             ans: dict(
                 round=[
                     dict(
-                        role="HUMAN",
-                        prompt="We know the fact that {fact1}.\nQuestion: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\n"
+                        role='HUMAN',
+                        prompt='We know the fact that {fact1}.\nQuestion: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\n'
                     ),
-                    dict(role="BOT", prompt=f"Answer: {ans}"),
+                    dict(role='BOT', prompt=f'Answer: {ans}'),
                 ], )
             for ans in ['A', 'B', 'C', 'D']
         }
......
@@ -13,11 +13,11 @@ _template = [
         ans: dict(
             round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "Question: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:"
+                    'Question: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:'
                 ),
-                dict(role="BOT", prompt=ans),
+                dict(role='BOT', prompt=ans),
             ], )
         for ans in ['A', 'B', 'C', 'D']
     },
@@ -25,11 +25,11 @@ _template = [
         ans: dict(
             round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "Given the fact: {fact1}\nQuestion: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:"
+                    'Given the fact: {fact1}\nQuestion: {question_stem}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nAnswer:'
                 ),
-                dict(role="BOT", prompt=ans),
+                dict(role='BOT', prompt=ans),
             ], )
         for ans in ['A', 'B', 'C', 'D']
     }
@@ -37,7 +37,7 @@ _template = [

 obqa_datasets = [
     dict(
-        abbr="openbookqa",
+        abbr='openbookqa',
         type=OBQADataset,
         path='./data/openbookqa/Main/test.jsonl',
     ),
@@ -49,7 +49,7 @@ obqa_datasets = [
 ]
 for _i in range(2):
     obqa_reader_cfg = dict(
-        input_columns=_input_columns[_i], output_column="answerKey")
+        input_columns=_input_columns[_i], output_column='answerKey')
     obqa_infer_cfg = dict(
         prompt_template=dict(
             type=PromptTemplate,
@@ -59,6 +59,6 @@ for _i in range(2):
     )
     obqa_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )

-    obqa_datasets[_i]["reader_cfg"] = obqa_reader_cfg
-    obqa_datasets[_i]["infer_cfg"] = obqa_infer_cfg
-    obqa_datasets[_i]["eval_cfg"] = obqa_eval_cfg
+    obqa_datasets[_i]['reader_cfg'] = obqa_reader_cfg
+    obqa_datasets[_i]['infer_cfg'] = obqa_infer_cfg
+    obqa_datasets[_i]['eval_cfg'] = obqa_eval_cfg
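
For reference, the needlebench helper that the first several hunks touch can be reassembled in full from their own context lines. After the lint it reads as below (docstring and comments added here; everything else is taken verbatim from the hunks):

def generate_linear_space(start, end, num):
    """Return `num` evenly spaced values from `start` to `end`, inclusive."""
    if num == 1:
        return [start]
    elif num < 1:
        raise ValueError('num must be at least 1.')
    # num points span num - 1 equal gaps between start and end.
    step = (end - start) / (num - 1)
    return [start + step * i for i in range(num)]

For example, generate_linear_space(0, 100, 5) gives [0.0, 25.0, 50.0, 75.0, 100.0]. Presumably the document_depth_percent_intervals values in these configs feed num here to place needle depths at evenly spaced percentages when document_depth_percent_interval_type is 'linear', though that call site is not shown in this excerpt.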