Unverified Commit aa2dd2b5 authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
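
The hunks below are a mechanical reformatting of the SuperGLUE and TabMWP dataset configs: plain string literals are rewritten from double quotes to single quotes so that the files under configs/ pass the same lint checks as the rest of the code base. A minimal sketch of how such a quote lint is typically wired up with pre-commit follows; the hook revisions, and the assumption that this PR relies on exactly these hooks, are illustrative rather than taken from the diff:

  repos:
    - repo: https://github.com/pre-commit/pre-commit-hooks
      rev: v4.3.0
      hooks:
        - id: double-quote-string-fixer  # rewrites "..." literals to '...' when the string contains no single quote
    - repo: https://github.com/PyCQA/flake8
      rev: 5.0.4
      hooks:
        - id: flake8  # flags remaining style issues (line length, unused imports, ...)

A quote fixer of this kind leaves strings that themselves contain single quotes untouched, which is why a few double-quoted prompts (for example in the WSC and WiC configs) survive unchanged in the hunks below.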
@@ -5,9 +5,9 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
COPA_reader_cfg = dict(
input_columns=["question", "premise", "choice1", "choice2"],
output_column="label",
test_split="train")
input_columns=['question', 'premise', 'choice1', 'choice2'],
output_column='label',
test_split='train')
COPA_infer_cfg = dict(
prompt_template=dict(
@@ -16,16 +16,16 @@ COPA_infer_cfg = dict(
0:
dict(round=[
dict(
role="HUMAN",
prompt="{premise}\nQuestion: What may be the {question}?\nAnswer:"),
dict(role="BOT", prompt="{choice1}"),
role='HUMAN',
prompt='{premise}\nQuestion: What may be the {question}?\nAnswer:'),
dict(role='BOT', prompt='{choice1}'),
]),
1:
dict(round=[
dict(
role="HUMAN",
prompt="{premise}\nQuestion: What may be the {question}?\nAnswer:"),
dict(role="BOT", prompt="{choice2}"),
role='HUMAN',
prompt='{premise}\nQuestion: What may be the {question}?\nAnswer:'),
dict(role='BOT', prompt='{choice2}'),
]),
},
),
@@ -38,10 +38,10 @@ COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
COPA_datasets = [
dict(
type=HFDataset,
abbr="COPA",
path="json",
data_files="./data/SuperGLUE/COPA/val.jsonl",
split="train",
abbr='COPA',
path='json',
data_files='./data/SuperGLUE/COPA/val.jsonl',
split='train',
reader_cfg=COPA_reader_cfg,
infer_cfg=COPA_infer_cfg,
eval_cfg=COPA_eval_cfg,
@@ -6,8 +6,8 @@ from opencompass.datasets import MultiRCDataset_V2
from opencompass.utils.text_postprocessors import first_option_postprocess
MultiRC_reader_cfg = dict(
input_columns=["question", "text", "answer"],
output_column="label",
input_columns=['question', 'text', 'answer'],
output_column='label',
)
MultiRC_infer_cfg = dict(
@@ -15,9 +15,9 @@ MultiRC_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?\nA. Yes\nB. No\nAnswer:"
'{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?\nA. Yes\nB. No\nAnswer:'
),
]),
),
@@ -27,15 +27,15 @@ MultiRC_infer_cfg = dict(
MultiRC_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)
MultiRC_datasets = [
dict(
abbr="MultiRC",
abbr='MultiRC',
type=MultiRCDataset_V2,
path="./data/SuperGLUE/MultiRC/val.jsonl",
path='./data/SuperGLUE/MultiRC/val.jsonl',
reader_cfg=MultiRC_reader_cfg,
infer_cfg=MultiRC_infer_cfg,
eval_cfg=MultiRC_eval_cfg,
@@ -11,8 +11,8 @@ MultiRC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0: "Passage:{text}。\nQuestion:{question}。\nAnswer: {answer}. It is false.",
1: "Passage:</P>。\nQuestion:{question}。\nAnswer: {answer}. It is true.",
0: 'Passage:{text}。\nQuestion:{question}。\nAnswer: {answer}. It is false.',
1: 'Passage:</P>。\nQuestion:{question}。\nAnswer: {answer}. It is true.',
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
@@ -5,8 +5,8 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDataset
MultiRC_reader_cfg = dict(
input_columns=["question", "text", "answer"],
output_column="label",
input_columns=['question', 'text', 'answer'],
output_column='label',
)
MultiRC_infer_cfg = dict(
@@ -16,16 +16,16 @@ MultiRC_infer_cfg = dict(
0:
dict(round=[
dict(
role="HUMAN",
prompt="{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?"),
dict(role="BOT", prompt="No, it is false."),
role='HUMAN',
prompt='{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?'),
dict(role='BOT', prompt='No, it is false.'),
]),
1:
dict(round=[
dict(
role="HUMAN",
prompt="{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?"),
dict(role="BOT", prompt="Yes, it is true."),
role='HUMAN',
prompt='{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?'),
dict(role='BOT', prompt='Yes, it is true.'),
]),
},
),
@@ -38,8 +38,8 @@ MultiRC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
MultiRC_datasets = [
dict(
type=MultiRCDataset,
abbr="MultiRC",
path="./data/SuperGLUE/MultiRC/val.jsonl",
abbr='MultiRC',
path='./data/SuperGLUE/MultiRC/val.jsonl',
reader_cfg=MultiRC_reader_cfg,
infer_cfg=MultiRC_infer_cfg,
eval_cfg=MultiRC_eval_cfg,
@@ -6,8 +6,8 @@ from opencompass.datasets import AXDataset_V2
from opencompass.utils.text_postprocessors import first_option_postprocess
RTE_reader_cfg = dict(
input_columns=["hypothesis", "premise"],
output_column="label",
input_columns=['hypothesis', 'premise'],
output_column='label',
)
RTE_infer_cfg = dict(
@@ -15,9 +15,9 @@ RTE_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:"
'{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:'
),
]),
),
@@ -27,15 +27,15 @@ RTE_infer_cfg = dict(
RTE_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)
RTE_datasets = [
dict(
abbr="RTE",
abbr='RTE',
type=AXDataset_V2, # rte share the same format with ax
path="./data/SuperGLUE/RTE/val.jsonl",
path='./data/SuperGLUE/RTE/val.jsonl',
reader_cfg=RTE_reader_cfg,
infer_cfg=RTE_infer_cfg,
eval_cfg=RTE_eval_cfg,
@@ -5,31 +5,31 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
RTE_reader_cfg = dict(
input_columns=["hypothesis", "premise"],
output_column="label",
test_split="train")
input_columns=['hypothesis', 'premise'],
output_column='label',
test_split='train')
RTE_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
"entailment":
'entailment':
dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"
'{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'
),
dict(role="BOT", prompt="Yes"),
dict(role='BOT', prompt='Yes'),
]),
"not_entailment":
'not_entailment':
dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"
'{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'
),
dict(role="BOT", prompt="No"),
dict(role='BOT', prompt='No'),
])
},
),
@@ -42,10 +42,10 @@ RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
RTE_datasets = [
dict(
type=HFDataset,
abbr="RTE",
path="json",
data_files="./data/SuperGLUE/RTE/val.jsonl",
split="train",
abbr='RTE',
path='json',
data_files='./data/SuperGLUE/RTE/val.jsonl',
split='train',
reader_cfg=RTE_reader_cfg,
infer_cfg=RTE_infer_cfg,
eval_cfg=RTE_eval_cfg,
@@ -11,7 +11,7 @@ ReCoRD_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=
"Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result?Give me the entity name:"),
'Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result?Give me the entity name:'),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
@@ -5,8 +5,8 @@ from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset
ReCoRD_reader_cfg = dict(
input_columns=["question", "text"],
output_column="answers",
input_columns=['question', 'text'],
output_column='answers',
)
ReCoRD_infer_cfg = dict(
@@ -14,9 +14,9 @@ ReCoRD_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"Passage: {text}\nResult: {question}\nQuestion: What entity does ____ refer to in the result? Give me the entity name:"
'Passage: {text}\nResult: {question}\nQuestion: What entity does ____ refer to in the result? Give me the entity name:'
),
]),
),
@@ -27,14 +27,14 @@ ReCoRD_infer_cfg = dict(
ReCoRD_eval_cfg = dict(
evaluator=dict(type=EMEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type="ReCoRD"),
pred_postprocessor=dict(type='ReCoRD'),
)
ReCoRD_datasets = [
dict(
type=ReCoRDDataset,
abbr="ReCoRD",
path="./data/SuperGLUE/ReCoRD/val.jsonl",
abbr='ReCoRD',
path='./data/SuperGLUE/ReCoRD/val.jsonl',
reader_cfg=ReCoRD_reader_cfg,
infer_cfg=ReCoRD_infer_cfg,
eval_cfg=ReCoRD_eval_cfg,
@@ -12,7 +12,7 @@ ReCoRD_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN", prompt="Passage:\n{text}\nResult:\n{question}\nQuestion:\nWhat entity does ____ refer to in the Result?\nAnswer:"
role='HUMAN', prompt='Passage:\n{text}\nResult:\n{question}\nQuestion:\nWhat entity does ____ refer to in the Result?\nAnswer:'
),
]),
),
@@ -6,8 +6,8 @@ from opencompass.datasets import WSCDataset_V2
from opencompass.utils.text_postprocessors import first_capital_postprocess
WSC_reader_cfg = dict(
input_columns=["span1", "span2", "text"],
output_column="label",
input_columns=['span1', 'span2', 'text'],
output_column='label',
)
WSC_infer_cfg = dict(
@@ -15,7 +15,7 @@ WSC_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{text}\nIs '{span1}' and '{span2}' refers to the same entity in the above sentence?\nA. Yes\nB. No\nAnswer:"
),
@@ -27,15 +27,15 @@ WSC_infer_cfg = dict(
WSC_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
WSC_datasets = [
dict(
abbr="WSC",
abbr='WSC',
type=WSCDataset_V2,
path="./data/SuperGLUE/WSC/val.jsonl",
path='./data/SuperGLUE/WSC/val.jsonl',
reader_cfg=WSC_reader_cfg,
infer_cfg=WSC_infer_cfg,
eval_cfg=WSC_eval_cfg,
@@ -6,8 +6,8 @@ from opencompass.datasets import WSCDataset_V3
from opencompass.utils.text_postprocessors import first_capital_postprocess
WSC_reader_cfg = dict(
input_columns=["span1", "span2", "text"],
output_column="label",
input_columns=['span1', 'span2', 'text'],
output_column='label',
)
WSC_infer_cfg = dict(
@@ -15,9 +15,9 @@ WSC_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer:"
'Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer:'
),
]),
),
@@ -27,15 +27,15 @@ WSC_infer_cfg = dict(
WSC_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
WSC_datasets = [
dict(
abbr="WSC",
abbr='WSC',
type=WSCDataset_V3,
path="./data/SuperGLUE/WSC/val.jsonl",
path='./data/SuperGLUE/WSC/val.jsonl',
reader_cfg=WSC_reader_cfg,
infer_cfg=WSC_infer_cfg,
eval_cfg=WSC_eval_cfg,
@@ -5,8 +5,8 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDataset
WSC_reader_cfg = dict(
input_columns=["span1", "span2", "text", "new_text"],
output_column="answer",
input_columns=['span1', 'span2', 'text', 'new_text'],
output_column='answer',
)
WSC_infer_cfg = dict(
@@ -14,10 +14,10 @@ WSC_infer_cfg = dict(
type=PromptTemplate,
template={
0: dict(round=[
dict(role="HUMAN", prompt="{text}"),
dict(role='HUMAN', prompt='{text}'),
]),
1: dict(round=[
dict(role="HUMAN", prompt="{new_text}"),
dict(role='HUMAN', prompt='{new_text}'),
]),
},
),
@@ -30,10 +30,10 @@ WSC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
WSC_datasets = [
dict(
type=WSCDataset,
path="json",
abbr="WSC",
data_files="./data/SuperGLUE/WSC/val.jsonl",
split="train",
path='json',
abbr='WSC',
data_files='./data/SuperGLUE/WSC/val.jsonl',
split='train',
reader_cfg=WSC_reader_cfg,
infer_cfg=WSC_infer_cfg,
eval_cfg=WSC_eval_cfg,
@@ -5,8 +5,8 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDataset_V3
WSC_reader_cfg = dict(
input_columns=["span1", "span2", "text"],
output_column="label",
input_columns=['span1', 'span2', 'text'],
output_column='label',
)
WSC_infer_cfg = dict(
@@ -16,16 +16,16 @@ WSC_infer_cfg = dict(
'A':
dict(round=[
dict(
role="HUMAN",
prompt="Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer: "
role='HUMAN',
prompt='Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer: '
),
dict(role='BOT', prompt='A'),
]),
'B':
dict(round=[
dict(
role="HUMAN",
prompt="Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer: "
role='HUMAN',
prompt='Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer: '
),
dict(role='BOT', prompt='B'),
]),
@@ -39,9 +39,9 @@ WSC_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )
WSC_datasets = [
dict(
abbr="WSC",
abbr='WSC',
type=WSCDataset_V3,
path="./data/SuperGLUE/WSC/val.jsonl",
path='./data/SuperGLUE/WSC/val.jsonl',
reader_cfg=WSC_reader_cfg,
infer_cfg=WSC_infer_cfg,
eval_cfg=WSC_eval_cfg,
@@ -5,8 +5,8 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDataset_V2
WSC_reader_cfg = dict(
input_columns=["span1", "span2", "text"],
output_column="label",
input_columns=['span1', 'span2', 'text'],
output_column='label',
)
WSC_infer_cfg = dict(
@@ -16,7 +16,7 @@ WSC_infer_cfg = dict(
'A':
dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{text}\nIs '{span1}' and '{span2}' refers to the same entity in the above sentence?"
),
@@ -25,7 +25,7 @@ WSC_infer_cfg = dict(
'B':
dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"{text}\nIs '{span1}' and '{span2}' refers to the same entity in the above sentence?"
),
@@ -41,9 +41,9 @@ WSC_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )
WSC_datasets = [
dict(
abbr="WSC",
abbr='WSC',
type=WSCDataset_V2,
path="./data/SuperGLUE/WSC/val.jsonl",
path='./data/SuperGLUE/WSC/val.jsonl',
reader_cfg=WSC_reader_cfg,
infer_cfg=WSC_infer_cfg,
eval_cfg=WSC_eval_cfg,
@@ -12,8 +12,8 @@ WSC_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0: "{text}",
1: "{new_text}"
0: '{text}',
1: '{new_text}'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
@@ -7,11 +7,11 @@ from opencompass.utils.text_postprocessors import first_capital_postprocess
WiC_reader_cfg = dict(
input_columns=[
"word",
"sentence1",
"sentence2",
'word',
'sentence1',
'sentence2',
],
output_column="label",
output_column='label',
)
WiC_infer_cfg = dict(
@@ -19,7 +19,7 @@ WiC_infer_cfg = dict(
type=PromptTemplate,
template=dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"Sentence 1: {sentence1}\nSentence 2: {sentence2}\nAre '{word}' in the above two sentenses the same?\nA. Yes\nB. No\nAnswer:"
),
@@ -31,15 +31,15 @@ WiC_infer_cfg = dict(
WiC_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
WiC_datasets = [
dict(
abbr="WiC",
abbr='WiC',
type=WiCDataset_V2,
path="./data/SuperGLUE/WiC/val.jsonl",
path='./data/SuperGLUE/WiC/val.jsonl',
reader_cfg=WiC_reader_cfg,
infer_cfg=WiC_infer_cfg,
eval_cfg=WiC_eval_cfg,
@@ -6,12 +6,12 @@ from opencompass.datasets import WiCDataset
WiC_reader_cfg = dict(
input_columns=[
"word",
"sentence1",
"sentence2",
'word',
'sentence1',
'sentence2',
],
output_column="answer",
test_split="train")
output_column='answer',
test_split='train')
WiC_infer_cfg = dict(
prompt_template=dict(
@@ -20,7 +20,7 @@ WiC_infer_cfg = dict(
0:
dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"Sentence 1: {sentence1}\nSentence 2: {sentence2}\n'{word}' in the above two sentenses are different."
),
@@ -28,7 +28,7 @@ WiC_infer_cfg = dict(
1:
dict(round=[
dict(
role="HUMAN",
role='HUMAN',
prompt=
"Sentence 1: {sentence1}\nSentence 2: {sentence2}\n'{word}' in the above two sentenses are the same."
),
@@ -44,10 +44,10 @@ WiC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
WiC_datasets = [
dict(
type=WiCDataset,
abbr="WiC",
path="json",
data_files="./data/SuperGLUE/WiC/val.jsonl",
split="train",
abbr='WiC',
path='json',
data_files='./data/SuperGLUE/WiC/val.jsonl',
split='train',
reader_cfg=WiC_reader_cfg,
infer_cfg=WiC_infer_cfg,
eval_cfg=WiC_eval_cfg,
@@ -6,12 +6,12 @@ from opencompass.datasets import WiCDataset
WiC_reader_cfg = dict(
input_columns=[
"word",
"sentence1",
"sentence2",
'word',
'sentence1',
'sentence2',
],
output_column="answer",
test_split="train")
output_column='answer',
test_split='train')
WiC_infer_cfg = dict(
prompt_template=dict(
@@ -20,12 +20,12 @@ WiC_infer_cfg = dict(
0:
dict(round=[
dict(
role="HUMAN",
prompt="{word} in {sentence1} and {sentence2} is different."),
role='HUMAN',
prompt='{word} in {sentence1} and {sentence2} is different.'),
]),
1:
dict(round=[
dict(role="HUMAN", prompt="{word} in {sentence1} and {sentence2} is same."),
dict(role='HUMAN', prompt='{word} in {sentence1} and {sentence2} is same.'),
]),
},
),
@@ -38,10 +38,10 @@ WiC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
WiC_datasets = [
dict(
type=WiCDataset,
abbr="WiC",
path="json",
data_files="./data/SuperGLUE/WiC/val.jsonl",
split="train",
abbr='WiC',
path='json',
data_files='./data/SuperGLUE/WiC/val.jsonl',
split='train',
reader_cfg=WiC_reader_cfg,
infer_cfg=WiC_infer_cfg,
eval_cfg=WiC_eval_cfg,
@@ -8,17 +8,17 @@ from opencompass.datasets import TabMWPDataset, TabMWPEvaluator
input_format='TQ'
output_format='A'
elements = {"Q": "Question: {question}",
"T": "Table: {table}",
"S": "Solution: {solution}",
"A": "Answer: The answer is {answer}.",
"AS": "Answer: The answer is {answer}. BECAUSE: {solution}",
"SA": "Answer: {solution} The answer is {answer}."}
elements = {'Q': 'Question: {question}',
'T': 'Table: {table}',
'S': 'Solution: {solution}',
'A': 'Answer: The answer is {answer}.',
'AS': 'Answer: The answer is {answer}. BECAUSE: {solution}',
'SA': 'Answer: {solution} The answer is {answer}.'}
TabMWP_reader_cfg = dict(
input_columns=["question", "table"],
output_column="test_elements",
input_columns=['question', 'table'],
output_column='test_elements',
train_split='dev',
)
@@ -28,8 +28,8 @@ TabMWP_infer_cfg = dict(
template=dict(
round=[
dict(
role="HUMAN",
prompt= "\n".join(elements[label] for label in input_format)
role='HUMAN',
prompt= '\n'.join(elements[label] for label in input_format)
),
],
),
@@ -45,9 +45,8 @@ TabMWP_eval_cfg = dict(
TabMWP_datasets = [
dict(
type=TabMWPDataset,
path="./data/tabmwp/",
path='./data/tabmwp/',
reader_cfg=TabMWP_reader_cfg,
infer_cfg=TabMWP_infer_cfg,
eval_cfg=TabMWP_eval_cfg,)
]
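
To reproduce the same normalization locally (assuming a pre-commit setup along the lines sketched above), one would run pre-commit run --all-files, or pre-commit run --files followed by the changed config paths, before committing; the quote rewriting shown in this diff is exactly the kind of change such hooks apply automatically.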