jerrrrry / opencompass · Commits

Commit be3dfa50 ("Initial commit"), authored Aug 06, 2025 by jerrrrry.
Pipeline #2876 failed with stages in 0 seconds.
Changes: 807 files · Pipelines: 1
Showing 20 of the 807 changed files, with 554 additions and 0 deletions (+554 −0).
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_11c175.py  (+33 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_91ca53.py  (+44 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_54058d.py  (+34 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_5c24f1.py  (+45 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_9f3618.py  (+49 −0)
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_27071f.py  (+43 −0)
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_866273.py  (+30 −0)
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_ced824.py  (+47 −0)
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py  (+43 −0)
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py  (+34 −0)
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py  (+53 −0)
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_0f7784.py  (+29 −0)
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_30dea0.py  (+42 −0)

opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_11c175.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

CB_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'],
    output_column='label')

CB_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction': '{premise}?contradiction, {hypothesis}',
            'entailment': '{premise}?entailment, {hypothesis}',
            'neutral': '{premise}?neutral, {hypothesis}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

CB_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

CB_datasets = [
    dict(
        type=HFDataset,
        abbr='CB',
        path='json',
        split='train',
        data_files='./data/SuperGLUE/CB/val.jsonl',
        reader_cfg=CB_reader_cfg,
        infer_cfg=CB_infer_cfg,
        eval_cfg=CB_eval_cfg)
]
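
For orientation, a label-keyed template dict like the one above is how the PPL path works: PPLInferencer fills every label's template with the same example, scores each filled prompt under the model, and AccEvaluator then checks whether the lowest-perplexity label matches the gold one. A minimal toy sketch of that selection step, with a stand-in perplexity function rather than the real OpenCompass internals:

# Toy sketch of PPL-style label selection; `perplexity` is a stand-in,
# not the real model call made by PPLInferencer.
def perplexity(text: str) -> float:
    return float(len(text))  # placeholder: a real LM would score the token sequence

def ppl_predict(example: dict, templates: dict) -> str:
    # Fill each label's template and predict the label whose prompt scores best (lowest PPL).
    return min(templates, key=lambda lbl: perplexity(templates[lbl].format(**example)))

templates = {
    'contradiction': '{premise}?contradiction, {hypothesis}',
    'entailment': '{premise}?entailment, {hypothesis}',
    'neutral': '{premise}?neutral, {hypothesis}',
}
print(ppl_predict({'premise': 'It rained.', 'hypothesis': 'The ground is wet.'}, templates))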

opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_COPA_gen_91ca53 import COPA_datasets  # noqa: F401, F403
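
This four-line shim is the usual OpenCompass pattern: the hash-suffixed file holds the actual definitions, and the stable unsuffixed name re-exports them. A hedged sketch of how a top-level experiment config might compose several of the shims in this commit (the dotted import paths assume the experiment file sits at the configs root, and the model import is a placeholder, not part of this commit):

# Sketch of a top-level experiment config composing the dataset shims (paths assumed).
from mmengine.config import read_base

with read_base():
    from .datasets.SuperGLUE_COPA.SuperGLUE_COPA_gen import COPA_datasets
    from .datasets.SuperGLUE_RTE.SuperGLUE_RTE_gen import RTE_datasets
    # from .models.my_model import models  # placeholder model config

datasets = [*COPA_datasets, *RTE_datasets]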

opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_91ca53.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import COPADatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

COPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
)

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{premise}\nQuestion: Which may be the {question}?\nA. {choice1}\nB. {choice2}\nAnswer:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

COPA_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

COPA_datasets = [
    dict(
        abbr='COPA',
        type=COPADatasetV2,
        path='./data/SuperGLUE/COPA/val.jsonl',
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg,
    )
]
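
In this gen-style config the model writes free text, so first_option_postprocess has to pull the chosen letter out of the reply before AccEvaluator compares it with the label. A rough stand-in for that extraction (the real helper in opencompass.utils.text_postprocessors handles more reply formats than this):

import re

def first_option(text: str, options: str = 'AB') -> str:
    # Return the first standalone option letter in a model reply, or '' if none is found.
    match = re.search(rf'\b([{options}])\b', text)
    return match.group(1) if match else ''

print(first_option('Answer: B. The second choice fits better.'))  # -> B
print(first_option('I would say A'))                              # -> A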

opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_COPA_ppl_9f3618 import COPA_datasets  # noqa: F401, F403

opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_54058d.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

COPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
    test_split='train')

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice1}.',
            1: 'Passage:{premise}。\nQuestion:{question}。\nAnswer: {choice2}.',
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

COPA_datasets = [
    dict(
        type=HFDataset,
        abbr='COPA',
        path='json',
        data_files='./data/SuperGLUE/COPA/val.jsonl',
        split='train',
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg)
]
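
Unlike the V2 loaders, this config goes through Hugging Face datasets: path='json' plus data_files loads the jsonl as a 'train' split, and test_split='train' in the reader then points evaluation at that same split. Assuming the datasets library and the path from this config, the equivalent direct call is roughly:

# Roughly what HFDataset(path='json', data_files=..., split='train') loads.
from datasets import load_dataset

ds = load_dataset('json', data_files='./data/SuperGLUE/COPA/val.jsonl', split='train')
print(ds.column_names)  # expect fields like premise / choice1 / choice2 / question / label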

opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_5c24f1.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

COPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
    test_split='train')

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(role='HUMAN', prompt='{premise}\nQuestion: {question}\nAnswer:'),
                dict(role='BOT', prompt='{choice1}'),
            ]),
            1: dict(round=[
                dict(role='HUMAN', prompt='{premise}\nQuestion: {question}\nAnswer:'),
                dict(role='BOT', prompt='{choice2}'),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

COPA_datasets = [
    dict(
        type=HFDataset,
        abbr='COPA',
        path='json',
        data_files='./data/SuperGLUE/COPA/val.jsonl',
        split='train',
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg,
    )
]
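
The difference from _54058d above is that each candidate label is now a chat-style round list rather than one string: for a base model the HUMAN and BOT turns are ultimately joined into a single string to score, and only the BOT span ({choice1} vs {choice2}) differs between the two labels. A toy version of that flattening, assuming a plain newline joining rule (the real prompt assembly depends on the model's meta template):

# Toy flattening of a chat-style PPL template into one scoring string (joining rule assumed).
def flatten_round(round_msgs, example):
    return '\n'.join(msg['prompt'].format(**example) for msg in round_msgs)

template_1 = [
    dict(role='HUMAN', prompt='{premise}\nQuestion: {question}\nAnswer:'),
    dict(role='BOT', prompt='{choice2}'),
]
example = dict(premise='The man broke his toe.', question='cause',
               choice2='He dropped a hammer on his foot.')
print(flatten_round(template_1, example))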

opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_9f3618.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

COPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
    test_split='train')

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\nQuestion: What may be the {question}?\nAnswer:'),
                dict(role='BOT', prompt='{choice1}'),
            ]),
            1: dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\nQuestion: What may be the {question}?\nAnswer:'),
                dict(role='BOT', prompt='{choice2}'),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

COPA_datasets = [
    dict(
        type=HFDataset,
        abbr='COPA',
        path='json',
        data_files='./data/SuperGLUE/COPA/val.jsonl',
        split='train',
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg,
    )
]

opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_MultiRC_gen_27071f import MultiRC_datasets  # noqa: F401, F403

opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_27071f.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

MultiRC_reader_cfg = dict(
    input_columns=['question', 'text', 'answer'],
    output_column='label',
)

MultiRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?\nA. Yes\nB. No\nAnswer:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

MultiRC_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

MultiRC_datasets = [
    dict(
        abbr='MultiRC',
        type=MultiRCDatasetV2,
        path='./data/SuperGLUE/MultiRC/val.jsonl',
        reader_cfg=MultiRC_reader_cfg,
        infer_cfg=MultiRC_infer_cfg,
        eval_cfg=MultiRC_eval_cfg,
    )
]
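
MultiRCDatasetV2 is what turns the raw SuperGLUE file into the flat question/text/answer rows this config reads. Assuming the standard SuperGLUE MultiRC jsonl layout (nested passage, then questions, then answers), the flattening looks roughly like this; the real loader may also remap the label values:

import json

def flatten_multirc(path):
    # Toy flattening of nested MultiRC records into flat rows (jsonl layout assumed).
    rows = []
    with open(path, encoding='utf-8') as f:
        for line in f:
            record = json.loads(line)
            passage = record['passage']
            for q in passage['questions']:
                for a in q['answers']:
                    rows.append(dict(text=passage['text'], question=q['question'],
                                     answer=a['text'], label=a['label']))
    return rows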

opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_MultiRC_ppl_ced824 import MultiRC_datasets  # noqa: F401, F403

opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_866273.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDataset

MultiRC_reader_cfg = dict(
    input_columns=['question', 'text', 'answer'],
    output_column='label')

MultiRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: 'Passage:{text}。\nQuestion:{question}。\nAnswer: {answer}. It is false.',
            1: 'Passage:{text}。\nQuestion:{question}。\nAnswer: {answer}. It is true.',
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

MultiRC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

MultiRC_datasets = [
    dict(
        type=MultiRCDataset,
        abbr='MultiRC',
        path='./data/SuperGLUE/MultiRC/val.jsonl',
        reader_cfg=MultiRC_reader_cfg,
        infer_cfg=MultiRC_infer_cfg,
        eval_cfg=MultiRC_eval_cfg)
]

opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_ced824.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDataset

MultiRC_reader_cfg = dict(
    input_columns=['question', 'text', 'answer'],
    output_column='label',
)

MultiRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?'),
                dict(role='BOT', prompt='No, it is false.'),
            ]),
            1: dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?'),
                dict(role='BOT', prompt='Yes, it is true.'),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

MultiRC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

MultiRC_datasets = [
    dict(
        type=MultiRCDataset,
        abbr='MultiRC',
        path='./data/SuperGLUE/MultiRC/val.jsonl',
        reader_cfg=MultiRC_reader_cfg,
        infer_cfg=MultiRC_infer_cfg,
        eval_cfg=MultiRC_eval_cfg,
    )
]

opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_RTE_gen_68aac7 import RTE_datasets  # noqa: F401, F403

opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

RTE_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
)

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

RTE_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

RTE_datasets = [
    dict(
        abbr='RTE',
        type=AXDatasetV2,  # RTE shares the same data format as AX
        path='./data/SuperGLUE/RTE/val.jsonl',
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
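
All of the CB, COPA, MultiRC, and RTE configs above score with AccEvaluator, which is plain accuracy over the post-processed predictions. As a stand-in for the metric it computes:

# Stand-in for AccEvaluator's core metric: percentage of exact label matches.
def accuracy(predictions, references):
    correct = sum(p == r for p, r in zip(predictions, references))
    return 100.0 * correct / len(references)

print(accuracy(['A', 'B', 'A'], ['A', 'B', 'B']))  # ~66.67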

opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_RTE_ppl_66caf3 import RTE_datasets  # noqa: F401, F403

opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': '{premise}?entailment, {hypothesis}',
            'not_entailment': '{premise}?not_entailment, {hypothesis}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

RTE_datasets = [
    dict(
        type=HFDataset,
        abbr='RTE',
        path='json',
        data_files='./data/SuperGLUE/RTE/val.jsonl',
        split='train',
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg)
]

opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'),
                dict(role='BOT', prompt='Yes'),
            ]),
            'not_entailment': dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?'),
                dict(role='BOT', prompt='No'),
            ])
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

RTE_datasets = [
    dict(
        type=HFDataset,
        abbr='RTE',
        path='json',
        data_files='./data/SuperGLUE/RTE/val.jsonl',
        split='train',
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]

opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen.py (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_ReCoRD_gen_30dea0 import ReCoRD_datasets  # noqa: F401, F403

opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_0f7784.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset, ReCoRD_postprocess

ReCoRD_reader_cfg = dict(
    input_columns=['question', 'text'],
    output_column='answers')

ReCoRD_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template='Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result?Give me the entity name:'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

ReCoRD_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    pred_postprocessor=dict(type=ReCoRD_postprocess))

ReCoRD_datasets = [
    dict(
        type=ReCoRDDataset,
        abbr='ReCoRD',
        path='./data/SuperGLUE/ReCoRD/val.jsonl',
        reader_cfg=ReCoRD_reader_cfg,
        infer_cfg=ReCoRD_infer_cfg,
        eval_cfg=ReCoRD_eval_cfg)
]
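
ReCoRD is scored by exact match rather than accuracy: ReCoRD_postprocess trims the generation down to a bare entity string, and EMEvaluator checks it against the gold answer set. A rough stand-in for both steps (the real postprocessing rules may differ):

# Stand-in for ReCoRD_postprocess + EMEvaluator (trimming rules assumed).
def postprocess(pred: str) -> str:
    return pred.strip().split('\n')[0].strip(' .:')  # keep first line, drop stray punctuation

def exact_match(predictions, references):
    # A prediction counts if it matches any of the example's gold answers.
    hits = sum(postprocess(p) in refs for p, refs in zip(predictions, references))
    return 100.0 * hits / len(references)

print(exact_match(['Washington.'], [['Washington', 'George Washington']]))  # 100.0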

opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_30dea0.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset

ReCoRD_reader_cfg = dict(
    input_columns=['question', 'text'],
    output_column='answers',
)

ReCoRD_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Passage: {text}\nResult: {question}\nQuestion: What entity does ____ refer to in the result? Give me the entity name:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

ReCoRD_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type='ReCoRD'),
)

ReCoRD_datasets = [
    dict(
        type=ReCoRDDataset,
        abbr='ReCoRD',
        path='./data/SuperGLUE/ReCoRD/val.jsonl',
        reader_cfg=ReCoRD_reader_cfg,
        infer_cfg=ReCoRD_infer_cfg,
        eval_cfg=ReCoRD_eval_cfg,
    )
]