Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
Ecological Empowerment
opencompass
Commits
c289ecc0
Commit
c289ecc0
authored
Oct 21, 2025
by
xinghao
Browse files
Initial commit
parents
Pipeline
#3004
canceled with stages
Changes
750
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
607 additions
and
0 deletions
+607
-0
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_27071f.py
...atasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_27071f.py
+43
-0
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py
...nfigs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py
+4
-0
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_866273.py
...atasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_866273.py
+30
-0
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_ced824.py
...atasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_ced824.py
+47
-0
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py
...mpass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py
+4
-0
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py
...onfigs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py
+43
-0
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py
...mpass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py
+4
-0
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py
...onfigs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py
+34
-0
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py
...onfigs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py
+53
-0
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen.py
...configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen.py
+4
-0
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_0f7784.py
.../datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_0f7784.py
+29
-0
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_30dea0.py
.../datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_30dea0.py
+42
-0
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_a69961.py
.../datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_a69961.py
+35
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen.py
...mpass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen.py
+4
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen_7902a7.py
...onfigs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen_7902a7.py
+43
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen_fe4bf3.py
...onfigs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen_fe4bf3.py
+43
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl.py
...mpass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl.py
+4
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_003529.py
...onfigs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_003529.py
+41
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_1c4a90.py
...onfigs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_1c4a90.py
+49
-0
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_d0f531.py
...onfigs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_d0f531.py
+51
-0
No files found.
Too many changes to show.
To preserve performance only
750 of 750+
files are displayed.
Plain diff
Email patch
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_27071f.py
0 → 100644
View file @
c289ecc0
# Generative (gen) MultiRC config: the model answers A/B and the first
# option letter in its output is compared against the gold label.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

# Columns fed into the prompt and the column holding the gold answer.
MultiRC_reader_cfg = {
    'input_columns': ['question', 'text', 'answer'],
    'output_column': 'label',
}

# Single-turn chat prompt; zero-shot retrieval; free-form generation.
MultiRC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'round': [
                {
                    'role': 'HUMAN',
                    'prompt':
                    '{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?\nA. Yes\nB. No\nAnswer:',
                },
            ]
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

# Accuracy over the extracted first option letter (A or B).
MultiRC_eval_cfg = {
    'evaluator': {'type': AccEvaluator},
    'pred_role': 'BOT',
    'pred_postprocessor': {
        'type': first_option_postprocess,
        'options': 'AB',
    },
}

MultiRC_datasets = [
    {
        'abbr': 'MultiRC',
        'type': MultiRCDatasetV2,
        'path': './data/SuperGLUE/MultiRC/val.jsonl',
        'reader_cfg': MultiRC_reader_cfg,
        'infer_cfg': MultiRC_infer_cfg,
        'eval_cfg': MultiRC_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py
0 → 100644
View file @
c289ecc0
# Default MultiRC PPL config: re-exports the chat-style ced824 variant.
from mmengine.config import read_base

with read_base():
    # Re-exported so downstream configs can import MultiRC_datasets here.
    from .SuperGLUE_MultiRC_ppl_ced824 import MultiRC_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_866273.py
0 → 100644
View file @
c289ecc0
# PPL-style MultiRC config: the model's perplexity over two candidate
# continuations ("It is false." vs "It is true.") selects the prediction.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDataset

MultiRC_reader_cfg = dict(
    input_columns=['question', 'text', 'answer'],
    output_column='label')

MultiRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # One template per label value. Both options must present the same
        # context so only the verdict differs; the label-1 template
        # previously read 'Passage:</P>。' (the '{text}' placeholder was
        # lost), which dropped the passage from the "true" option and
        # skewed the PPL comparison. Restored to mirror label 0.
        template={
            0: 'Passage:{text}。\nQuestion:{question}。\nAnswer: {answer}. It is false.',
            1: 'Passage:{text}。\nQuestion:{question}。\nAnswer: {answer}. It is true.',
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

MultiRC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

MultiRC_datasets = [
    dict(
        type=MultiRCDataset,
        abbr='MultiRC',
        path='./data/SuperGLUE/MultiRC/val.jsonl',
        reader_cfg=MultiRC_reader_cfg,
        infer_cfg=MultiRC_infer_cfg,
        eval_cfg=MultiRC_eval_cfg)
]
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl_ced824.py
0 → 100644
View file @
c289ecc0
# Chat-style PPL MultiRC config: each label maps to a HUMAN/BOT dialog
# whose BOT turn carries the candidate verdict; lowest PPL wins.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDataset

MultiRC_reader_cfg = {
    'input_columns': ['question', 'text', 'answer'],
    'output_column': 'label',
}

MultiRC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        # Keys 0/1 match the dataset's integer labels.
        'template': {
            0: {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        '{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?',
                    },
                    {'role': 'BOT', 'prompt': 'No, it is false.'},
                ]
            },
            1: {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        '{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?',
                    },
                    {'role': 'BOT', 'prompt': 'Yes, it is true.'},
                ]
            },
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': PPLInferencer},
}

MultiRC_eval_cfg = {'evaluator': {'type': AccEvaluator}}

MultiRC_datasets = [
    {
        'type': MultiRCDataset,
        'abbr': 'MultiRC',
        'path': './data/SuperGLUE/MultiRC/val.jsonl',
        'reader_cfg': MultiRC_reader_cfg,
        'infer_cfg': MultiRC_infer_cfg,
        'eval_cfg': MultiRC_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen.py
0 → 100644
View file @
c289ecc0
# Default RTE gen config: re-exports the 68aac7 variant.
from mmengine.config import read_base

with read_base():
    # Re-exported so downstream configs can import RTE_datasets here.
    from .SuperGLUE_RTE_gen_68aac7 import RTE_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_68aac7.py
0 → 100644
View file @
c289ecc0
# Generative (gen) RTE config: entailment posed as an A/B question; the
# first option letter in the model output is scored against the label.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDatasetV2
from opencompass.utils.text_postprocessors import first_option_postprocess

RTE_reader_cfg = {
    'input_columns': ['hypothesis', 'premise'],
    'output_column': 'label',
}

RTE_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'round': [
                {
                    'role': 'HUMAN',
                    'prompt':
                    '{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:',
                },
            ]
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

RTE_eval_cfg = {
    'evaluator': {'type': AccEvaluator},
    'pred_role': 'BOT',
    'pred_postprocessor': {
        'type': first_option_postprocess,
        'options': 'AB',
    },
}

RTE_datasets = [
    {
        'abbr': 'RTE',
        'type': AXDatasetV2,  # rte share the same format with ax
        'path': './data/SuperGLUE/RTE/val.jsonl',
        'reader_cfg': RTE_reader_cfg,
        'infer_cfg': RTE_infer_cfg,
        'eval_cfg': RTE_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl.py
0 → 100644
View file @
c289ecc0
# Default RTE PPL config: re-exports the chat-style 66caf3 variant.
from mmengine.config import read_base

with read_base():
    # Re-exported so downstream configs can import RTE_datasets here.
    from .SuperGLUE_RTE_ppl_66caf3 import RTE_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_50f8f6.py
0 → 100644
View file @
c289ecc0
# Plain-string PPL RTE config: one template per label name; the lower-PPL
# rendering decides the prediction. Loads the jsonl through HFDataset.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = {
    'input_columns': ['hypothesis', 'premise'],
    'output_column': 'label',
    # The jsonl is loaded as a single 'train' split; evaluate on it.
    'test_split': 'train',
}

RTE_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'entailment': '{premise}?entailment, {hypothesis}',
            'not_entailment': '{premise}?not_entailment, {hypothesis}',
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': PPLInferencer},
}

RTE_eval_cfg = {'evaluator': {'type': AccEvaluator}}

RTE_datasets = [
    {
        'type': HFDataset,
        'abbr': 'RTE',
        'path': 'json',
        'data_files': './data/SuperGLUE/RTE/val.jsonl',
        'split': 'train',
        'reader_cfg': RTE_reader_cfg,
        'infer_cfg': RTE_infer_cfg,
        'eval_cfg': RTE_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_66caf3.py
0 → 100644
View file @
c289ecc0
# Chat-style PPL RTE config: the BOT turn answers Yes/No; the lower-PPL
# dialog decides the prediction. Loads the jsonl through HFDataset.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = {
    'input_columns': ['hypothesis', 'premise'],
    'output_column': 'label',
    # The jsonl is loaded as a single 'train' split; evaluate on it.
    'test_split': 'train',
}

RTE_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        # Keys match the dataset's string labels.
        'template': {
            'entailment': {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        '{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?',
                    },
                    {'role': 'BOT', 'prompt': 'Yes'},
                ]
            },
            'not_entailment': {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        '{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?',
                    },
                    {'role': 'BOT', 'prompt': 'No'},
                ]
            },
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': PPLInferencer},
}

RTE_eval_cfg = {'evaluator': {'type': AccEvaluator}}

RTE_datasets = [
    {
        'type': HFDataset,
        'abbr': 'RTE',
        'path': 'json',
        'data_files': './data/SuperGLUE/RTE/val.jsonl',
        'split': 'train',
        'reader_cfg': RTE_reader_cfg,
        'infer_cfg': RTE_infer_cfg,
        'eval_cfg': RTE_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen.py
0 → 100644
View file @
c289ecc0
# Default ReCoRD gen config: re-exports the 30dea0 variant.
from mmengine.config import read_base

with read_base():
    # Re-exported so downstream configs can import ReCoRD_datasets here.
    from .SuperGLUE_ReCoRD_gen_30dea0 import ReCoRD_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_0f7784.py
0 → 100644
View file @
c289ecc0
# Generative ReCoRD config (plain-string prompt): the model names the
# entity behind the ____ placeholder; exact match after postprocessing.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset, ReCoRD_postprocess

ReCoRD_reader_cfg = {
    'input_columns': ['question', 'text'],
    'output_column': 'answers',
}

ReCoRD_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template':
        'Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result?Give me the entity name:',
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

# Exact-match scoring on the cleaned-up model output.
ReCoRD_eval_cfg = {
    'evaluator': {'type': EMEvaluator},
    'pred_postprocessor': {'type': ReCoRD_postprocess},
}

ReCoRD_datasets = [
    {
        'type': ReCoRDDataset,
        'abbr': 'ReCoRD',
        'path': './data/SuperGLUE/ReCoRD/val.jsonl',
        'reader_cfg': ReCoRD_reader_cfg,
        'infer_cfg': ReCoRD_infer_cfg,
        'eval_cfg': ReCoRD_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_30dea0.py
0 → 100644
View file @
c289ecc0
# Generative ReCoRD config (chat-style prompt): exact match against the
# gold answers after the registry-named 'ReCoRD' postprocessor runs.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset

ReCoRD_reader_cfg = {
    'input_columns': ['question', 'text'],
    'output_column': 'answers',
}

ReCoRD_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'round': [
                {
                    'role': 'HUMAN',
                    'prompt':
                    'Passage: {text}\nResult: {question}\nQuestion: What entity does ____ refer to in the result? Give me the entity name:',
                },
            ]
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

ReCoRD_eval_cfg = {
    'evaluator': {'type': EMEvaluator},
    'pred_role': 'BOT',
    # Postprocessor resolved by registry name rather than direct import.
    'pred_postprocessor': {'type': 'ReCoRD'},
}

ReCoRD_datasets = [
    {
        'type': ReCoRDDataset,
        'abbr': 'ReCoRD',
        'path': './data/SuperGLUE/ReCoRD/val.jsonl',
        'reader_cfg': ReCoRD_reader_cfg,
        'infer_cfg': ReCoRD_infer_cfg,
        'eval_cfg': ReCoRD_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_a69961.py
0 → 100644
View file @
c289ecc0
# Generative ReCoRD config (V2 dataset, sectioned prompt): exact match
# after ReCoRD_postprocess cleans the generated entity name.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDatasetV2, ReCoRD_postprocess

ReCoRD_reader_cfg = {
    'input_columns': ['question', 'text'],
    'output_column': 'answers',
}

ReCoRD_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'round': [
                {
                    'role': 'HUMAN',
                    'prompt':
                    'Passage:\n{text}\nResult:\n{question}\nQuestion:\nWhat entity does ____ refer to in the Result?\nAnswer:',
                },
            ]
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

ReCoRD_eval_cfg = {
    'evaluator': {'type': EMEvaluator},
    'pred_role': 'BOT',
    'pred_postprocessor': {'type': ReCoRD_postprocess},
}

ReCoRD_datasets = [
    {
        'type': ReCoRDDatasetV2,
        'abbr': 'ReCoRD',
        'path': './data/SuperGLUE/ReCoRD/val.jsonl',
        'reader_cfg': ReCoRD_reader_cfg,
        'infer_cfg': ReCoRD_infer_cfg,
        'eval_cfg': ReCoRD_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen.py
0 → 100644
View file @
c289ecc0
# Default WSC gen config: re-exports the fe4bf3 variant.
from mmengine.config import read_base

with read_base():
    # Re-exported so downstream configs can import WSC_datasets here.
    from .SuperGLUE_WSC_gen_fe4bf3 import WSC_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen_7902a7.py
0 → 100644
View file @
c289ecc0
# Generative WSC config (V2 dataset): coreference posed as an A/B
# question; the first capital letter of the reply is scored.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess

WSC_reader_cfg = {
    'input_columns': ['span1', 'span2', 'text'],
    'output_column': 'label',
}

WSC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'round': [
                {
                    'role': 'HUMAN',
                    'prompt':
                    "{text}\nIs '{span1}' and '{span2}' refers to the same entity in the above sentence?\nA. Yes\nB. No\nAnswer:",
                },
            ]
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

WSC_eval_cfg = {
    'evaluator': {'type': AccEvaluator},
    'pred_role': 'BOT',
    'pred_postprocessor': {'type': first_capital_postprocess},
}

WSC_datasets = [
    {
        'abbr': 'WSC',
        'type': WSCDatasetV2,
        'path': './data/SuperGLUE/WSC/val.jsonl',
        'reader_cfg': WSC_reader_cfg,
        'infer_cfg': WSC_infer_cfg,
        'eval_cfg': WSC_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_gen_fe4bf3.py
0 → 100644
View file @
c289ecc0
# Generative WSC config (V3 dataset): pronoun-reference question with
# span markers (# pronoun #, * candidate *); A/B answer scored by the
# first capital letter of the reply.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDatasetV3
from opencompass.utils.text_postprocessors import first_capital_postprocess

WSC_reader_cfg = {
    'input_columns': ['span1', 'span2', 'text'],
    'output_column': 'label',
}

WSC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'round': [
                {
                    'role': 'HUMAN',
                    'prompt':
                    'Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer:',
                },
            ]
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': GenInferencer},
}

WSC_eval_cfg = {
    'evaluator': {'type': AccEvaluator},
    'pred_role': 'BOT',
    'pred_postprocessor': {'type': first_capital_postprocess},
}

WSC_datasets = [
    {
        'abbr': 'WSC',
        'type': WSCDatasetV3,
        'path': './data/SuperGLUE/WSC/val.jsonl',
        'reader_cfg': WSC_reader_cfg,
        'infer_cfg': WSC_infer_cfg,
        'eval_cfg': WSC_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl.py
0 → 100644
View file @
c289ecc0
# Default WSC PPL config: re-exports the 1c4a90 variant.
from mmengine.config import read_base

with read_base():
    # Re-exported so downstream configs can import WSC_datasets here.
    from .SuperGLUE_WSC_ppl_1c4a90 import WSC_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_003529.py
0 → 100644
View file @
c289ecc0
# Sentence-substitution PPL WSC config: compares the perplexity of the
# original sentence against a rewritten one ('new_text', built by the
# dataset loader); the lower-PPL variant picks the answer.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDataset

WSC_reader_cfg = {
    'input_columns': ['span1', 'span2', 'text', 'new_text'],
    'output_column': 'answer',
}

WSC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        # Label 0 scores the original text, label 1 the rewritten text.
        'template': {
            0: {'round': [{'role': 'HUMAN', 'prompt': '{text}'}]},
            1: {'round': [{'role': 'HUMAN', 'prompt': '{new_text}'}]},
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': PPLInferencer},
}

WSC_eval_cfg = {'evaluator': {'type': AccEvaluator}}

WSC_datasets = [
    {
        'type': WSCDataset,
        'path': 'json',
        'abbr': 'WSC',
        'data_files': './data/SuperGLUE/WSC/val.jsonl',
        'split': 'train',
        'reader_cfg': WSC_reader_cfg,
        'infer_cfg': WSC_infer_cfg,
        'eval_cfg': WSC_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_1c4a90.py
0 → 100644
View file @
c289ecc0
# Chat-style PPL WSC config (V3 dataset): both label dialogs share the
# same A/B question; the BOT turn carries the candidate letter.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDatasetV3

WSC_reader_cfg = {
    'input_columns': ['span1', 'span2', 'text'],
    'output_column': 'label',
}

WSC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'A': {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        'Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer: ',
                    },
                    {'role': 'BOT', 'prompt': 'A'},
                ]
            },
            'B': {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        'Passage: {text}\nDoes the pronoun # {span2} # refer to * {span1} *?\nA. Yes\nB. No\nAnswer: ',
                    },
                    {'role': 'BOT', 'prompt': 'B'},
                ]
            },
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': PPLInferencer},
}

WSC_eval_cfg = {'evaluator': {'type': AccEvaluator}}

WSC_datasets = [
    {
        'abbr': 'WSC',
        'type': WSCDatasetV3,
        'path': './data/SuperGLUE/WSC/val.jsonl',
        'reader_cfg': WSC_reader_cfg,
        'infer_cfg': WSC_infer_cfg,
        'eval_cfg': WSC_eval_cfg,
    }
]
opencompass/configs/datasets/SuperGLUE_WSC/SuperGLUE_WSC_ppl_d0f531.py
0 → 100644
View file @
c289ecc0
# Chat-style PPL WSC config (V2 dataset): the BOT turn answers Yes/No to
# the same-entity question; the lower-PPL dialog decides the prediction.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WSCDatasetV2

WSC_reader_cfg = {
    'input_columns': ['span1', 'span2', 'text'],
    'output_column': 'label',
}

WSC_infer_cfg = {
    'prompt_template': {
        'type': PromptTemplate,
        'template': {
            'A': {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        "{text}\nIs '{span1}' and '{span2}' refers to the same entity in the above sentence?",
                    },
                    {'role': 'BOT', 'prompt': 'Yes'},
                ]
            },
            'B': {
                'round': [
                    {
                        'role': 'HUMAN',
                        'prompt':
                        "{text}\nIs '{span1}' and '{span2}' refers to the same entity in the above sentence?",
                    },
                    {'role': 'BOT', 'prompt': 'No'},
                ]
            },
        },
    },
    'retriever': {'type': ZeroRetriever},
    'inferencer': {'type': PPLInferencer},
}

WSC_eval_cfg = {'evaluator': {'type': AccEvaluator}}

WSC_datasets = [
    {
        'abbr': 'WSC',
        'type': WSCDatasetV2,
        'path': './data/SuperGLUE/WSC/val.jsonl',
        'reader_cfg': WSC_reader_cfg,
        'infer_cfg': WSC_infer_cfg,
        'eval_cfg': WSC_eval_cfg,
    }
]
Prev
1
…
24
25
26
27
28
29
30
31
32
…
38
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment