Ecological Empowerment / opencompass

Commit c289ecc0 · authored Oct 21, 2025 by xinghao
Initial commit
Pipeline #3004 canceled
Changes: 750 · Pipelines: 1
Showing 20 changed files with 590 additions and 0 deletions (+590 −0).
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen_883d50.py  (+41 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_16b1d9.py  (+43 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_314797.py  (+43 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_314b96.py  (+45 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_4da4db.py  (+45 −0)
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_9619db.py  (+34 −0)
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen_854c6c.py  (+44 −0)
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_0143fe.py  (+62 −0)
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_11c175.py  (+33 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_91ca53.py  (+44 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl.py  (+4 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_54058d.py  (+34 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_5c24f1.py  (+45 −0)
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_9f3618.py  (+49 −0)
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen.py  (+4 −0)
Too many changes to show: to preserve performance, only 750 of 750+ files are displayed.
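These dataset configs are not run on their own; they are meant to be composed into a top-level OpenCompass evaluation config. A minimal sketch of that composition, assuming the opencompass.configs package layout these files live in (the model list would be defined elsewhere and is omitted here):

    from mmengine.config import read_base

    with read_base():
        # Pull in the dataset lists defined by the files in this commit.
        from opencompass.configs.datasets.SuperGLUE_BoolQ.SuperGLUE_BoolQ_gen import BoolQ_datasets
        from opencompass.configs.datasets.SuperGLUE_CB.SuperGLUE_CB_gen import CB_datasets
        from opencompass.configs.datasets.SuperGLUE_COPA.SuperGLUE_COPA_gen import COPA_datasets

    # Aggregate everything for the runner.
    datasets = [*BoolQ_datasets, *CB_datasets, *COPA_datasets]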
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_BoolQ_gen_883d50 import BoolQ_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen_883d50.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import GenInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import BoolQDatasetV2
    from opencompass.utils.text_postprocessors import first_capital_postprocess

    BoolQ_reader_cfg = dict(
        input_columns=['question', 'passage'],
        output_column='label',
    )

    BoolQ_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{passage}\nQuestion: {question}\nA. Yes\nB. No\nAnswer:'),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    BoolQ_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=first_capital_postprocess),
    )

    BoolQ_datasets = [
        dict(
            abbr='BoolQ',
            type=BoolQDatasetV2,
            path='opencompass/boolq',
            reader_cfg=BoolQ_reader_cfg,
            infer_cfg=BoolQ_infer_cfg,
            eval_cfg=BoolQ_eval_cfg,
        )
    ]
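In the gen-style config above, the model writes a free-form answer and first_capital_postprocess (from opencompass.utils.text_postprocessors) reduces it to a letter label before accuracy is computed. A rough illustrative sketch of that idea, not the library's exact implementation:

    def first_capital_sketch(text: str) -> str:
        # Return the first uppercase letter in the prediction, else ''.
        for ch in text:
            if ch.isupper():
                return ch
        return ''

    assert first_capital_sketch('B. No') == 'B'
    assert first_capital_sketch('the answer is A') == 'A'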
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_BoolQ_ppl_314b96 import BoolQ_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_16b1d9.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import BoolQDatasetV2

    BoolQ_reader_cfg = dict(
        input_columns=['question', 'passage'],
        output_column='label',
    )

    BoolQ_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                'A': dict(round=[
                    dict(role='HUMAN', prompt='{passage}\nQuestion: {question}?'),
                    dict(role='BOT', prompt='Yes'),
                ]),
                'B': dict(round=[
                    dict(role='HUMAN', prompt='{passage}\nQuestion: {question}?'),
                    dict(role='BOT', prompt='No'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    BoolQ_datasets = [
        dict(
            abbr='BoolQ',
            type=BoolQDatasetV2,
            path='opencompass/boolq',
            reader_cfg=BoolQ_reader_cfg,
            infer_cfg=BoolQ_infer_cfg,
            eval_cfg=BoolQ_eval_cfg,
        )
    ]
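The ppl-style config above never asks the model to generate: PPLInferencer scores each fully templated candidate dialogue (label 'A' ends in 'Yes', 'B' in 'No') and the lowest-perplexity label wins. A minimal sketch of that decision rule, where avg_nll is a hypothetical stand-in for the model's mean per-token negative log-likelihood:

    import math

    def ppl_choose(avg_nll, candidates):
        # candidates: {label: fully templated prompt + answer text}.
        # Perplexity = exp(mean NLL), so the argmin over NLL is also the
        # argmin over perplexity.
        return min(candidates, key=lambda label: math.exp(avg_nll(candidates[label])))

In OpenCompass this selection is handled inside PPLInferencer and scored by AccEvaluator; the sketch only shows the decision rule.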
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_314797.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import BoolQDatasetV3

    BoolQ_reader_cfg = dict(
        input_columns=['question', 'passage'],
        output_column='label',
        test_split='train')

    BoolQ_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                'false': dict(round=[
                    dict(role='HUMAN', prompt='Passage: {passage}\nQuestion: {question}?'),
                    dict(role='BOT', prompt='Answer: No'),
                ]),
                'true': dict(round=[
                    dict(role='HUMAN', prompt='Passage: {passage}\nQuestion: {question}?'),
                    dict(role='BOT', prompt='Answer: Yes'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    BoolQ_datasets = [
        dict(
            abbr='BoolQ',
            type=BoolQDatasetV3,
            path='opencompass/boolq',
            reader_cfg=BoolQ_reader_cfg,
            infer_cfg=BoolQ_infer_cfg,
            eval_cfg=BoolQ_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_314b96.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import BoolQDataset

    BoolQ_reader_cfg = dict(
        input_columns=['question', 'passage'],
        output_column='answer',
        test_split='train')

    BoolQ_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                0: dict(round=[
                    dict(role='HUMAN', prompt='{passage}\nQuestion: {question}?'),
                    dict(role='BOT', prompt='No'),
                ]),
                1: dict(round=[
                    dict(role='HUMAN', prompt='{passage}\nQuestion: {question}?'),
                    dict(role='BOT', prompt='Yes'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    BoolQ_datasets = [
        dict(
            type=BoolQDataset,
            abbr='BoolQ',
            path='json',
            data_files='opencompass/boolq',
            split='train',
            reader_cfg=BoolQ_reader_cfg,
            infer_cfg=BoolQ_infer_cfg,
            eval_cfg=BoolQ_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_4da4db.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import BoolQDataset

    BoolQ_reader_cfg = dict(
        input_columns=['question', 'passage'],
        output_column='answer',
        test_split='train')

    BoolQ_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                0: dict(round=[
                    dict(role='HUMAN', prompt='{passage}\nQuestion: {question}'),
                    dict(role='BOT', prompt='No.'),
                ]),
                1: dict(round=[
                    dict(role='HUMAN', prompt='{passage}\nQuestion: {question}'),
                    dict(role='BOT', prompt='Yes.'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    BoolQ_datasets = [
        dict(
            type=BoolQDataset,
            abbr='BoolQ',
            path='json',
            data_files='opencompass/boolq',
            split='train',
            reader_cfg=BoolQ_reader_cfg,
            infer_cfg=BoolQ_infer_cfg,
            eval_cfg=BoolQ_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_9619db.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import BoolQDataset

    BoolQ_reader_cfg = dict(
        input_columns=['question', 'passage'],
        output_column='answer',
        test_split='train')

    BoolQ_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                0: 'Passage:{passage}。\nQuestion:{question}。\nAnswer: No.',
                1: 'Passage:{passage}。\nQuestion:{question}。\nAnswer: Yes.',
            }),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer))

    BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    BoolQ_datasets = [
        dict(
            type=BoolQDataset,
            abbr='BoolQ',
            path='json',
            data_files='opencompass/boolq',
            split='train',
            reader_cfg=BoolQ_reader_cfg,
            infer_cfg=BoolQ_infer_cfg,
            eval_cfg=BoolQ_eval_cfg)
    ]
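This variant uses plain string templates rather than role-tagged chat rounds; the {passage} and {question} placeholders are filled from the reader columns. A hypothetical illustration of the substitution using str.format, which mirrors the placeholder syntax (the row values are invented for the example):

    row = {'passage': 'Persian is a pluricentric language.',
           'question': 'is persian and farsi the same language'}
    filled = 'Passage:{passage}。\nQuestion:{question}。\nAnswer: Yes.'.format(**row)
    print(filled)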
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_CB_gen_854c6c import CB_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen_854c6c.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import GenInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import CBDatasetV2
    from opencompass.utils.text_postprocessors import first_option_postprocess

    CB_reader_cfg = dict(
        input_columns=['premise', 'hypothesis'],
        output_column='label',
    )

    CB_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\n{hypothesis}\nWhat is the relation between the two sentences?\nA. Contradiction\nB. Entailment\nC. Neutral\nAnswer:'),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    CB_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
    )

    CB_datasets = [
        dict(
            abbr='CB',
            type=CBDatasetV2,
            path='./data/SuperGLUE/CB/val.jsonl',
            reader_cfg=CB_reader_cfg,
            infer_cfg=CB_infer_cfg,
            eval_cfg=CB_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_CB_ppl_0143fe import CB_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_0143fe.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import HFDataset

    CB_reader_cfg = dict(
        input_columns=['premise', 'hypothesis'],
        output_column='label',
    )

    CB_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                'contradiction': dict(round=[
                    dict(role='HUMAN',
                         prompt='{premise}\n{hypothesis}\nWhat is the relation between the two sentences?'),
                    dict(role='BOT', prompt='Contradiction'),
                ]),
                'entailment': dict(round=[
                    dict(role='HUMAN',
                         prompt='{premise}\n{hypothesis}\nWhat is the relation between the two sentences?'),
                    dict(role='BOT', prompt='Entailment'),
                ]),
                'neutral': dict(round=[
                    dict(role='HUMAN',
                         prompt='{premise}\n{hypothesis}\nWhat is the relation between the two sentences?'),
                    dict(role='BOT', prompt='Neutral'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    CB_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    CB_datasets = [
        dict(
            type=HFDataset,
            abbr='CB',
            path='json',
            split='train',
            data_files='./data/SuperGLUE/CB/val.jsonl',
            reader_cfg=CB_reader_cfg,
            infer_cfg=CB_infer_cfg,
            eval_cfg=CB_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_11c175.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import HFDataset

    CB_reader_cfg = dict(
        input_columns=['premise', 'hypothesis'],
        output_column='label')

    CB_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                'contradiction': '{premise}?contradiction, {hypothesis}',
                'entailment': '{premise}?entailment, {hypothesis}',
                'neutral': '{premise}?neutral, {hypothesis}'
            }),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer))

    CB_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    CB_datasets = [
        dict(
            type=HFDataset,
            abbr='CB',
            path='json',
            split='train',
            data_files='./data/SuperGLUE/CB/val.jsonl',
            reader_cfg=CB_reader_cfg,
            infer_cfg=CB_infer_cfg,
            eval_cfg=CB_eval_cfg)
    ]
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_COPA_gen_91ca53 import COPA_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_91ca53.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import GenInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import COPADatasetV2
    from opencompass.utils.text_postprocessors import first_option_postprocess

    COPA_reader_cfg = dict(
        input_columns=['question', 'premise', 'choice1', 'choice2'],
        output_column='label',
    )

    COPA_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{premise}\nQuestion: Which may be the {question}?\nA. {choice1}\nB. {choice2}\nAnswer:'),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    COPA_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
    )

    COPA_datasets = [
        dict(
            abbr='COPA',
            type=COPADatasetV2,
            path='./data/SuperGLUE/COPA/val.jsonl',
            reader_cfg=COPA_reader_cfg,
            infer_cfg=COPA_infer_cfg,
            eval_cfg=COPA_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_COPA_ppl_9f3618 import COPA_datasets  # noqa: F401, F403
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_54058d.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import HFDataset

    COPA_reader_cfg = dict(
        input_columns=['question', 'premise', 'choice1', 'choice2'],
        output_column='label',
        test_split='train')

    COPA_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                0: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice1}.',
                1: 'Passage:{premise}。\nQuestion:{question}。\nAnswer: {choice2}.',
            }),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer))

    COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    COPA_datasets = [
        dict(
            type=HFDataset,
            abbr='COPA',
            path='json',
            data_files='./data/SuperGLUE/COPA/val.jsonl',
            split='train',
            reader_cfg=COPA_reader_cfg,
            infer_cfg=COPA_infer_cfg,
            eval_cfg=COPA_eval_cfg)
    ]
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_5c24f1.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import HFDataset

    COPA_reader_cfg = dict(
        input_columns=['question', 'premise', 'choice1', 'choice2'],
        output_column='label',
        test_split='train')

    COPA_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                0: dict(round=[
                    dict(role='HUMAN', prompt='{premise}\nQuestion: {question}\nAnswer:'),
                    dict(role='BOT', prompt='{choice1}'),
                ]),
                1: dict(round=[
                    dict(role='HUMAN', prompt='{premise}\nQuestion: {question}\nAnswer:'),
                    dict(role='BOT', prompt='{choice2}'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    COPA_datasets = [
        dict(
            type=HFDataset,
            abbr='COPA',
            path='json',
            data_files='./data/SuperGLUE/COPA/val.jsonl',
            split='train',
            reader_cfg=COPA_reader_cfg,
            infer_cfg=COPA_infer_cfg,
            eval_cfg=COPA_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_9f3618.py (new file, mode 100644)

    from opencompass.openicl.icl_prompt_template import PromptTemplate
    from opencompass.openicl.icl_retriever import ZeroRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer
    from opencompass.openicl.icl_evaluator import AccEvaluator
    from opencompass.datasets import HFDataset

    COPA_reader_cfg = dict(
        input_columns=['question', 'premise', 'choice1', 'choice2'],
        output_column='label',
        test_split='train')

    COPA_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                0: dict(round=[
                    dict(role='HUMAN', prompt='{premise}\nQuestion: What may be the {question}?\nAnswer:'),
                    dict(role='BOT', prompt='{choice1}'),
                ]),
                1: dict(round=[
                    dict(role='HUMAN', prompt='{premise}\nQuestion: What may be the {question}?\nAnswer:'),
                    dict(role='BOT', prompt='{choice2}'),
                ]),
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

    COPA_datasets = [
        dict(
            type=HFDataset,
            abbr='COPA',
            path='json',
            data_files='./data/SuperGLUE/COPA/val.jsonl',
            split='train',
            reader_cfg=COPA_reader_cfg,
            infer_cfg=COPA_infer_cfg,
            eval_cfg=COPA_eval_cfg,
        )
    ]
opencompass/configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen.py (new file, mode 100644)

    from mmengine.config import read_base

    with read_base():
        from .SuperGLUE_MultiRC_gen_27071f import MultiRC_datasets  # noqa: F401, F403