Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
opencompass
Commits
fb111087
"git@developer.sourcefind.cn:modelzoo/alphafold2_jax.git" did not exist on "98caef21efa959e44ed01ab33cfb15ab04a39418"
Commit
fb111087
authored
Jul 04, 2023
by
yingfhu
Browse files
[Feat] support opencompass
parent
7d346000
Changes
81
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
554 additions
and
0 deletions
+554
-0
configs/datasets/ARC_c/ARC_c_ppl_2b1755.py
configs/datasets/ARC_c/ARC_c_ppl_2b1755.py
+33
-0
configs/datasets/CLUE_C3/CLUE_C3_gen.py
configs/datasets/CLUE_C3/CLUE_C3_gen.py
+4
-0
configs/datasets/CLUE_C3/CLUE_C3_gen_9e3de9.py
configs/datasets/CLUE_C3/CLUE_C3_gen_9e3de9.py
+50
-0
configs/datasets/CLUE_DRCD/CLUE_DRCD_gen.py
configs/datasets/CLUE_DRCD/CLUE_DRCD_gen.py
+4
-0
configs/datasets/CLUE_afqmc/CLUE_afqmc_gen_db509b.py
configs/datasets/CLUE_afqmc/CLUE_afqmc_gen_db509b.py
+42
-0
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_00b348.py
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_00b348.py
+34
-0
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_2313cf.py
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_2313cf.py
+44
-0
configs/datasets/CLUE_cmnli/CLUE_cmnli_gen.py
configs/datasets/CLUE_cmnli/CLUE_cmnli_gen.py
+4
-0
configs/datasets/CLUE_ocnli/CLUE_ocnli_gen.py
configs/datasets/CLUE_ocnli/CLUE_ocnli_gen.py
+4
-0
configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl.py
configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl.py
+4
-0
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen.py
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen.py
+4
-0
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen_276956.py
...gs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen_276956.py
+50
-0
configs/datasets/FewCLUE_csl/FewCLUE_csl_gen.py
configs/datasets/FewCLUE_csl/FewCLUE_csl_gen.py
+4
-0
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_8eee08.py
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_8eee08.py
+41
-0
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_gen_bef37f.py
.../datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_gen_bef37f.py
+49
-0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_33cc73.py
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_33cc73.py
+48
-0
configs/datasets/SuperGLUE_AX_b/SuperGLUE_AX_b_ppl_4bd960.py
configs/datasets/SuperGLUE_AX_b/SuperGLUE_AX_b_ppl_4bd960.py
+53
-0
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_f80fb0.py
...gs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_f80fb0.py
+45
-0
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_012063.py
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_012063.py
+33
-0
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py
+4
-0
No files found.
configs/datasets/ARC_c/ARC_c_ppl_2b1755.py
0 → 100644
View file @
fb111087
# Evaluation config for the ARC-c (ARC Challenge) dataset using a
# perplexity-style (PPL) inferencer: each answer option gets its own
# template, and accuracy is computed against the gold 'answerKey'.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset

# Reader: question plus the four option texts; gold label is the answer key.
ARC_c_reader_cfg = dict(
    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
    output_column='answerKey')

ARC_c_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # One completion per candidate answer; the PPL inferencer scores each.
        template={
            "A": "Question: {question}\nAnswer: {textA}",
            "B": "Question: {question}\nAnswer: {textB}",
            "C": "Question: {question}\nAnswer: {textC}",
            "D": "Question: {question}\nAnswer: {textD}"
        }),
    # Zero-shot: no in-context examples are retrieved.
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ARC_c_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ARC_c_datasets = [
    dict(
        type=ARCDataset,
        abbr='ARC-c',
        path='./data/ARC/ARC-c/ARC-Challenge-Dev.jsonl',
        reader_cfg=ARC_c_reader_cfg,
        infer_cfg=ARC_c_infer_cfg,
        eval_cfg=ARC_c_eval_cfg)
]
configs/datasets/CLUE_C3/CLUE_C3_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete C3 generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .CLUE_C3_gen_9e3de9 import C3_datasets  # noqa: F401, F403
configs/datasets/CLUE_C3/CLUE_C3_gen_9e3de9.py
0 → 100644
View file @
fb111087
# Evaluation config for the CLUE C3 multiple-choice reading-comprehension
# dataset using a generation (GenInferencer) setup: the model is asked to
# answer with a capital letter, which is post-processed and scored by accuracy.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset_V2

# Reader: passage content, question, the four choice texts (plus the raw
# 'choices' column); gold output is 'label'.
C3_reader_cfg = dict(
    input_columns=[
        "question",
        "content",
        "choice0",
        "choice1",
        "choice2",
        "choice3",
        "choices",
    ],
    output_column="label",
)

C3_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Single-turn chat prompt asking the model to pick A/B/C/D.
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="{content}\n问:{question}\nA. {choice0}\nB. {choice1}\nC. {choice2}\nD. {choice3}\n请从“A”,“B”,“C”,“D”中进行选择。\n答:",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

C3_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Extract the first capital letter from the generation as the prediction.
    pred_postprocessor=dict(type="first-capital"),
)

C3_datasets = [
    dict(
        abbr="C3",
        type=C3Dataset_V2,
        path="./data/CLUE/C3/dev_0.json",
        reader_cfg=C3_reader_cfg,
        infer_cfg=C3_infer_cfg,
        eval_cfg=C3_eval_cfg,
    )
]
configs/datasets/CLUE_DRCD/CLUE_DRCD_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete DRCD generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .CLUE_DRCD_gen_03b96b import DRCD_datasets  # noqa: F401, F403
configs/datasets/CLUE_afqmc/CLUE_afqmc_gen_db509b.py
0 → 100644
View file @
fb111087
# Evaluation config for the CLUE AFQMC sentence-pair matching dataset using
# a generation setup: the model answers A/B, the first capital letter is
# extracted, and accuracy is computed against 'label'.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AFQMCDataset_V2

# Reader: the two sentences to compare; evaluation runs on the 'train'
# split of the loaded file (the dev set is loaded as 'train' below).
afqmc_reader_cfg = dict(
    input_columns=["sentence1", "sentence2"],
    output_column="label",
    test_split="train")

afqmc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Single-turn chat prompt asking whether the two queries are identical
        # in meaning (A: not identical, B: identical).
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?\nA. 不完全一致\nB. 完全一致\n请从“A”,“B”中进行选择。\n答:",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

afqmc_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Map the free-form generation to a choice via its first capital letter.
    pred_postprocessor=dict(type="first-capital"),
)

afqmc_datasets = [
    dict(
        abbr="afqmc-dev",
        type=AFQMCDataset_V2,
        path="./data/CLUE/AFQMC/dev.json",
        reader_cfg=afqmc_reader_cfg,
        infer_cfg=afqmc_infer_cfg,
        eval_cfg=afqmc_eval_cfg,
    ),
]
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_00b348.py
0 → 100644
View file @
fb111087
# Evaluation config for CLUE AFQMC with a perplexity (PPL) setup: each of
# the two labels maps to a natural-language completion, and the PPL
# inferencer selects the better-scoring one.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

afqmc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    # The JSON file is loaded as a single 'train' split; evaluate on it.
    test_split='train')

afqmc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Integer keys match the dataset's label values:
        # 0 -> "different", 1 -> "similar".
        template={
            0: "{sentence1},{sentence2}不同。",
            1: "{sentence1},{sentence2}相似。"
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

afqmc_datasets = [
    dict(
        type=HFDataset,
        abbr='afqmc-dev',
        # Generic HuggingFace 'json' loader pointed at the local dev file.
        path='json',
        data_files='./data/CLUE/AFQMC/dev.json',
        split='train',
        reader_cfg=afqmc_reader_cfg,
        infer_cfg=afqmc_infer_cfg,
        eval_cfg=afqmc_eval_cfg),
]
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_2313cf.py
0 → 100644
View file @
fb111087
# Evaluation config for CLUE AFQMC with a chat-style perplexity (PPL)
# setup: each label maps to a HUMAN question plus a candidate BOT reply,
# and the PPL inferencer scores the two replies.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

afqmc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    # The JSON file is loaded as a single 'train' split; evaluate on it.
    test_split='train')

afqmc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Integer keys match the dataset's label values:
        # 0 -> reply "不同。" (different), 1 -> reply "相似" (similar).
        template={
            0:
            dict(round=[
                dict(role="HUMAN", prompt="“{sentence1}”与“{sentence2}”不同还是相似?"),
                dict(role="BOT", prompt="不同。")
            ]),
            1:
            dict(round=[
                dict(role="HUMAN", prompt="“{sentence1}”与“{sentence2}”不同还是相似?"),
                dict(role="BOT", prompt="相似")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

afqmc_datasets = [
    dict(
        type=HFDataset,
        abbr='afqmc-dev',
        # Generic HuggingFace 'json' loader pointed at the local dev file.
        path='json',
        data_files='./data/CLUE/AFQMC/dev.json',
        split='train',
        reader_cfg=afqmc_reader_cfg,
        infer_cfg=afqmc_infer_cfg,
        eval_cfg=afqmc_eval_cfg),
]
configs/datasets/CLUE_cmnli/CLUE_cmnli_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete CMNLI generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .CLUE_cmnli_gen_316313 import cmnli_datasets  # noqa: F401, F403
configs/datasets/CLUE_ocnli/CLUE_ocnli_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete OCNLI generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .CLUE_ocnli_gen_7c44b0 import ocnli_datasets  # noqa: F401, F403
configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete CHID perplexity config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_chid_ppl_b6cd88 import chid_datasets  # noqa: F401, F403
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete CLUEWSC generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_cluewsc_gen_276956 import cluewsc_datasets  # noqa: F401, F403
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen_276956.py
0 → 100644
View file @
fb111087
# Evaluation config for the FewCLUE CLUEWSC coreference dataset (dev and
# public-test splits) using a generation setup: the model answers A/B
# ("是"/"否"), and the first capital letter is scored by accuracy.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDataset_V2

# Reader: the two mention spans and the passage text; gold output is 'label'.
cluewsc_reader_cfg = dict(
    input_columns=["span1", "span2", "text", "new_text"],
    output_column="label",
)

cluewsc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Single-turn chat prompt asking whether span2 refers to span1.
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="{text}\n此处,“{span2}”是否指代“{span1}“?\nA. 是\nB. 否\n请从”A“,”B“中进行选择。\n答:",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

cluewsc_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Map the free-form generation to a choice via its first capital letter.
    pred_postprocessor=dict(type="first-capital"),
)

# Both the few-shot dev split and the public test split share the same
# reader/infer/eval configuration.
cluewsc_datasets = [
    dict(
        abbr="cluewsc-dev",
        type=CluewscDataset_V2,
        path="./data/FewCLUE/cluewsc/dev_few_all.json",
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg,
    ),
    dict(
        abbr="cluewsc-test",
        type=CluewscDataset_V2,
        path="./data/FewCLUE/cluewsc/test_public.json",
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg,
    ),
]
configs/datasets/FewCLUE_csl/FewCLUE_csl_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete CSL generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_csl_gen_1b0c02 import csl_datasets  # noqa: F401, F403
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_8eee08.py
0 → 100644
View file @
fb111087
# Evaluation config for the FewCLUE CSL keyword-recognition dataset (dev
# and public-test splits) using a perplexity (PPL) setup: label 0 maps to
# the abstract alone, label 1 to the abstract plus its keywords.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CslDataset

# Reader: paper abstract and candidate keywords; gold output is 'label'.
csl_reader_cfg = dict(
    input_columns=["abst", "keywords"],
    output_column='label')

csl_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Integer keys match the dataset's label values.
        template={
            0: "摘要:{abst}",
            1: "摘要:{abst}\n关键词:{keywords}"
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

csl_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Both splits are loaded through the 'json' loader as a 'train' split and
# share the same reader/infer/eval configuration.
csl_datasets = [
    dict(
        type=CslDataset,
        path='json',
        abbr='csl_dev',
        data_files='./data/FewCLUE/csl/dev_few_all.json',
        split='train',
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg),
    dict(
        type=CslDataset,
        path='json',
        abbr='csl_test',
        data_files='./data/FewCLUE/csl/test_public.json',
        split='train',
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg)
]
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_gen_bef37f.py
0 → 100644
View file @
fb111087
# Evaluation config for the FewCLUE OCNLI-FC natural-language-inference
# dataset (dev and public-test splits) using a generation setup: the model
# answers A/B/C, and the first capital letter is scored by accuracy.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import cmnliDataset_V2

# Reader: premise and hypothesis; gold output is 'label'. Data is loaded as
# a single 'train' split and evaluated on it.
ocnli_fc_reader_cfg = dict(
    input_columns=["sentence1", "sentence2"],
    output_column="label",
    test_split="train")

ocnli_fc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Single-turn chat prompt: A = entailed, B = contradicted, C = possible.
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:"),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

ocnli_fc_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Map the free-form generation to a choice via its first capital letter.
    pred_postprocessor=dict(type="first-capital"),
)

ocnli_fc_datasets = [
    dict(
        abbr="ocnli_fc-dev",
        type=cmnliDataset_V2,  # ocnli_fc share the same format with cmnli
        path="./data/FewCLUE/ocnli/dev_few_all.json",
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg,
    ),
    dict(
        abbr="ocnli_fc-test",
        type=cmnliDataset_V2,  # ocnli_fc share the same format with cmnli
        path="./data/FewCLUE/ocnli/test_public.json",
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg,
    ),
]
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_33cc73.py
0 → 100644
View file @
fb111087
# Evaluation config for the FewCLUE TNEWS news-classification dataset (dev
# and public-test splits) using a chat-style perplexity (PPL) setup: one
# candidate dialogue is generated per category label.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset

# Reader: the news sentence; gold output is the remapped label description.
tnews_reader_cfg = dict(
    input_columns='sentence',
    output_column='label_desc2')

# Candidate category descriptions; each becomes both a template key and the
# BOT reply scored by the PPL inferencer.
tnews_labels = [
    '农业新闻', '旅游新闻', '游戏新闻', '科技类别公司新闻', '体育类别新闻',
    '初升高教育新闻', '娱乐圈新闻', '投资资讯', '军事类别常识', '车辆新闻',
    '楼市新闻', '环球不含中国类别新闻', '书籍文化历史类别新闻', '故事类别新闻',
    '股票市场类别新闻'
]

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # One HUMAN/BOT round per label; the BOT reply is the label itself.
        template={
            lb: dict(round=[
                dict(role='HUMAN', prompt='以下内容属于什么新闻:{sentence}。'),
                dict(role='BOT', prompt=lb)
            ])
            for lb in tnews_labels
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

tnews_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Both splits are loaded through the 'json' loader as a 'train' split and
# share the same reader/infer/eval configuration.
tnews_datasets = [
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-dev',
        data_files='./data/FewCLUE/tnews/dev_few_all.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg),
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-test',
        data_files='./data/FewCLUE/tnews/test_public.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg)
]
configs/datasets/SuperGLUE_AX_b/SuperGLUE_AX_b_ppl_4bd960.py
0 → 100644
View file @
fb111087
# Evaluation config for the SuperGLUE AX-b diagnostic entailment set using
# a chat-style perplexity (PPL) setup: the two labels map to "Yes"/"No"
# BOT replies to the same entailment question.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

# Reader: the sentence pair; gold output is 'label'. The JSONL file is
# loaded as a single 'train' split and evaluated on it.
AX_b_reader_cfg = dict(
    input_columns=["sentence1", "sentence2"],
    output_column="label",
    test_split="train")

AX_b_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # String keys match the dataset's label values.
        template={
            "entailment":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="{sentence1}\n{sentence2}\nIs the sentence below entailed by the sentence above?"),
                dict(role="BOT", prompt="Yes"),
            ]),
            "not_entailment":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="{sentence1}\n{sentence2}\nIs the sentence below entailed by the sentence above?"),
                dict(role="BOT", prompt="No"),
            ])
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

AX_b_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

AX_b_datasets = [
    dict(
        type=HFDataset,
        abbr="AX_b",
        # Generic HuggingFace 'json' loader pointed at the local JSONL file.
        path="json",
        data_files="./data/SuperGLUE/AX-b/AX-b.jsonl",
        split="train",
        reader_cfg=AX_b_reader_cfg,
        infer_cfg=AX_b_infer_cfg,
        eval_cfg=AX_b_eval_cfg,
    )
]
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_f80fb0.py
0 → 100644
View file @
fb111087
# Evaluation config for the SuperGLUE BoolQ yes/no question-answering
# dataset using a chat-style perplexity (PPL) setup: label 0 maps to a
# "No" reply and label 1 to "Yes".
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BoolQDataset

# Reader: question and passage; gold output is 'answer'. The JSONL file is
# loaded as a single 'train' split and evaluated on it.
BoolQ_reader_cfg = dict(
    input_columns=["question", "passage"],
    output_column="answer",
    test_split="train")

BoolQ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Integer keys match the dataset's label values.
        template={
            0:
            dict(round=[
                dict(role="HUMAN", prompt="{passage}\nQuestion: {question}?"),
                dict(role="BOT", prompt="No"),
            ]),
            1:
            dict(round=[
                dict(role="HUMAN", prompt="{passage}\nQuestion: {question}?"),
                dict(role="BOT", prompt="Yes"),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

BoolQ_datasets = [
    dict(
        type=BoolQDataset,
        abbr="BoolQ",
        # Generic 'json' loader pointed at the local validation JSONL file.
        path="json",
        data_files="./data/SuperGLUE/BoolQ/val.jsonl",
        split="train",
        reader_cfg=BoolQ_reader_cfg,
        infer_cfg=BoolQ_infer_cfg,
        eval_cfg=BoolQ_eval_cfg,
    )
]
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_012063.py
0 → 100644
View file @
fb111087
# Evaluation config for the SuperGLUE CB (CommitmentBank) NLI dataset using
# a perplexity (PPL) setup: each of the three labels maps to a plain-text
# completion joining premise and hypothesis.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

# Reader: premise and hypothesis; gold output is 'label'.
CB_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'],
    output_column='label')

CB_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # String keys match the dataset's label values.
        template={
            'contradiction': '{premise}?contradiction, {hypothesis}',
            'entailment': '{premise}?entailment, {hypothesis}',
            'neutral': '{premise}?neutral, {hypothesis}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

CB_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
)

CB_datasets = [
    dict(
        type=HFDataset,
        abbr='CB',
        # Generic HuggingFace 'json' loader pointed at the local val file.
        path='json',
        split='train',
        data_files='./data/SuperGLUE/CB/val.jsonl',
        reader_cfg=CB_reader_cfg,
        infer_cfg=CB_infer_cfg,
        eval_cfg=CB_eval_cfg)
]
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen.py
0 → 100644
View file @
fb111087
# Alias config: re-exports the concrete COPA generation config via
# mmengine's read_base mechanism so callers can import a stable name.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_COPA_gen_6d5e67 import COPA_datasets  # noqa: F401, F403
Prev
1
2
3
4
5
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment