OpenDAS / opencompass · Commit 7d346000
authored Jul 04, 2023 by gaotongxiao

initial commit

Changes: 188
Showing 20 changed files with 1226 additions and 0 deletions (+1226, -0)
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_f99d7a.py            +45  -0
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_b59c1f.py    +41  -0
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_29abd6.py  +60  -0
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_b828fc.py  +44  -0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_8d59ba.py        +74  -0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py               +4   -0
configs/datasets/GaokaoBench/GaokaoBench_gen_aed980.py            +301 -0
configs/datasets/GaokaoBench/GaokaoBench_mixed.py                 +4   -0
configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py          +353 -0
configs/datasets/PJExam/PJExam_gen.py                             +4   -0
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py             +4   -0
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py           +4   -0
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_094411.py    +45  -0
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py                 +4   -0
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_32adbb.py          +62  -0
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_ddb78c.py      +49  -0
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py       +4   -0
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_ce346a.py        +42  -0
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_29a22c.py        +53  -0
configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_90d5b6.py  +29  -0
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_f99d7a.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CslDataset

csl_reader_cfg = dict(
    input_columns=["abst", "keywords"], output_column='label')

csl_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[dict(role="HUMAN", prompt="摘要:{abst}")]),
            1: dict(round=[
                dict(role="HUMAN", prompt="摘要:{abst}\n关键词:{keywords}")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

csl_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

csl_datasets = [
    dict(
        type=CslDataset,
        path='json',
        abbr='csl_dev',
        data_files='./data/FewCLUE/csl/dev_few_all.json',
        split='train',
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg),
    dict(
        type=CslDataset,
        path='json',
        abbr='csl_test',
        data_files='./data/FewCLUE/csl/test_public.json',
        split='train',
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg)
]
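A dataset config like the one above only defines `csl_datasets`; a top-level evaluation config pulls it in with mmengine's `read_base()` and attaches models, the same pattern this commit itself uses in FewCLUE_tnews_ppl.py. A minimal sketch of that composition (the file name and the empty `models` list are hypothetical placeholders, not part of this commit):

# eval_fewclue_csl.py -- illustrative sketch only, not a file in this commit.
from mmengine.config import read_base

with read_base():
    # Importing inside read_base() lets mmengine merge the dataset config.
    from .datasets.FewCLUE_csl.FewCLUE_csl_ppl_f99d7a import csl_datasets

datasets = [*csl_datasets]
models = []  # normally imported from a model config under configs/models/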
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_b59c1f.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

eprstmt_reader_cfg = dict(
    input_columns=['sentence'], output_column='label', test_split='train')

eprstmt_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'Negative': ' 内容: "{sentence}"。情绪分类:消极。',
            'Positive': ' 内容: "{sentence}"。情绪分类:积极。',
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

eprstmt_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

eprstmt_datasets = [
    dict(
        type=HFDataset,
        abbr='eprstmt-dev',
        path='json',
        data_files='./data/FewCLUE/eprstmt/dev_few_all.json',
        split='train',
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg),
    dict(
        type=HFDataset,
        abbr='eprstmt-test',
        path='json',
        data_files='./data/FewCLUE/eprstmt/test_public.json',
        split='train',
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg)
]
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_29abd6.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_fc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

ocnli_fc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction': dict(round=[
                dict(role="HUMAN",
                     prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
                dict(role="BOT", prompt="错")
            ]),
            'entailment': dict(round=[
                dict(role="HUMAN",
                     prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
                dict(role="BOT", prompt="对")
            ]),
            'neutral': dict(round=[
                dict(role="HUMAN", prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
                dict(role="BOT", prompt="可能")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ocnli_fc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ocnli_fc_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli_fc-dev',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/dev_few_all.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg),
    dict(
        type=HFDataset,
        abbr='ocnli_fc-test',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/test_public.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg)
]
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_b828fc.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_fc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

ocnli_fc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction': '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
            'entailment': '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
            'neutral': '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ocnli_fc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ocnli_fc_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli_fc-dev',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/dev_few_all.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg),
    dict(
        type=HFDataset,
        abbr='ocnli_fc-test',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/test_public.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg)
]
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_8d59ba.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset_V2

tnews_reader_cfg = dict(
    input_columns="sentence",
    output_column="label_desc2",
)

tnews_labels = [
    "农业新闻",  # news_agriculture
    "旅游新闻",  # news_travel
    "游戏新闻",  # news_game
    "科技类别公司新闻",  # news_tech
    "体育类别新闻",  # news_sports
    "初升高教育新闻",  # news_edu
    "娱乐圈新闻",  # news_entertainment
    "投资资讯",  # news_finance
    "军事类别常识",  # news_military
    "车辆新闻",  # news_car
    "楼市新闻",  # news_house
    "环球不含中国类别新闻",  # news_world
    "书籍文化历史类别新闻",  # news_culture
    "故事类别新闻",  # news_story
    "股票市场类别新闻",  # news_stock
]
_tnews_options_list_str = "\n".join(f'{chr(ord("A") + i)}. {tnews_labels[i]}'
                                    for i in range(len(tnews_labels)))
_tnews_options_range_str = ",".join(f'“{chr(ord("A") + i)}”'
                                    for i in range(len(tnews_labels)))

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                f"{{sentence}}\n请判断上述内容属于什么新闻?\n{_tnews_options_list_str}\n请从{_tnews_options_range_str}中进行选择。\n答:",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

tnews_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

tnews_datasets = [
    dict(
        abbr="tnews-dev",
        type=TNewsDataset_V2,
        path="./data/FewCLUE/tnews/dev_few_all.json",
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg,
    ),
    dict(
        abbr="tnews-test",
        type=TNewsDataset_V2,
        path="./data/FewCLUE/tnews/test_public.json",
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg,
    ),
]

del _tnews_options_list_str, _tnews_options_range_str
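The two helper strings built with `chr(ord("A") + i)` above expand deterministically from `tnews_labels` into a lettered option list and a quoted letter range. A small stand-alone sketch (not part of the commit) that reproduces the expansion for the first three labels:

# Illustrative only: reproduces the option strings built in the config above.
labels = ["农业新闻", "旅游新闻", "游戏新闻"]  # first three of the fifteen tnews_labels
options_list = "\n".join(f'{chr(ord("A") + i)}. {labels[i]}' for i in range(len(labels)))
options_range = ",".join(f'“{chr(ord("A") + i)}”' for i in range(len(labels)))
print(options_list)   # A. 农业新闻 / B. 旅游新闻 / C. 游戏新闻 (one per line)
print(options_range)  # “A”,“B”,“C”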
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_tnews_ppl_784b9e import tnews_datasets  # noqa: F401, F403
configs/datasets/GaokaoBench/GaokaoBench_gen_aed980.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GaokaoBenchDataset

_MCQ_prompts = [
    {"type": "single_choice",
     "keyword": "2010-2022_Math_II_MCQs",
     "prefix_prompt": "请你做一道数学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "single_choice",
     "keyword": "2010-2022_Math_I_MCQs",
     "prefix_prompt": "请你做一道数学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "single_choice",
     "keyword": "2010-2022_History_MCQs",
     "prefix_prompt": "请你做一道历史选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "single_choice",
     "keyword": "2010-2022_Biology_MCQs",
     "prefix_prompt": "请你做一道生物选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "single_choice",
     "keyword": "2010-2022_Political_Science_MCQs",
     "prefix_prompt": "请你做一道政治选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "multi_choice",
     "keyword": "2010-2022_Physics_MCQs",
     "prefix_prompt": "请你做一道物理选择题。\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出所有符合题意的答案,并写在【答案】和<eoa>之间。\n例如:【答案】 AB <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】... <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "single_choice",
     "keyword": "2010-2022_Chemistry_MCQs",
     "prefix_prompt": "请你做一道化学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "single_choice",
     "keyword": "2010-2013_English_MCQs",
     "prefix_prompt": "请你做一道英语选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_Chinese_Modern_Lit",
     "prefix_prompt": "请你做一道语文阅读理解题,其中包含三个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_English_Fill_in_Blanks",
     "prefix_prompt": "请你做一道英语完形填空题,其中包含二十个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "five_out_of_seven",
     "keyword": "2012-2022_English_Cloze_Test",
     "prefix_prompt": "请回答下面的问题,将符合题意的五个选项的字母写在【答案】和<eoa>之间,例如“【答案】 A B C D E <eoa>\n请严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_Geography_MCQs",
     "prefix_prompt": "请你做一道地理选择题,其中包含两到三个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_English_Reading_Comp",
     "prefix_prompt": "请你做一道英语阅读理解题,其中包含三到五个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_Chinese_Lang_and_Usage_MCQs",
     "prefix_prompt": "请你做一道语文选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n(1)【解析】 ... <eoe>\n【答案】 ... <eoa>\n(2)【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答\n题目如下:"},
]
_FBQ_prompts = [
    {"type": "cloze",
     "keyword": "2010-2022_Math_I_Fill-in-the-Blank",
     "prefix_prompt": "请解答下面的数学填空题\n仔细阅读题目,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "cloze",
     "keyword": "2010-2022_Math_II_Fill-in-the-Blank",
     "prefix_prompt": "请解答下面的数学填空题\n仔细阅读题目,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "cloze",
     "keyword": "2010-2022_Chinese_Language_Famous_Passages_and_Sentences_Dictation",
     "prefix_prompt": "请回答下面的语文填空题\n请你仔细阅读题目,先找到题目对应的中国名篇,再从名篇中找到合适的句子填写到题目的空白处。请你将思考过程写在【解析】和<eoe>之间,将最终答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "cloze",
     "keyword": "2014-2022_English_Language_Cloze_Passage",
     "prefix_prompt": "请回答下面的英语短文填词题\n仔细阅读题目,空白处请填入一个适当单词或者括号内单词的正确形式。请你一步步思考,将思考过程写在【解析】和<eoe>之间,将最终答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
]
_OEQ_prompts = [
    {"type": "subjective",
     "keyword": "2010-2022_Geography_Open-ended_Questions",
     "prefix_prompt": "请解答下面的地理解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。你的答案请写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chemistry_Open-ended_Questions",
     "prefix_prompt": "请解答下面的化学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Math_I_Open-ended_Questions",
     "prefix_prompt": "请解答下面的数学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,答案需要有完整的解题步骤。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_History_Open-ended_Questions",
     "prefix_prompt": "请解答下面的历史解答题\n仔细阅读材料和题目,并充分结合你已有的知识,解答其中的问题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Biology_Open-ended_Questions",
     "prefix_prompt": "请解答下面的生物解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,同一小题的答案用\t分隔开。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...\t...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...\t...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Math_II_Open-ended_Questions",
     "prefix_prompt": "请解答下面的数学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,答案需要有完整的解题步骤。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Physics_Open-ended_Questions",
     "prefix_prompt": "请解答下面的物理解答题,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【解析】 ...<eoe>\n【答案】 ...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Political_Science_Open-ended_Questions",
     "prefix_prompt": "请解答下面的政治解答题\n仔细阅读材料和题目,并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "correction",
     "keyword": "2012-2022_English_Language_Error_Correction",
     "prefix_prompt": "请解答下面的英语短文改错题,仔细阅读题目并充分结合你你已有的知识,找出其中10处需要改动的地方。请你一步步思考,把修改后的短文写在【答案】和<eoa>之间。\n完整的题目回答格式如下:【答案】 ...<eoa>\n请你严格按照上述格式作答。\n题目如下:",
     # "prefix_prompt": [
     #     "请解答下面的英语短文改错题,仔细阅读题目并充分结合你你已有的知识,找出其中10处需要改动的地方。请你一步步思考,把修改后的短文写在【答案】和<eoa>之间。\n完整的题目回答格式如下:【答案】 ...<eoa>\n 请你严格按照上述格式作答。\n题目如下:",
     #     "请比较下面两篇短文,找到第二篇和第一篇的10处不同,每处不同只涉及一个单词,请将结果写在【答案】和<eoa>之间。例如:【答案】1. 将play改为plays\n 2.增加了the\n ... <eoa>\n 完整的题目回答格式如下:【答案】(1) ... \n (2) ...\n ...(10) ...\n<eoa>\n请你严格按照上述格式作答。\n短文如下:"
     # ],
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Ancient_Poetry_Reading",
     "prefix_prompt": "请解答下面的语文古代诗歌阅读题,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Practical_Text_Reading",
     "prefix_prompt": "请解答下面的语文实用类文本阅读,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)[答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Literary_Text_Reading",
     "prefix_prompt": "请解答下面的语文文学类文本阅读,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)[答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Classical_Chinese_Reading",
     "prefix_prompt": "请解答下面的语文文言文阅读,仔细阅读题目,前三题是单选题,最后一题要将文言文翻译为现代汉语。请你一步步思考并把最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。翻译题把翻译后的现代汉语句子写在【答案】后面,例如”【答案】今天天气很好 <eoa>”\n完整的题目回答格式如下:(1)[答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Language_and_Writing_Skills_Open-ended_Questions",
     "prefix_prompt": "请解答下面的语文解答题,仔细阅读题目,注意其中可能含有选择题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【解析】 ...<eoe>\n【答案】 ...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
]

GaokaoBench_datasets = []
for _folder, _prompts in [
    ("Multiple-choice_Questions", _MCQ_prompts),
    ("Fill-in-the-blank_Questions", _FBQ_prompts),
    ("Open-ended_Questions", _OEQ_prompts),
]:
    for _p in _prompts:
        _reader_cfg = {
            "input_columns": ['question'],
            "output_column": 'answer',
        }
        _infer_cfg = {
            "ice_template": {
                "type": PromptTemplate,
                "template": {
                    "round": [{
                        "role": "HUMAN",
                        "prompt": _p['prefix_prompt'] + '{question}'
                    }]
                },
                "ice_token": "</E>"
            },
            "retriever": {
                "type": ZeroRetriever
            },
            "inferencer": {
                "type": GenInferencer,
                "max_out_len": 1024,
            }
        }
        _eval_cfg = {
            "evaluator": {
                "type": "GaokaoBenchEvaluator" + "_" + _p['type'],
            },
            "pred_role": "BOT",
        }
        _base_path = './data/GAOKAO-BENCH/data'
        _dataset = {
            "type": GaokaoBenchDataset,
            "abbr": "GaokaoBench_" + _p['keyword'],
            "path": _base_path + '/' + _folder + '/' + _p['keyword'] + ".json",
            "reader_cfg": _reader_cfg,
            "infer_cfg": _infer_cfg,
            "eval_cfg": _eval_cfg,
        }
        GaokaoBench_datasets.append(_dataset)

_temporary_variables = [k for k in globals() if k.startswith('_')]
for _t in _temporary_variables:
    del globals()[_t]
del _temporary_variables, _t
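To make the nested loop above concrete, this is the shape of one entry it appends to GaokaoBench_datasets, derived mechanically from the code for the "2010-2022_Math_II_MCQs" prompt; the snippet is an abridged illustration only, not an extra file in the commit:

# Illustrative only: the dataset entry generated for the 2010-2022_Math_II_MCQs prompt
# (the "type" and "infer_cfg" keys are omitted here to keep the sketch self-contained).
example_entry = {
    "abbr": "GaokaoBench_2010-2022_Math_II_MCQs",
    "path": "./data/GAOKAO-BENCH/data/Multiple-choice_Questions/2010-2022_Math_II_MCQs.json",
    "reader_cfg": {"input_columns": ["question"], "output_column": "answer"},
    "eval_cfg": {"evaluator": {"type": "GaokaoBenchEvaluator_single_choice"}, "pred_role": "BOT"},
}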
configs/datasets/GaokaoBench/GaokaoBench_mixed.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .GaokaoBench_mixed_f2038e import GaokaoBench_datasets  # noqa: F401, F403
configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
from opencompass.datasets import GaokaoBenchDataset

_MCQ_prompts = [
    {"type": "single_choice",
     "keyword": "2010-2022_Math_II_MCQs",
     "prefix_prompt": "请你做一道数学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "single_choice",
     "keyword": "2010-2022_Math_I_MCQs",
     "prefix_prompt": "请你做一道数学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "single_choice",
     "keyword": "2010-2022_History_MCQs",
     "prefix_prompt": "请你做一道历史选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "single_choice",
     "keyword": "2010-2022_Biology_MCQs",
     "prefix_prompt": "请你做一道生物选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "single_choice",
     "keyword": "2010-2022_Political_Science_MCQs",
     "prefix_prompt": "请你做一道政治选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "multi_choice",
     "keyword": "2010-2022_Physics_MCQs",
     "prefix_prompt": "请你做一道物理选择题。\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出所有符合题意的答案,并写在【答案】和<eoa>之间。\n例如:【答案】 AB <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】... <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "single_choice",
     "keyword": "2010-2022_Chemistry_MCQs",
     "prefix_prompt": "请你做一道化学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "single_choice",
     "keyword": "2010-2013_English_MCQs",
     "prefix_prompt": "请你做一道英语选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_Chinese_Modern_Lit",
     "prefix_prompt": "请你做一道语文阅读理解题,其中包含三个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_English_Fill_in_Blanks",
     "prefix_prompt": "请你做一道英语完形填空题,其中包含二十个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "five_out_of_seven",
     "keyword": "2012-2022_English_Cloze_Test",
     "prefix_prompt": "请回答下面的问题,将符合题意的五个选项的字母写在【答案】和<eoa>之间,例如“【答案】 A B C D E <eoa>\n请严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_Geography_MCQs",
     "prefix_prompt": "请你做一道地理选择题,其中包含两到三个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_English_Reading_Comp",
     "prefix_prompt": "请你做一道英语阅读理解题,其中包含三到五个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n"},
    {"type": "multi_question_choice",
     "keyword": "2010-2022_Chinese_Lang_and_Usage_MCQs",
     "prefix_prompt": "请你做一道语文选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n(1)【解析】 ... <eoe>\n【答案】 ... <eoa>\n(2)【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答\n题目如下:"},
]
_FBQ_prompts = [
    {"type": "cloze",
     "keyword": "2010-2022_Math_I_Fill-in-the-Blank",
     "prefix_prompt": "请解答下面的数学填空题\n仔细阅读题目,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "cloze",
     "keyword": "2010-2022_Math_II_Fill-in-the-Blank",
     "prefix_prompt": "请解答下面的数学填空题\n仔细阅读题目,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:",
     "comment": ""},
    {"type": "cloze",
     "keyword": "2010-2022_Chinese_Language_Famous_Passages_and_Sentences_Dictation",
     "prefix_prompt": "请回答下面的语文填空题\n请你仔细阅读题目,先找到题目对应的中国名篇,再从名篇中找到合适的句子填写到题目的空白处。请你将思考过程写在【解析】和<eoe>之间,将最终答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "cloze",
     "keyword": "2014-2022_English_Language_Cloze_Passage",
     "prefix_prompt": "请回答下面的英语短文填词题\n仔细阅读题目,空白处请填入一个适当单词或者括号内单词的正确形式。请你一步步思考,将思考过程写在【解析】和<eoe>之间,将最终答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
]
_OEQ_prompts = [
    {"type": "subjective",
     "keyword": "2010-2022_Geography_Open-ended_Questions",
     "prefix_prompt": "请解答下面的地理解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。你的答案请写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chemistry_Open-ended_Questions",
     "prefix_prompt": "请解答下面的化学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Math_I_Open-ended_Questions",
     "prefix_prompt": "请解答下面的数学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,答案需要有完整的解题步骤。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_History_Open-ended_Questions",
     "prefix_prompt": "请解答下面的历史解答题\n仔细阅读材料和题目,并充分结合你已有的知识,解答其中的问题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Biology_Open-ended_Questions",
     "prefix_prompt": "请解答下面的生物解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,同一小题的答案用\t分隔开。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...\t...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...\t...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Math_II_Open-ended_Questions",
     "prefix_prompt": "请解答下面的数学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,答案需要有完整的解题步骤。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Physics_Open-ended_Questions",
     "prefix_prompt": "请解答下面的物理解答题,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【解析】 ...<eoe>\n【答案】 ...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Political_Science_Open-ended_Questions",
     "prefix_prompt": "请解答下面的政治解答题\n仔细阅读材料和题目,并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "correction",
     "keyword": "2012-2022_English_Language_Error_Correction",
     "prefix_prompt": "请解答下面的英语短文改错题,仔细阅读题目并充分结合你你已有的知识,找出其中10处需要改动的地方。请你一步步思考,把修改后的短文写在【答案】和<eoa>之间。\n完整的题目回答格式如下:【答案】 ...<eoa>\n请你严格按照上述格式作答。\n题目如下:",
     # "prefix_prompt": [
     #     "请解答下面的英语短文改错题,仔细阅读题目并充分结合你你已有的知识,找出其中10处需要改动的地方。请你一步步思考,把修改后的短文写在【答案】和<eoa>之间。\n完整的题目回答格式如下:【答案】 ...<eoa>\n 请你严格按照上述格式作答。\n题目如下:",
     #     "请比较下面两篇短文,找到第二篇和第一篇的10处不同,每处不同只涉及一个单词,请将结果写在【答案】和<eoa>之间。例如:【答案】1. 将play改为plays\n 2.增加了the\n ... <eoa>\n 完整的题目回答格式如下:【答案】(1) ... \n (2) ...\n ...(10) ...\n<eoa>\n请你严格按照上述格式作答。\n短文如下:"
     # ],
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Ancient_Poetry_Reading",
     "prefix_prompt": "请解答下面的语文古代诗歌阅读题,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Practical_Text_Reading",
     "prefix_prompt": "请解答下面的语文实用类文本阅读,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)[答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Literary_Text_Reading",
     "prefix_prompt": "请解答下面的语文文学类文本阅读,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)[答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Classical_Chinese_Reading",
     "prefix_prompt": "请解答下面的语文文言文阅读,仔细阅读题目,前三题是单选题,最后一题要将文言文翻译为现代汉语。请你一步步思考并把最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。翻译题把翻译后的现代汉语句子写在【答案】后面,例如”【答案】今天天气很好 <eoa>”\n完整的题目回答格式如下:(1)[答案】 ...<eoa>\n(2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
    {"type": "subjective",
     "keyword": "2010-2022_Chinese_Language_Language_and_Writing_Skills_Open-ended_Questions",
     "prefix_prompt": "请解答下面的语文解答题,仔细阅读题目,注意其中可能含有选择题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【解析】 ...<eoe>\n【答案】 ...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答。\n题目如下:",
     "comment": ""},
]

GaokaoBench_datasets = []
for _folder, _prompts in [
    ("Multiple-choice_Questions", _MCQ_prompts),
    ("Fill-in-the-blank_Questions", _FBQ_prompts),
    ("Open-ended_Questions", _OEQ_prompts),
]:
    for _p in _prompts:
        if _p['type'] == "single_choice":
            continue
        _reader_cfg = {
            "input_columns": ['question'],
            "output_column": 'answer',
        }
        _infer_cfg = {
            "ice_template": {
                "type": PromptTemplate,
                "template": {
                    "round": [{
                        "role": "HUMAN",
                        "prompt": _p['prefix_prompt'] + '{question}'
                    }]
                },
                "ice_token": "</E>"
            },
            "retriever": {
                "type": ZeroRetriever
            },
            "inferencer": {
                "type": GenInferencer,
                "max_out_len": 1024,
            }
        }
        _eval_cfg = {
            "evaluator": {
                "type": "GaokaoBenchEvaluator" + "_" + _p['type'],
            },
            "pred_role": "BOT",
        }
        _base_path = './data/GAOKAO-BENCH/data'
        _dataset = {
            "type": GaokaoBenchDataset,
            "abbr": "GaokaoBench_" + _p['keyword'],
            "path": _base_path + '/' + _folder + '/' + _p['keyword'] + ".json",
            "reader_cfg": _reader_cfg,
            "infer_cfg": _infer_cfg,
            "eval_cfg": _eval_cfg,
        }
        GaokaoBench_datasets.append(_dataset)

_folder = "Multiple-choice_Questions"
for _p in _MCQ_prompts:
    if _p['type'] != "single_choice":
        continue
    _reader_cfg = {
        "input_columns": ['question'],
        "output_column": 'answer',
    }
    _infer_cfg = {
        "ice_template": {
            "type": PromptTemplate,
            "template": {
                answer: {
                    "round": [{
                        "role": "HUMAN",
                        "prompt": _p['prefix_prompt'] + '{question}'
                    }, {
                        "role": "BOT",
                        "prompt": f"【答案】{answer} <eoa>"
                    }]
                }
                for answer in ['A', 'B', 'C', 'D']
            },
            "ice_token": "</E>"
        },
        "retriever": {
            "type": ZeroRetriever
        },
        "inferencer": {
            "type": PPLInferencer
        }
    }
    _eval_cfg = {
        "evaluator": {
            "type": "GaokaoBenchEvaluator" + "_" + _p['type'],
        },
        "pred_role": "BOT",
    }
    _base_path = './data/GAOKAO-BENCH/data'
    _dataset = {
        "type": GaokaoBenchDataset,
        "abbr": "GaokaoBench_" + _p['keyword'],
        "path": _base_path + '/' + _folder + '/' + _p['keyword'] + ".json",
        "reader_cfg": _reader_cfg,
        "infer_cfg": _infer_cfg,
        "eval_cfg": _eval_cfg,
    }
    GaokaoBench_datasets.append(_dataset)

_temporary_variables = [k for k in globals() if k.startswith('_')]
for _t in _temporary_variables:
    del globals()[_t]
del _temporary_variables, _t
configs/datasets/PJExam/PJExam_gen.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .PJExam_gen_785c37 import PJExam_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_AX_g_gen_7a5dee import AX_g_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_BoolQ_gen_8525d1 import BoolQ_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_094411.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BoolQDataset

BoolQ_reader_cfg = dict(
    input_columns=["question", "passage"],
    output_column="answer",
    test_split="train")

BoolQ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(role="HUMAN", prompt="{passage}\nQuestion: {question}"),
                dict(role="BOT", prompt="No."),
            ]),
            1: dict(round=[
                dict(role="HUMAN", prompt="{passage}\nQuestion: {question}"),
                dict(role="BOT", prompt="Yes."),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

BoolQ_datasets = [
    dict(
        type=BoolQDataset,
        abbr="BoolQ",
        path="json",
        data_files="./data/SuperGLUE/BoolQ/val.jsonl",
        split="train",
        reader_cfg=BoolQ_reader_cfg,
        infer_cfg=BoolQ_infer_cfg,
        eval_cfg=BoolQ_eval_cfg,
    )
]
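The PPL-style templates used above and in the other _ppl configs enumerate one candidate completion per label (here 0 → "No.", 1 → "Yes."); the perplexity inferencer scores each rendered candidate with the model and the accuracy evaluator compares the best-scoring label against the gold answer. A generic, framework-agnostic sketch of that selection rule, shown only as an illustration of the idea rather than OpenCompass's actual PPLInferencer implementation:

# Illustrative sketch: choose the label whose rendered prompt scores best.
from typing import Callable, Dict

def pick_label_by_ppl(candidates: Dict[int, str],
                      ppl_fn: Callable[[str], float]) -> int:
    """Return the label whose candidate text has the lowest perplexity.

    `candidates` maps a label (e.g. 0/1) to its fully rendered prompt text;
    `ppl_fn` is any callable that scores a string with a language model.
    """
    return min(candidates, key=lambda label: ppl_fn(candidates[label]))

# Hypothetical usage with a dummy scorer (here: shorter text scores lower):
demo = {0: "passage\nQuestion: q\nNo.", 1: "passage\nQuestion: q\nYes."}
print(pick_label_by_ppl(demo, ppl_fn=len))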
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_CB_ppl_32adbb import CB_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_32adbb.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

CB_reader_cfg = dict(
    input_columns=["premise", "hypothesis"],
    output_column="label",
)

CB_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            "contradiction": dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"),
                dict(role="BOT", prompt="Contradiction"),
            ]),
            "entailment": dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"),
                dict(role="BOT", prompt="Entailment"),
            ]),
            "neutral": dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"),
                dict(role="BOT", prompt="Neutral"),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

CB_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

CB_datasets = [
    dict(
        type=HFDataset,
        abbr="CB",
        path="json",
        split="train",
        data_files="./data/SuperGLUE/CB/val.jsonl",
        reader_cfg=CB_reader_cfg,
        infer_cfg=CB_infer_cfg,
        eval_cfg=CB_eval_cfg,
    )
]
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_ddb78c.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

COPA_reader_cfg = dict(
    input_columns=["question", "premise", "choice1", "choice2"],
    output_column="label",
    test_split="train")

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\nQuestion: What may be the {question}?\nAnswer:"),
                dict(role="BOT", prompt="{choice1}"),
            ]),
            1: dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\nQuestion: What may be the {question}?\nAnswer:"),
                dict(role="BOT", prompt="{choice2}"),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

COPA_datasets = [
    dict(
        type=HFDataset,
        abbr="COPA",
        path="json",
        data_files="./data/SuperGLUE/COPA/val.jsonl",
        split="train",
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg,
    )
]
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py  0 → 100644

from mmengine.config import read_base

with read_base():
    from .SuperGLUE_MultiRC_ppl_83a304 import MultiRC_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_ce346a.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDataset_V2

RTE_reader_cfg = dict(
    input_columns=["hypothesis", "premise"],
    output_column="label",
)

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:"
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

RTE_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

RTE_datasets = [
    dict(
        abbr="RTE",
        type=AXDataset_V2,  # rte share the same format with ax
        path="./data/SuperGLUE/RTE/val.jsonl",
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_29a22c.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = dict(
    input_columns=["hypothesis", "premise"],
    output_column="label",
    test_split="train")

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            "entailment": dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"),
                dict(role="BOT", prompt="Yes"),
            ]),
            "not_entailment": dict(round=[
                dict(role="HUMAN",
                     prompt="{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"),
                dict(role="BOT", prompt="No"),
            ])
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

RTE_datasets = [
    dict(
        type=HFDataset,
        abbr="RTE",
        path="json",
        data_files="./data/SuperGLUE/RTE/val.jsonl",
        split="train",
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_90d5b6.py  0 → 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset

ReCoRD_reader_cfg = dict(
    input_columns=['question', 'text'], output_column='answers')

ReCoRD_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=
        "Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result?Give me the entity name:"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

ReCoRD_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator), pred_postprocessor=dict(type='ReCoRD'))

ReCoRD_datasets = [
    dict(
        type=ReCoRDDataset,
        abbr='ReCoRD',
        path='./data/SuperGLUE/ReCoRD/val.jsonl',
        reader_cfg=ReCoRD_reader_cfg,
        infer_cfg=ReCoRD_infer_cfg,
        eval_cfg=ReCoRD_eval_cfg)
]