Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
opencompass
Commits
7d346000
"...composable_kernel_rocm.git" did not exist on "fcbb978828b308d8c367a3eeaebee485a61b548c"
Commit
7d346000
authored
Jul 04, 2023
by
gaotongxiao
Browse files
initial commit
parents
Changes
188
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
572 additions
and
0 deletions
+572
-0
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_f99d7a.py
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_f99d7a.py
+45
-0
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_b59c1f.py
...gs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_b59c1f.py
+41
-0
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_29abd6.py
.../datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_29abd6.py
+60
-0
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_b828fc.py
.../datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_b828fc.py
+44
-0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_8d59ba.py
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_8d59ba.py
+74
-0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py
+4
-0
configs/datasets/GaokaoBench/GaokaoBench_gen_aed980.py
configs/datasets/GaokaoBench/GaokaoBench_gen_aed980.py
+0
-0
configs/datasets/GaokaoBench/GaokaoBench_mixed.py
configs/datasets/GaokaoBench/GaokaoBench_mixed.py
+4
-0
configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py
configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py
+0
-0
configs/datasets/PJExam/PJExam_gen.py
configs/datasets/PJExam/PJExam_gen.py
+4
-0
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py
+4
-0
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py
+4
-0
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_094411.py
...gs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_094411.py
+45
-0
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py
+4
-0
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_32adbb.py
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_32adbb.py
+62
-0
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_ddb78c.py
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_ddb78c.py
+49
-0
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py
+4
-0
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_ce346a.py
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_ce346a.py
+42
-0
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_29a22c.py
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_29a22c.py
+53
-0
configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_90d5b6.py
.../datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_90d5b6.py
+29
-0
No files found.
configs/datasets/FewCLUE_csl/FewCLUE_csl_ppl_f99d7a.py
0 → 100644
View file @
7d346000
# FewCLUE CSL (keyword/abstract matching) evaluated via perplexity scoring:
# each candidate label renders its own prompt and the model's preferred
# rendering decides the prediction.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CslDataset

# Columns substituted into the templates; 'label' holds the gold answer.
csl_reader_cfg = dict(
    input_columns=["abst", "keywords"], output_column='label')

csl_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # One candidate prompt per label: 0 -> abstract alone,
        # 1 -> abstract followed by its keywords.
        template={
            0: dict(round=[dict(role="HUMAN", prompt="摘要:{abst}")]),
            1: dict(round=[
                dict(role="HUMAN", prompt="摘要:{abst}\n关键词:{keywords}")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),  # zero-shot: no in-context examples
    inferencer=dict(type=PPLInferencer))

# Accuracy of the selected label against the gold 'label' column.
csl_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Local FewCLUE JSON files loaded through the HF 'json' loader, which places
# all rows under the 'train' split — hence split='train' for both subsets.
csl_datasets = [
    dict(
        type=CslDataset,
        path='json',
        abbr='csl_dev',
        data_files='./data/FewCLUE/csl/dev_few_all.json',
        split='train',
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg),
    dict(
        type=CslDataset,
        path='json',
        abbr='csl_test',
        data_files='./data/FewCLUE/csl/test_public.json',
        split='train',
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg)
]
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_b59c1f.py
0 → 100644
View file @
7d346000
# FewCLUE eprstmt (e-commerce review sentiment) with perplexity-style
# classification: each sentiment label has its own plain-string template.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

eprstmt_reader_cfg = dict(
    input_columns=['sentence'], output_column='label', test_split='train')

eprstmt_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Template keys must match the values of the 'label' column.
        template={
            'Negative': ' 内容: "{sentence}"。情绪分类:消极。',
            'Positive': ' 内容: "{sentence}"。情绪分类:积极。',
        }),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer))

eprstmt_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# HF 'json' loader puts local files under the 'train' split.
eprstmt_datasets = [
    dict(
        type=HFDataset,
        abbr='eprstmt-dev',
        path='json',
        data_files='./data/FewCLUE/eprstmt/dev_few_all.json',
        split='train',
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg),
    dict(
        type=HFDataset,
        abbr='eprstmt-test',
        path='json',
        data_files='./data/FewCLUE/eprstmt/test_public.json',
        split='train',
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg)
]
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_29abd6.py
0 → 100644
View file @
7d346000
# FewCLUE OCNLI-FC (natural language inference) in chat form: one candidate
# dialogue per NLI label, scored by perplexity.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_fc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

ocnli_fc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Each label's BOT turn carries the answer word; note 'neutral'
        # deliberately uses a different question phrasing than the other two.
        template={
            'contradiction':
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
                dict(role="BOT", prompt="错")
            ]),
            'entailment':
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
                dict(role="BOT", prompt="对")
            ]),
            'neutral':
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
                dict(role="BOT", prompt="可能")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer))

ocnli_fc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Local FewCLUE JSON files; the HF 'json' loader exposes them as 'train'.
ocnli_fc_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli_fc-dev',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/dev_few_all.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg),
    dict(
        type=HFDataset,
        abbr='ocnli_fc-test',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/test_public.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg)
]
configs/datasets/FewCLUE_ocnli_fc/FewCLUE_ocnli_fc_ppl_b828fc.py
0 → 100644
View file @
7d346000
# FewCLUE OCNLI-FC, plain-string prompt variant of the chat-style config in
# FewCLUE_ocnli_fc_ppl_29abd6.py: each label renders one completed string.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_fc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

ocnli_fc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Answer word is embedded directly at the end of each template.
        template={
            'contradiction':
            '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
            'entailment':
            '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
            'neutral':
            '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
        }),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer))

ocnli_fc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Same data files as the 29abd6 variant; only the prompt format differs.
ocnli_fc_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli_fc-dev',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/dev_few_all.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg),
    dict(
        type=HFDataset,
        abbr='ocnli_fc-test',
        path='json',
        split='train',
        data_files='./data/FewCLUE/ocnli/test_public.json',
        reader_cfg=ocnli_fc_reader_cfg,
        infer_cfg=ocnli_fc_infer_cfg,
        eval_cfg=ocnli_fc_eval_cfg)
]
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen_8d59ba.py
0 → 100644
View file @
7d346000
# FewCLUE tnews (short-news topic classification) in generative
# multiple-choice form: the model sees lettered options and must answer with
# a letter, extracted by the 'first-capital' post-processor before accuracy
# scoring.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset_V2

# input_columns as a list for consistency with the sibling configs in this
# commit (eprstmt, ocnli_fc, csl all pass lists).
tnews_reader_cfg = dict(
    input_columns=["sentence"],
    output_column="label_desc2",
)

# Human-readable option texts, one per original tnews class label.
tnews_labels = [
    "农业新闻",  # news_agriculture
    "旅游新闻",  # news_travel
    "游戏新闻",  # news_game
    "科技类别公司新闻",  # news_tech
    "体育类别新闻",  # news_sports
    "初升高教育新闻",  # news_edu
    "娱乐圈新闻",  # news_entertainment
    "投资资讯",  # news_finance
    "军事类别常识",  # news_military
    "车辆新闻",  # news_car
    "楼市新闻",  # news_house
    "环球不含中国类别新闻",  # news_world
    "书籍文化历史类别新闻",  # news_culture
    "故事类别新闻",  # news_story
    "股票市场类别新闻",  # news_stock
]

# "A. <label>" option lines for the prompt body, and the quoted letter range
# ("A","B",...) cited in the instruction sentence.
_tnews_options_list_str = "\n".join(f'{chr(ord("A") + i)}. {tnews_labels[i]}'
                                    for i in range(len(tnews_labels)))
_tnews_options_range_str = ",".join(f'“{chr(ord("A") + i)}”'
                                    for i in range(len(tnews_labels)))

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                # {{sentence}} survives f-string evaluation as a literal
                # {sentence} placeholder; the option strings are baked in now.
                prompt=
                f"{{sentence}}\n请判断上述内容属于什么新闻?\n{_tnews_options_list_str}\n请从{_tnews_options_range_str}中进行选择。\n答:",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=GenInferencer),
)

tnews_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Extract the first capital letter from the generation as the answer.
    pred_postprocessor=dict(type="first-capital"),
)

tnews_datasets = [
    dict(
        abbr="tnews-dev",
        type=TNewsDataset_V2,
        path="./data/FewCLUE/tnews/dev_few_all.json",
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg,
    ),
    dict(
        abbr="tnews-test",
        type=TNewsDataset_V2,
        path="./data/FewCLUE/tnews/test_public.json",
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg,
    ),
]

# Keep only the public names; the helper strings are implementation details.
del _tnews_options_list_str, _tnews_options_range_str
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `tnews_datasets` from
# the pinned prompt-version variant below.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_tnews_ppl_784b9e import tnews_datasets  # noqa: F401, F403
configs/datasets/GaokaoBench/GaokaoBench_gen_aed980.py
0 → 100644
View file @
7d346000
This diff is collapsed.
Click to expand it.
configs/datasets/GaokaoBench/GaokaoBench_mixed.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `GaokaoBench_datasets`
# from the pinned variant below.
from mmengine.config import read_base

with read_base():
    from .GaokaoBench_mixed_f2038e import GaokaoBench_datasets  # noqa: F401, F403
configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py
0 → 100644
View file @
7d346000
This diff is collapsed.
Click to expand it.
configs/datasets/PJExam/PJExam_gen.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `PJExam_datasets` from
# the pinned variant below.
from mmengine.config import read_base

with read_base():
    from .PJExam_gen_785c37 import PJExam_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `AX_g_datasets` from
# the pinned variant below.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_AX_g_gen_7a5dee import AX_g_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_gen.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `BoolQ_datasets` from
# the pinned variant below.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_BoolQ_gen_8525d1 import BoolQ_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_094411.py
0 → 100644
View file @
7d346000
# SuperGLUE BoolQ (yes/no reading comprehension) with perplexity scoring:
# candidate answers "No." / "Yes." map to labels 0 / 1.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BoolQDataset

BoolQ_reader_cfg = dict(
    input_columns=["question", "passage"],
    output_column="answer",
    test_split="train")

BoolQ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Identical HUMAN turn for both labels; only the BOT answer differs.
        template={
            0: dict(round=[
                dict(role="HUMAN", prompt="{passage}\nQuestion: {question}"),
                dict(role="BOT", prompt="No."),
            ]),
            1: dict(round=[
                dict(role="HUMAN", prompt="{passage}\nQuestion: {question}"),
                dict(role="BOT", prompt="Yes."),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer),
)

BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Validation JSONL loaded via the HF 'json' loader (split name is 'train').
BoolQ_datasets = [
    dict(
        type=BoolQDataset,
        abbr="BoolQ",
        path="json",
        data_files="./data/SuperGLUE/BoolQ/val.jsonl",
        split="train",
        reader_cfg=BoolQ_reader_cfg,
        infer_cfg=BoolQ_infer_cfg,
        eval_cfg=BoolQ_eval_cfg,
    )
]
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `CB_datasets` from the
# pinned variant below.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_CB_ppl_32adbb import CB_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_ppl_32adbb.py
0 → 100644
View file @
7d346000
# SuperGLUE CB (CommitmentBank, three-way NLI) with perplexity scoring:
# the same question is asked for every label, and the BOT turn names the
# relation being scored.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

CB_reader_cfg = dict(
    input_columns=["premise", "hypothesis"],
    output_column="label",
)

CB_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            "contradiction":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"
                ),
                dict(role="BOT", prompt="Contradiction"),
            ]),
            "entailment":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"
                ),
                dict(role="BOT", prompt="Entailment"),
            ]),
            "neutral":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nWhat is the relation between the two sentences?"
                ),
                dict(role="BOT", prompt="Neutral"),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer),
)

CB_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
)

# Validation JSONL via the HF 'json' loader (all rows land in 'train').
CB_datasets = [
    dict(
        type=HFDataset,
        abbr="CB",
        path="json",
        split="train",
        data_files="./data/SuperGLUE/CB/val.jsonl",
        reader_cfg=CB_reader_cfg,
        infer_cfg=CB_infer_cfg,
        eval_cfg=CB_eval_cfg,
    )
]
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_ppl_ddb78c.py
0 → 100644
View file @
7d346000
# SuperGLUE COPA (choice of plausible alternatives) with perplexity scoring:
# label 0/1 selects choice1/choice2 as the BOT answer.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

COPA_reader_cfg = dict(
    input_columns=["question", "premise", "choice1", "choice2"],
    output_column="label",
    test_split="train")

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # {question} is the dataset's "cause"/"effect" field, spliced into
        # the English question.
        template={
            0: dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\nQuestion: What may be the {question}?\nAnswer:"
                ),
                dict(role="BOT", prompt="{choice1}"),
            ]),
            1: dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\nQuestion: What may be the {question}?\nAnswer:"
                ),
                dict(role="BOT", prompt="{choice2}"),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer),
)

COPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Validation JSONL via the HF 'json' loader (split name is 'train').
COPA_datasets = [
    dict(
        type=HFDataset,
        abbr="COPA",
        path="json",
        data_files="./data/SuperGLUE/COPA/val.jsonl",
        split="train",
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg,
    )
]
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_ppl.py
0 → 100644
View file @
7d346000
# Indirection config: importing this file re-exports `MultiRC_datasets` from
# the pinned variant below.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_MultiRC_ppl_83a304 import MultiRC_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_gen_ce346a.py
0 → 100644
View file @
7d346000
# SuperGLUE RTE (textual entailment) in generative A/B multiple-choice form;
# the first capital letter of the generation is taken as the answer.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDataset_V2

RTE_reader_cfg = dict(
    input_columns=["hypothesis", "premise"],
    output_column="label",
)

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:"
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=GenInferencer),
)

RTE_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Extract the first capital letter (A/B) from the generated answer.
    pred_postprocessor=dict(type="first-capital"),
)

RTE_datasets = [
    dict(
        abbr="RTE",
        type=AXDataset_V2,  # rte share the same format with ax
        path="./data/SuperGLUE/RTE/val.jsonl",
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
configs/datasets/SuperGLUE_RTE/SuperGLUE_RTE_ppl_29a22c.py
0 → 100644
View file @
7d346000
# SuperGLUE RTE (textual entailment) with perplexity scoring: identical
# HUMAN turn for both labels, BOT answers "Yes"/"No".
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

RTE_reader_cfg = dict(
    input_columns=["hypothesis", "premise"],
    output_column="label",
    test_split="train")

RTE_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Template keys match the dataset's label strings.
        template={
            "entailment":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"
                ),
                dict(role="BOT", prompt="Yes"),
            ]),
            "not_entailment":
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"
                ),
                dict(role="BOT", prompt="No"),
            ])
        },
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=PPLInferencer),
)

RTE_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# Validation JSONL via the HF 'json' loader (split name is 'train').
RTE_datasets = [
    dict(
        type=HFDataset,
        abbr="RTE",
        path="json",
        data_files="./data/SuperGLUE/RTE/val.jsonl",
        split="train",
        reader_cfg=RTE_reader_cfg,
        infer_cfg=RTE_infer_cfg,
        eval_cfg=RTE_eval_cfg,
    )
]
configs/datasets/SuperGLUE_ReCoRD/SuperGLUE_ReCoRD_gen_90d5b6.py
0 → 100644
View file @
7d346000
# SuperGLUE ReCoRD (cloze-style entity extraction) in generative form:
# the model fills the ____ placeholder and exact-match against the gold
# answers is computed after a ReCoRD-specific post-process.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import ReCoRDDataset

# 'answers' may hold several acceptable entity strings per example.
ReCoRD_reader_cfg = dict(
    input_columns=['question', 'text'], output_column='answers')

ReCoRD_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=
        "Passage:{text}\nResult:{question}\nQuestion: What entity does ____ refer to in the result?Give me the entity name:"
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot
    inferencer=dict(type=GenInferencer))

ReCoRD_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    # Dataset-specific cleanup of the generation before exact match.
    pred_postprocessor=dict(type='ReCoRD'))

ReCoRD_datasets = [
    dict(
        type=ReCoRDDataset,
        abbr='ReCoRD',
        path='./data/SuperGLUE/ReCoRD/val.jsonl',
        reader_cfg=ReCoRD_reader_cfg,
        infer_cfg=ReCoRD_infer_cfg,
        eval_cfg=ReCoRD_eval_cfg)
]
Prev
1
2
3
4
5
6
…
10
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment