Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
opencompass
Commits
c94cc943
Commit
c94cc943
authored
Jul 05, 2023
by
Leymore
Committed by
gaotong
Jul 05, 2023
Browse files
Add release contribution
parent
e6b5bdcb
Changes
109
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
639 additions
and
0 deletions
+639
-0
configs/datasets/ARC_c/ARC_c_gen.py
configs/datasets/ARC_c/ARC_c_gen.py
+4
-0
configs/datasets/ARC_c/ARC_c_gen_3f3039.py
configs/datasets/ARC_c/ARC_c_gen_3f3039.py
+42
-0
configs/datasets/ARC_e/ARC_e_ppl_f86898.py
configs/datasets/ARC_e/ARC_e_ppl_f86898.py
+53
-0
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_220a83.py
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_220a83.py
+27
-0
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_c83c36.py
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_c83c36.py
+50
-0
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl.py
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl.py
+4
-0
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl_991e1b.py
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl_991e1b.py
+52
-0
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl_b78ad4.py
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl_b78ad4.py
+36
-0
configs/datasets/CLUE_ocnli/CLUE_ocnli_gen_01899f.py
configs/datasets/CLUE_ocnli/CLUE_ocnli_gen_01899f.py
+43
-0
configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_4f864a.py
configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_4f864a.py
+43
-0
configs/datasets/FewCLUE_chid/FewCLUE_chid_gen.py
configs/datasets/FewCLUE_chid/FewCLUE_chid_gen.py
+4
-0
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl.py
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl.py
+4
-0
configs/datasets/FewCLUE_csl/FewCLUE_csl_gen_1b0c02.py
configs/datasets/FewCLUE_csl/FewCLUE_csl_gen_1b0c02.py
+50
-0
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_gen_d6d06d.py
...gs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_gen_d6d06d.py
+48
-0
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_d3c387.py
...gs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_d3c387.py
+49
-0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen.py
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen.py
+4
-0
configs/datasets/PJExam/PJExam_gen_785c37.py
configs/datasets/PJExam/PJExam_gen_785c37.py
+54
-0
configs/datasets/SuperGLUE_AX_b/SuperGLUE_AX_b_ppl_a65d62.py
configs/datasets/SuperGLUE_AX_b/SuperGLUE_AX_b_ppl_a65d62.py
+34
-0
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl.py
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl.py
+4
-0
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_d489ee.py
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_d489ee.py
+34
-0
No files found.
configs/datasets/ARC_c/ARC_c_gen.py
0 → 100644
View file @
c94cc943
# Indirection config: pulls in the concrete ARC-c generation config so that
# `ARC_c_gen.py` always points at the current default variant.
from mmengine.config import read_base

with read_base():
    from .ARC_c_gen_3f3039 import ARC_c_datasets  # noqa: F401, F403
configs/datasets/ARC_c/ARC_c_gen_3f3039.py
0 → 100644
View file @
c94cc943
# Zero-shot generation config for ARC-Challenge (ARC-c): the model is shown a
# multiple-choice question and must generate the answer letter; scored with
# accuracy after extracting the first capital letter from the prediction.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset

# Columns read from each example; `answerKey` holds the gold choice letter.
ARC_c_reader_cfg = dict(
    input_columns=["question", "textA", "textB", "textC", "textD"],
    output_column="answerKey")

ARC_c_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "Question: {question}\nA. {textA}\nB. {textB}\nC. {textC}\nD. {textD}\nAnswer:"
            )
        ]),
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot: no in-context examples
    inferencer=dict(type=GenInferencer),
)

ARC_c_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Reduce the generated text to its first capital letter (A/B/C/D).
    pred_postprocessor=dict(type="first-capital"),
)

ARC_c_datasets = [
    dict(
        abbr="ARC-c",
        type=ARCDataset,
        path="./data/ARC/ARC-c/ARC-Challenge-Dev.jsonl",
        reader_cfg=ARC_c_reader_cfg,
        infer_cfg=ARC_c_infer_cfg,
        eval_cfg=ARC_c_eval_cfg,
    )
]
configs/datasets/ARC_e/ARC_e_ppl_f86898.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for ARC-Easy (ARC-e): each answer option is rendered
# as a full question/answer exchange and the option with the lowest perplexity
# is taken as the prediction; scored with accuracy.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ARCDataset

ARC_e_reader_cfg = dict(
    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
    output_column='answerKey')

# One candidate template per answer key; the PPL inferencer scores each.
ARC_e_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            "A":
            dict(round=[
                dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
                dict(role="BOT", prompt="{textA}")
            ]),
            "B":
            dict(round=[
                dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
                dict(role="BOT", prompt="{textB}")
            ]),
            "C":
            dict(round=[
                dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
                dict(role="BOT", prompt="{textC}")
            ]),
            "D":
            dict(round=[
                dict(role="HUMAN", prompt="Question: {question}\nAnswer: "),
                dict(role="BOT", prompt="{textD}")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ARC_e_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

ARC_e_datasets = [
    dict(
        type=ARCDataset,
        abbr='ARC-e',
        path='./data/ARC/ARC-e/ARC-Easy-Dev.jsonl',
        reader_cfg=ARC_e_reader_cfg,
        infer_cfg=ARC_e_infer_cfg,
        eval_cfg=ARC_e_eval_cfg)
]
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_220a83.py
0 → 100644
View file @
c94cc943
# Generation config for CLUE CMRC (Chinese machine reading comprehension):
# the model answers a question about a passage; scored with exact match.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset

CMRC_reader_cfg = dict(
    input_columns=['question', 'context'], output_column='answers')

CMRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template="文章:{context}\n根据上文,回答如下问题: {question}\n答:"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

CMRC_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
)

CMRC_datasets = [
    dict(
        type=CMRCDataset,
        abbr='CMRC_dev',
        path='./data/CLUE/CMRC/dev.json',
        reader_cfg=CMRC_reader_cfg,
        infer_cfg=CMRC_infer_cfg,
        eval_cfg=CMRC_eval_cfg),
]
configs/datasets/CLUE_afqmc/CLUE_afqmc_ppl_c83c36.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for CLUE AFQMC (Ant Financial question matching):
# label 0/1 templates differ only in the BOT reply ("不完全一致" vs "完全一致");
# the lower-perplexity completion wins. Loaded via a generic HF json dataset.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

afqmc_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

afqmc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0:
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?"
                ),
                dict(role="BOT", prompt="不完全一致")
            ]),
            1:
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "语句一:“{sentence1}”\n语句二:“{sentence2}”\n语句一与语句二是关于蚂蚁金融产品的疑问,两者所询问的内容是否完全一致?"
                ),
                dict(role="BOT", prompt="完全一致")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

afqmc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

afqmc_datasets = [
    dict(
        type=HFDataset,
        abbr='afqmc-dev',
        path='json',  # HF "json" loader; actual file given via data_files
        data_files='./data/CLUE/AFQMC/dev.json',
        split='train',
        reader_cfg=afqmc_reader_cfg,
        infer_cfg=afqmc_infer_cfg,
        eval_cfg=afqmc_eval_cfg),
]
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl.py
0 → 100644
View file @
c94cc943
# Indirection config: pulls in the concrete CMNLI perplexity config so that
# `CLUE_cmnli_ppl.py` always points at the current default variant.
from mmengine.config import read_base

with read_base():
    from .CLUE_cmnli_ppl_1c652a import cmnli_datasets  # noqa: F401, F403
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl_991e1b.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for CLUE CMNLI (Chinese NLI): one chat-style
# template per label (contradiction/entailment/neutral); the label whose
# rendered completion has the lowest perplexity is the prediction.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

cmnli_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

cmnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction':
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
                dict(role="BOT", prompt="错")
            ]),
            'entailment':
            dict(round=[
                dict(
                    role="HUMAN",
                    prompt="阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?"),
                dict(role="BOT", prompt="对")
            ]),
            # NOTE(review): the neutral label uses a differently-worded prompt
            # than the other two labels — appears intentional, but confirm.
            'neutral':
            dict(round=[
                dict(role="HUMAN", prompt="如果{sentence1}为真,那么{sentence2}也为真吗?"),
                dict(role="BOT", prompt="可能")
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

cmnli_datasets = [
    dict(
        type=HFDataset,
        abbr='cmnli',
        path='json',  # HF "json" loader; actual file given via data_files
        split='train',
        data_files='./data/CLUE/cmnli/cmnli_public/dev.json',
        reader_cfg=cmnli_reader_cfg,
        infer_cfg=cmnli_infer_cfg,
        eval_cfg=cmnli_eval_cfg)
]
configs/datasets/CLUE_cmnli/CLUE_cmnli_ppl_b78ad4.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for CLUE CMNLI using plain-string templates
# (no chat roles), one per label; lowest perplexity wins.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

cmnli_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

cmnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction':
            '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
            'entailment': '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
            'neutral': '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

cmnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

cmnli_datasets = [
    dict(
        type=HFDataset,
        abbr='cmnli',
        path='json',  # HF "json" loader; actual file given via data_files
        split='train',
        data_files='./data/CLUE/cmnli/cmnli_public/dev.json',
        reader_cfg=cmnli_reader_cfg,
        infer_cfg=cmnli_infer_cfg,
        eval_cfg=cmnli_eval_cfg)
]
configs/datasets/CLUE_ocnli/CLUE_ocnli_gen_01899f.py
0 → 100644
View file @
c94cc943
# Zero-shot generation config for CLUE OCNLI: the model picks A/B/C for
# entailment/contradiction/neutral; scored with accuracy after extracting the
# first capital letter. Reuses the CMNLI dataset class (same file format).
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import cmnliDataset_V2

ocnli_reader_cfg = dict(
    input_columns=["sentence1", "sentence2"],
    output_column="label",
)

# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}\nA. 对\nB. 错\nC. 可能\n请从“A”,“B”,“C”中进行选择。\n答:"
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

ocnli_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Reduce the generated text to its first capital letter (A/B/C).
    pred_postprocessor=dict(type="first-capital"),
)

ocnli_datasets = [
    dict(
        abbr="ocnli",
        type=cmnliDataset_V2,  # ocnli share the same format with cmnli
        path="./data/CLUE/OCNLI/dev.json",
        reader_cfg=ocnli_reader_cfg,
        infer_cfg=ocnli_infer_cfg,
        eval_cfg=ocnli_eval_cfg,
    )
]
configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_4f864a.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for FewCLUE BUSTM (sentence-pair semantic matching):
# label 0 = unrelated, label 1 = same meaning; lowest perplexity wins.
# Evaluates both the few-shot dev split and the public test split.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

bustm_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

bustm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: "{sentence1}。\n{sentence2}。\n两句话说的毫不相关。",
            1: "{sentence1}。\n{sentence2}。\n两句话说的一个意思。"
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

bustm_datasets = [
    dict(
        type=HFDataset,
        abbr='bustm-dev',
        path='json',  # HF "json" loader; actual file given via data_files
        data_files='./data/FewCLUE/bustm/dev_few_all.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg),
    dict(
        type=HFDataset,
        abbr='bustm-test',
        path='json',
        data_files='./data/FewCLUE/bustm/test_public.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg)
]
configs/datasets/FewCLUE_chid/FewCLUE_chid_gen.py
0 → 100644
View file @
c94cc943
# Indirection config: pulls in the concrete CHID generation config so that
# `FewCLUE_chid_gen.py` always points at the current default variant.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_chid_gen_686c63 import chid_datasets  # noqa: F401, F403
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl.py
0 → 100644
View file @
c94cc943
# Indirection config: pulls in the concrete CLUEWSC perplexity config so that
# `FewCLUE_cluewsc_ppl.py` always points at the current default variant.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_cluewsc_ppl_2a9e61 import cluewsc_datasets  # noqa: F401, F403
configs/datasets/FewCLUE_csl/FewCLUE_csl_gen_1b0c02.py
0 → 100644
View file @
c94cc943
# Zero-shot generation config for FewCLUE CSL (keyword appropriateness):
# the model answers A (no) / B (yes) for whether the keywords fit the
# abstract; scored with accuracy after first-capital extraction.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CslDataset_V2

csl_reader_cfg = dict(
    input_columns=["abst", "keywords"],
    output_column="label",
)

csl_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "摘要:{abst}\n关键词:{keywords}\n上述关键词出现在学术期刊中是否恰当?\nA. 否\nB. 是\n请从”A“,”B“中进行选择。\n答:"
            )
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

csl_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Reduce the generated text to its first capital letter (A/B).
    pred_postprocessor=dict(type="first-capital"),
)

csl_datasets = [
    dict(
        abbr="csl_dev",
        type=CslDataset_V2,
        path="./data/FewCLUE/csl/dev_few_all.json",
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg,
    ),
    dict(
        abbr="csl_test",
        type=CslDataset_V2,
        path="./data/FewCLUE/csl/test_public.json",
        reader_cfg=csl_reader_cfg,
        infer_cfg=csl_infer_cfg,
        eval_cfg=csl_eval_cfg,
    ),
]
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_gen_d6d06d.py
0 → 100644
View file @
c94cc943
# Zero-shot generation config for FewCLUE EPRSTMT (e-commerce review
# sentiment): A = positive, B = negative; accuracy after first-capital
# extraction. Evaluates both the dev and public-test splits.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import eprstmtDataset_V2

eprstmt_reader_cfg = dict(
    input_columns=["sentence"], output_column="label", test_split="train")

eprstmt_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                '内容: "{sentence}"。请对上述内容进行情绪分类。\nA. 积极\nB. 消极\n请从”A“,”B“中进行选择。\n答:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

eprstmt_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    # Reduce the generated text to its first capital letter (A/B).
    pred_postprocessor=dict(type="first-capital"),
)

eprstmt_datasets = [
    dict(
        abbr="eprstmt-dev",
        type=eprstmtDataset_V2,
        path="./data/FewCLUE/eprstmt/dev_few_all.json",
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg,
    ),
    dict(
        abbr="eprstmt-test",
        type=eprstmtDataset_V2,
        path="./data/FewCLUE/eprstmt/test_public.json",
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg,
    ),
]
configs/datasets/FewCLUE_eprstmt/FewCLUE_eprstmt_ppl_d3c387.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for FewCLUE EPRSTMT: one chat-style template per
# sentiment label (Negative/Positive); lowest perplexity wins.
# Evaluates both the dev and public-test splits via the HF json loader.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

eprstmt_reader_cfg = dict(
    input_columns=['sentence'], output_column='label', test_split='train')

eprstmt_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'Negative':
            dict(round=[
                dict(role='HUMAN', prompt='内容: "{sentence}"。情绪分类:'),
                dict(role='BOT', prompt='消极。')
            ]),
            'Positive':
            dict(round=[
                dict(role='HUMAN', prompt='内容: "{sentence}"。情绪分类:'),
                dict(role='BOT', prompt='积极。')
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

eprstmt_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

eprstmt_datasets = [
    dict(
        type=HFDataset,
        abbr='eprstmt-dev',
        path='json',  # HF "json" loader; actual file given via data_files
        data_files='./data/FewCLUE/eprstmt/dev_few_all.json',
        split='train',
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg),
    dict(
        type=HFDataset,
        abbr='eprstmt-test',
        path='json',
        data_files='./data/FewCLUE/eprstmt/test_public.json',
        split='train',
        reader_cfg=eprstmt_reader_cfg,
        infer_cfg=eprstmt_infer_cfg,
        eval_cfg=eprstmt_eval_cfg)
]
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_gen.py
0 → 100644
View file @
c94cc943
# Indirection config: pulls in the concrete TNEWS generation config so that
# `FewCLUE_tnews_gen.py` always points at the current default variant.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_tnews_gen_8d59ba import tnews_datasets  # noqa: F401, F403
configs/datasets/PJExam/PJExam_gen_785c37.py
0 → 100644
View file @
c94cc943
# Generation config for the PJExam exam-question benchmark: builds one
# dataset entry per exam variant. The prompt instructs the model to write
# its reasoning between 【解析】/<eoe> and its answer letter between
# 【答案】/<eoa>; PJExamEvaluator scores the extracted answer.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import PJExamDataset, PJExamEvaluator

PJExam_datasets = []
for _name in [
        'gk-2022-v1', 'gk-2022-v1-math', 'gk-2023-v1', 'gk-2023-v1-math',
        'gk-2023-v2', 'gk-2023-v2-math', 'zk-2022-v1'
]:
    # Instruction prefix prepended to every question.
    _hint = "请你做一道</major>选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】A<eoa>\n完整的题目回答的格式如下:\n【解析】...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:\n"
    # BUG FIX: the original had a trailing comma after this dict's closing
    # brace (`},`), which bound `_reader_cfg` to a 1-tuple containing the
    # dict instead of the dict itself.
    _reader_cfg = {
        "input_columns": ['question'],
        "output_column": 'std_ans',
    }
    _infer_cfg = {
        "ice_template": {
            "type": PromptTemplate,
            "template": {
                "round": [{
                    "role": "HUMAN",
                    "prompt": _hint + "{question}",
                }]
            },
            "ice_token": "</E>"
        },
        "retriever": {
            "type": ZeroRetriever
        },
        "inferencer": {
            "type": GenInferencer,
            "max_out_len": 1024,
        }
    }
    _eval_cfg = {
        "evaluator": {
            "type": PJExamEvaluator
        },
        "pred_role": "BOT",
        "ds_column": "eval_infos"
    }
    _dataset = {
        "type": PJExamDataset,
        "abbr": "PJExamDataset-" + _name,
        "path": './data/PJExam',
        "name": _name,
        "reader_cfg": _reader_cfg,
        "infer_cfg": _infer_cfg,
        "eval_cfg": _eval_cfg,
    }
    PJExam_datasets.append(_dataset)

# Clean up loop temporaries so they don't leak into the config namespace.
del _name, _hint, _reader_cfg, _infer_cfg, _eval_cfg, _dataset
configs/datasets/SuperGLUE_AX_b/SuperGLUE_AX_b_ppl_a65d62.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for SuperGLUE AX-b (broad-coverage NLI diagnostic):
# one plain-string template per label; lowest perplexity wins.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

AX_b_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

AX_b_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': '{sentence1}?entailment, {sentence2}',
            'not_entailment': '{sentence1}?not_entailment, {sentence2}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

AX_b_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

AX_b_datasets = [
    dict(
        type=HFDataset,
        abbr='AX_b',
        path='json',  # HF "json" loader; actual file given via data_files
        data_files='./data/SuperGLUE/AX-b/AX-b.jsonl',
        split='train',
        reader_cfg=AX_b_reader_cfg,
        infer_cfg=AX_b_infer_cfg,
        eval_cfg=AX_b_eval_cfg)
]
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl.py
0 → 100644
View file @
c94cc943
# Indirection config: pulls in the concrete AX-g perplexity config so that
# `SuperGLUE_AX_g_ppl.py` always points at the current default variant.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_AX_g_ppl_8d9bf9 import AX_g_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_d489ee.py
0 → 100644
View file @
c94cc943
# Perplexity-based config for SuperGLUE AX-g (Winogender NLI diagnostic):
# one plain-string template per label over premise/hypothesis pairs;
# lowest perplexity wins.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

AX_g_reader_cfg = dict(
    input_columns=['hypothesis', 'premise'],
    output_column='label',
    test_split='train')

AX_g_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'entailment': '{premise}?entailment, {hypothesis}',
            'not_entailment': '{premise}?not_entailment, {hypothesis}'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

AX_g_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

AX_g_datasets = [
    dict(
        type=HFDataset,
        abbr='AX_g',
        path='json',  # HF "json" loader; actual file given via data_files
        data_files='./data/SuperGLUE/AX-g/AX-g.jsonl',
        split='train',
        reader_cfg=AX_g_reader_cfg,
        infer_cfg=AX_g_infer_cfg,
        eval_cfg=AX_g_eval_cfg)
]
Prev
1
2
3
4
5
6
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment