Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
opencompass
Commits
cbe9fe2c
Commit
cbe9fe2c
authored
Jul 05, 2023
by
Ezra-Yu
Committed by
gaotong
Jul 05, 2023
Browse files
Add Release Contribution
parent
36f11110
Changes
65
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
754 additions
and
0 deletions
+754
-0
.pre-commit-config.yaml
.pre-commit-config.yaml
+69
-0
configs/datasets/ARC_e/ARC_e_ppl.py
configs/datasets/ARC_e/ARC_e_ppl.py
+4
-0
configs/datasets/CLUE_C3/CLUE_C3_ppl_588820.py
configs/datasets/CLUE_C3/CLUE_C3_ppl_588820.py
+37
-0
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_72a8d5.py
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_72a8d5.py
+34
-0
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_d7096f.py
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_d7096f.py
+33
-0
configs/datasets/CLUE_DRCD/CLUE_DRCD_gen_03b96b.py
configs/datasets/CLUE_DRCD/CLUE_DRCD_gen_03b96b.py
+34
-0
configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen_305431.py
configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen_305431.py
+50
-0
configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_b6cd88.py
configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_b6cd88.py
+45
-0
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_fc45f0.py
...gs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_fc45f0.py
+58
-0
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_784b9e.py
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_784b9e.py
+48
-0
configs/datasets/GaokaoBench/GaokaoBench_gen.py
configs/datasets/GaokaoBench/GaokaoBench_gen.py
+4
-0
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen_7a5dee.py
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen_7a5dee.py
+42
-0
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_8d9bf9.py
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_8d9bf9.py
+53
-0
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl.py
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl.py
+4
-0
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen.py
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen.py
+4
-0
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_6d5e67.py
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_6d5e67.py
+43
-0
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_26c9dc.py
...atasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_26c9dc.py
+42
-0
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_gen_c39367.py
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_gen_c39367.py
+46
-0
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_ppl_4118db.py
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_ppl_4118db.py
+55
-0
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_ppl_d316eb.py
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_ppl_d316eb.py
+49
-0
No files found.
.pre-commit-config.yaml
0 → 100644
View file @
cbe9fe2c
exclude
:
|
(?x)^(
tests/data/|
opencompass/models/internal/|
opencompass/utils/internal/|
configs/
)
repos
:
-
repo
:
https://github.com/PyCQA/flake8
rev
:
5.0.4
hooks
:
-
id
:
flake8
-
repo
:
https://github.com/PyCQA/isort
rev
:
5.11.5
hooks
:
-
id
:
isort
-
repo
:
https://github.com/pre-commit/mirrors-yapf
rev
:
v0.32.0
hooks
:
-
id
:
yapf
-
repo
:
https://github.com/codespell-project/codespell
rev
:
v2.2.1
hooks
:
-
id
:
codespell
-
repo
:
https://github.com/pre-commit/pre-commit-hooks
rev
:
v4.3.0
hooks
:
-
id
:
trailing-whitespace
exclude
:
|
(?x)^(
dicts/|
projects/.*?/dicts/
)
-
id
:
check-yaml
-
id
:
end-of-file-fixer
exclude
:
|
(?x)^(
dicts/|
projects/.*?/dicts/
)
-
id
:
requirements-txt-fixer
-
id
:
double-quote-string-fixer
-
id
:
check-merge-conflict
-
id
:
fix-encoding-pragma
args
:
[
"
--remove"
]
-
id
:
mixed-line-ending
args
:
[
"
--fix=lf"
]
-
id
:
mixed-line-ending
args
:
[
"
--fix=lf"
]
-
repo
:
https://github.com/executablebooks/mdformat
rev
:
0.7.9
hooks
:
-
id
:
mdformat
args
:
[
"
--number"
,
"
--table-width"
,
"
200"
]
additional_dependencies
:
-
mdformat-openmmlab
-
mdformat_frontmatter
-
linkify-it-py
-
repo
:
https://github.com/myint/docformatter
rev
:
v1.3.1
hooks
:
-
id
:
docformatter
args
:
[
"
--in-place"
,
"
--wrap-descriptions"
,
"
79"
]
# - repo: https://github.com/open-mmlab/pre-commit-hooks
# rev: v0.2.0 # Use the ref you want to point at
# hooks:
# - id: check-algo-readme
# - id: check-copyright
# args: ["mmocr", "tests", "tools"] # these directories will be checked
configs/datasets/ARC_e/ARC_e_ppl.py
0 → 100644
View file @
cbe9fe2c
# ARC-e (perplexity-based) dataset config: re-exports the concrete
# variant selected by its prompt-hash suffix.
from mmengine.config import read_base

with read_base():
    from .ARC_e_ppl_f86898 import ARC_e_datasets  # noqa: F401, F403
configs/datasets/CLUE_C3/CLUE_C3_ppl_588820.py
0 → 100644
View file @
cbe9fe2c
# CLUE C3 multiple-choice reading comprehension, scored by perplexity.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import C3Dataset

# Columns handed to the prompt template; 'label' holds the gold answer.
C3_reader_cfg = dict(
    input_columns=[
        'question',
        'content',
        'choice0',
        'choice1',
        'choice2',
        'choice3',
        'choices',
    ],
    output_column='label')

# One candidate prompt per choice index; the PPL inferencer scores every
# candidate and picks the lowest-perplexity one.
C3_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            i: dict(round=[
                dict(role="HUMAN", prompt="文章:{content}\n问题:{question}"),
                dict(role="BOT", prompt=f"答案:{{choice{i}}}"),
            ])
            for i in range(4)
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

C3_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

C3_datasets = [
    dict(
        type=C3Dataset,
        abbr='C3',
        path='./data/CLUE/C3/dev_0.json',
        reader_cfg=C3_reader_cfg,
        infer_cfg=C3_infer_cfg,
        eval_cfg=C3_eval_cfg),
]
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_72a8d5.py
0 → 100644
View file @
cbe9fe2c
# CLUE CMRC extractive QA, generation-based, scored by exact match.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset

CMRC_reader_cfg = dict(
    input_columns=['question', 'context'], output_column='answers')

# Single-turn prompt: show the passage, then ask the question.
CMRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="文章:{context}\n根据上文,回答如下问题:\n{question}\n答:"),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

CMRC_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    pred_role="BOT",
)

CMRC_datasets = [
    dict(
        type=CMRCDataset,
        abbr='CMRC_dev',
        path='./data/CLUE/CMRC/dev.json',
        reader_cfg=CMRC_reader_cfg,
        infer_cfg=CMRC_infer_cfg,
        eval_cfg=CMRC_eval_cfg),
]
configs/datasets/CLUE_CMRC/CLUE_CMRC_gen_d7096f.py
0 → 100644
View file @
cbe9fe2c
# CLUE CMRC extractive QA, generation-based; this variant splits the
# answer cue "答:" into a separate BOT turn.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import CMRCDataset

CMRC_reader_cfg = dict(
    input_columns=['question', 'context'], output_column='answers')

CMRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role="HUMAN", prompt="文章:{context}\n根据上文,回答如下问题:{question}"),
            dict(role="BOT", prompt="答:"),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

CMRC_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    pred_role="BOT",
)

CMRC_datasets = [
    dict(
        type=CMRCDataset,
        abbr='CMRC_dev',
        path='./data/CLUE/CMRC/dev.json',
        reader_cfg=CMRC_reader_cfg,
        infer_cfg=CMRC_infer_cfg,
        eval_cfg=CMRC_eval_cfg),
]
configs/datasets/CLUE_DRCD/CLUE_DRCD_gen_03b96b.py
0 → 100644
View file @
cbe9fe2c
# CLUE DRCD (traditional-Chinese MRC), generation-based, exact match.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator
from opencompass.datasets import DRCDDataset

DRCD_reader_cfg = dict(
    input_columns=['question', 'context'], output_column='answers')

DRCD_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="文章:{context}\n根据上文,回答如下问题:\n{question}\n答:"),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

DRCD_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    pred_role="BOT",
)

DRCD_datasets = [
    dict(
        type=DRCDDataset,
        abbr='DRCD_dev',
        path='./data/CLUE/DRCD/dev.json',
        reader_cfg=DRCD_reader_cfg,
        infer_cfg=DRCD_infer_cfg,
        eval_cfg=DRCD_eval_cfg),
]
configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen_305431.py
0 → 100644
View file @
cbe9fe2c
# FewCLUE bustm sentence-pair matching, generation-based A/B choice.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AFQMCDataset_V2

bustm_reader_cfg = dict(
    input_columns=["sentence1", "sentence2"],
    output_column="label",
    test_split="train")

bustm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?\nA. 无关\nB. 相关\n请从“A”,“B”中进行选择。\n答:",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Grade on the first capital letter (A/B) found in the model's reply.
bustm_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

bustm_datasets = [
    dict(
        abbr="bustm-dev",
        # bustm shares the same data format as AFQMC, so its loader is reused
        type=AFQMCDataset_V2,
        path="./data/FewCLUE/bustm/dev_few_all.json",
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg,
    ),
    dict(
        abbr="bustm-test",
        # bustm shares the same data format as AFQMC, so its loader is reused
        type=AFQMCDataset_V2,
        path="./data/FewCLUE/bustm/test_public.json",
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg,
    ),
]
configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_b6cd88.py
0 → 100644
View file @
cbe9fe2c
# FewCLUE chid idiom cloze: each of the 7 candidate fillings yields a
# 'contentN' column; the PPL inferencer picks the most fluent sentence.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDataset

chid_reader_cfg = dict(
    input_columns=[f'content{i}' for i in range(7)],
    output_column='answer')

chid_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            i: dict(round=[
                dict(role="HUMAN", prompt=f"以下句子是否通顺?\n{{content{i}}}"),
                dict(role="BOT", prompt="这个句子是通顺的。"),
            ])
            for i in range(7)
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

chid_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_role="BOT")

chid_datasets = [
    dict(
        type=CHIDDataset,
        path='json',
        abbr='chid-dev',
        data_files='./data/FewCLUE/chid/dev_few_all.json',
        split='train',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg),
    dict(
        type=CHIDDataset,
        path='json',
        abbr='chid-test',
        data_files='./data/FewCLUE/chid/test_public.json',
        split='train',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg),
]
configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_fc45f0.py
0 → 100644
View file @
cbe9fe2c
# FewCLUE cluewsc coreference: perplexity comparison between a "No." and
# a "Yes." continuation, keyed by the 0/1 answer label.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDataset

cluewsc_reader_cfg = dict(
    input_columns=['span1', 'span2', 'text', 'new_text'],
    output_column='answer')

cluewsc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"?"),
                dict(role="BOT", prompt="No."),
            ]),
            1: dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"?"),
                dict(role="BOT", prompt="Yes."),
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

cluewsc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

cluewsc_datasets = [
    dict(
        type=CluewscDataset,
        path='json',
        abbr='cluewsc-dev',
        data_files='./data/FewCLUE/cluewsc/dev_few_all.json',
        split='train',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg),
    dict(
        type=CluewscDataset,
        path='json',
        abbr='cluewsc-test',
        data_files='./data/FewCLUE/cluewsc/test_public.json',
        split='train',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg),
]
configs/datasets/FewCLUE_tnews/FewCLUE_tnews_ppl_784b9e.py
0 → 100644
View file @
cbe9fe2c
# FewCLUE tnews news-topic classification: one PPL candidate per label
# description, scored by how naturally the label completes the prompt.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import TNewsDataset

tnews_reader_cfg = dict(
    input_columns='sentence', output_column='label_desc2')

# Human-readable label descriptions; order must match the dataset's
# label_desc2 values.
tnews_labels = [
    '农业新闻',
    '旅游新闻',
    '游戏新闻',
    '科技类别公司新闻',
    '体育类别新闻',
    '初升高教育新闻',
    '娱乐圈新闻',
    '投资资讯',
    '军事类别常识',
    '车辆新闻',
    '楼市新闻',
    '环球不含中国类别新闻',
    '书籍文化历史类别新闻',
    '故事类别新闻',
    '股票市场类别新闻',
]

tnews_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            lb: dict(round=[
                dict(role='HUMAN', prompt='{sentence}\n上述内容属于什么新闻?'),
                dict(role='BOT', prompt=lb),
            ])
            for lb in tnews_labels
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

tnews_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

tnews_datasets = [
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-dev',
        data_files='./data/FewCLUE/tnews/dev_few_all.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg),
    dict(
        type=TNewsDataset,
        path='json',
        abbr='tnews-test',
        data_files='./data/FewCLUE/tnews/test_public.json',
        split='train',
        reader_cfg=tnews_reader_cfg,
        infer_cfg=tnews_infer_cfg,
        eval_cfg=tnews_eval_cfg),
]
configs/datasets/GaokaoBench/GaokaoBench_gen.py
0 → 100644
View file @
cbe9fe2c
# GaokaoBench (generation) dataset config: re-exports the concrete
# variant selected by its prompt-hash suffix.
from mmengine.config import read_base

with read_base():
    from .GaokaoBench_gen_aed980 import GaokaoBench_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_gen_7a5dee.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE AX-g entailment, generation-based A/B choice.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AXDataset_V2

AX_g_reader_cfg = dict(
    input_columns=["hypothesis", "premise"],
    output_column="label",
)

AX_g_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?\nA. Yes\nB. No\nAnswer:"),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Grade on the first capital letter (A/B) found in the model's reply.
AX_g_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

AX_g_datasets = [
    dict(
        abbr="AX_g",
        type=AXDataset_V2,
        path="./data/SuperGLUE/AX-g/AX-g.jsonl",
        reader_cfg=AX_g_reader_cfg,
        infer_cfg=AX_g_infer_cfg,
        eval_cfg=AX_g_eval_cfg,
    ),
]
configs/datasets/SuperGLUE_AX_g/SuperGLUE_AX_g_ppl_8d9bf9.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE AX-g entailment, perplexity-based: one Yes/No continuation
# per label string, loaded through the generic HF json loader.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

AX_g_reader_cfg = dict(
    input_columns=["hypothesis", "premise"],
    output_column="label",
    test_split="train")

AX_g_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            "entailment": dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"),
                dict(role="BOT", prompt="Yes"),
            ]),
            "not_entailment": dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "{premise}\n{hypothesis}\nIs the sentence below entailed by the sentence above?"),
                dict(role="BOT", prompt="No"),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

AX_g_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

AX_g_datasets = [
    dict(
        type=HFDataset,
        abbr="AX_g",
        path="json",
        data_files="./data/SuperGLUE/AX-g/AX-g.jsonl",
        split="train",
        reader_cfg=AX_g_reader_cfg,
        infer_cfg=AX_g_infer_cfg,
        eval_cfg=AX_g_eval_cfg,
    ),
]
configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE BoolQ (perplexity) dataset config: re-exports the concrete
# variant selected by its prompt-hash suffix.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_BoolQ_ppl_f80fb0 import BoolQ_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_CB/SuperGLUE_CB_gen.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE CB (generation) dataset config: re-exports the concrete
# variant selected by its prompt-hash suffix.
from mmengine.config import read_base

with read_base():
    from .SuperGLUE_CB_gen_bb97e1 import CB_datasets  # noqa: F401, F403
configs/datasets/SuperGLUE_COPA/SuperGLUE_COPA_gen_6d5e67.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE COPA causal reasoning, generation-based A/B choice.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import COPADataset_V2

COPA_reader_cfg = dict(
    input_columns=["question", "premise", "choice1", "choice2"],
    output_column="label",
)

COPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "{premise}\nQuestion: Which may be the {question}?\nA. {choice1}\nB. {choice2}\nAnswer:"),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Grade on the first capital letter (A/B) found in the model's reply.
COPA_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

COPA_datasets = [
    dict(
        abbr="COPA",
        type=COPADataset_V2,
        path="./data/SuperGLUE/COPA/val.jsonl",
        reader_cfg=COPA_reader_cfg,
        infer_cfg=COPA_infer_cfg,
        eval_cfg=COPA_eval_cfg,
    ),
]
configs/datasets/SuperGLUE_MultiRC/SuperGLUE_MultiRC_gen_26c9dc.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE MultiRC answer verification, generation-based A/B choice.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MultiRCDataset_V2

MultiRC_reader_cfg = dict(
    input_columns=["question", "text", "answer"],
    output_column="label",
)

MultiRC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "{text}\nQuestion: {question}\nAnswer: {answer}\nIs it true?\nA. Yes\nB. No\nAnswer:"),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Grade on the first capital letter (A/B) found in the model's reply.
MultiRC_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

MultiRC_datasets = [
    dict(
        abbr="MultiRC",
        type=MultiRCDataset_V2,
        path="./data/SuperGLUE/MultiRC/val.jsonl",
        reader_cfg=MultiRC_reader_cfg,
        infer_cfg=MultiRC_infer_cfg,
        eval_cfg=MultiRC_eval_cfg,
    ),
]
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_gen_c39367.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE WiC word-sense disambiguation, generation-based A/B choice.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WiCDataset_V2

WiC_reader_cfg = dict(
    input_columns=[
        "word",
        "sentence1",
        "sentence2",
    ],
    output_column="label",
)

# NOTE(review): "sentenses" is a typo in the released prompt string;
# kept verbatim because changing it would change the evaluated prompt.
WiC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "Sentence 1: {sentence1}\nSentence 2: {sentence2}\nAre '{word}' in the above two sentenses the same?\nA. Yes\nB. No\nAnswer:"),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Grade on the first capital letter (A/B) found in the model's reply.
WiC_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

WiC_datasets = [
    dict(
        abbr="WiC",
        type=WiCDataset_V2,
        path="./data/SuperGLUE/WiC/val.jsonl",
        reader_cfg=WiC_reader_cfg,
        infer_cfg=WiC_infer_cfg,
        eval_cfg=WiC_eval_cfg,
    ),
]
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_ppl_4118db.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE WiC, perplexity-based: compare a "different" statement (label
# 0) against a "same" statement (label 1) for the two sentences.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WiCDataset

WiC_reader_cfg = dict(
    input_columns=[
        "word",
        "sentence1",
        "sentence2",
    ],
    output_column="answer",
    test_split="train")

# NOTE(review): "sentenses" is a typo in the released prompt strings;
# kept verbatim because changing it would change the evaluated prompt.
WiC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "Sentence 1: {sentence1}\nSentence 2: {sentence2}\n'{word}' in the above two sentenses are different."),
            ]),
            1: dict(round=[
                dict(
                    role="HUMAN",
                    prompt=
                    "Sentence 1: {sentence1}\nSentence 2: {sentence2}\n'{word}' in the above two sentenses are the same."),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

WiC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

WiC_datasets = [
    dict(
        type=WiCDataset,
        abbr="WiC",
        path="json",
        data_files="./data/SuperGLUE/WiC/val.jsonl",
        split="train",
        reader_cfg=WiC_reader_cfg,
        infer_cfg=WiC_infer_cfg,
        eval_cfg=WiC_eval_cfg,
    ),
]
configs/datasets/SuperGLUE_WiC/SuperGLUE_WiC_ppl_d316eb.py
0 → 100644
View file @
cbe9fe2c
# SuperGLUE WiC, perplexity-based (terse prompt variant): one
# "different" (label 0) vs one "same" (label 1) statement.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import WiCDataset

WiC_reader_cfg = dict(
    input_columns=[
        "word",
        "sentence1",
        "sentence2",
    ],
    output_column="answer",
    test_split="train")

WiC_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(
                    role="HUMAN",
                    prompt="{word} in {sentence1} and {sentence2} is different."),
            ]),
            1: dict(round=[
                dict(
                    role="HUMAN",
                    prompt="{word} in {sentence1} and {sentence2} is same."),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

WiC_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

WiC_datasets = [
    dict(
        type=WiCDataset,
        abbr="WiC",
        path="json",
        data_files="./data/SuperGLUE/WiC/val.jsonl",
        split="train",
        reader_cfg=WiC_reader_cfg,
        infer_cfg=WiC_infer_cfg,
        eval_cfg=WiC_eval_cfg,
    ),
]
Prev
1
2
3
4
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment