jerrrrry / opencompass · Commits · be3dfa50

Commit be3dfa50, authored Aug 06, 2025 by jerrrrry
Initial commit
Pipeline #2876 failed in 0 seconds
Changes: 807 · Pipelines: 1
Showing 20 changed files with 750 additions and 0 deletions (+750, -0)
opencompass/configs/datasets/CLUE_ocnli/CLUE_ocnli_ppl_98dd6e.py (+35, -0)
opencompass/configs/datasets/CLUE_ocnli/CLUE_ocnli_ppl_ef69e7.py (+51, -0)
opencompass/configs/datasets/CLUE_ocnli/CLUE_ocnli_ppl_fdc6de.py (+55, -0)
opencompass/configs/datasets/ChemBench/ChemBench_gen.py (+77, -0)
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen.py (+4, -0)
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen_634f41.py (+53, -0)
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl.py (+4, -0)
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_4b16c0.py (+65, -0)
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_9ef540.py (+43, -0)
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_e53034.py (+59, -0)
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_gen.py (+4, -0)
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_gen_0a29a2.py (+51, -0)
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl.py (+4, -0)
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_8f2872.py (+45, -0)
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_acccb5.py (+39, -0)
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen.py (+4, -0)
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen_c68933.py (+51, -0)
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl.py (+4, -0)
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_12e4e0.py (+58, -0)
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_4284a0.py (+44, -0)
opencompass/configs/datasets/CLUE_ocnli/CLUE_ocnli_ppl_98dd6e.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'], output_column='label')

# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction':
            '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
            'entailment':
            '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
            'neutral':
            '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ocnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )

ocnli_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli',
        path='json',
        split='train',
        data_files='./data/CLUE/OCNLI/dev.json',
        reader_cfg=ocnli_reader_cfg,
        infer_cfg=ocnli_infer_cfg,
        eval_cfg=ocnli_eval_cfg)
]
opencompass/configs/datasets/CLUE_ocnli/CLUE_ocnli_ppl_ef69e7.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'], output_column='label')

# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction':
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
                dict(role='BOT', prompt='错')
            ]),
            'entailment':
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
                dict(role='BOT', prompt='对')
            ]),
            'neutral':
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='如果{sentence1}为真,那么{sentence2}也为真吗?'),
                dict(role='BOT', prompt='可能')
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ocnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )

ocnli_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli',
        path='json',
        split='train',
        data_files='./data/CLUE/OCNLI/dev.json',
        reader_cfg=ocnli_reader_cfg,
        infer_cfg=ocnli_infer_cfg,
        eval_cfg=ocnli_eval_cfg)
]
opencompass/configs/datasets/CLUE_ocnli/CLUE_ocnli_ppl_fdc6de.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

ocnli_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'], output_column='label')

# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'contradiction':
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'),
                dict(role='BOT', prompt='矛盾')
            ]),
            'entailment':
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'),
                dict(role='BOT', prompt='蕴含')
            ]),
            'neutral':
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'),
                dict(role='BOT', prompt='无关')
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

ocnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )

ocnli_datasets = [
    dict(
        type=HFDataset,
        abbr='ocnli',
        path='json',
        split='train',
        data_files='./data/CLUE/OCNLI/dev.json',
        reader_cfg=ocnli_reader_cfg,
        infer_cfg=ocnli_infer_cfg,
        eval_cfg=ocnli_eval_cfg)
]
opencompass/configs/datasets/ChemBench/ChemBench_gen.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ChemBenchDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess

chembench_reader_cfg = dict(
    input_columns=['input', 'A', 'B', 'C', 'D'],
    output_column='target',
    train_split='dev')

chembench_all_sets = [
    'Name_Conversion',
    'Property_Prediction',
    'Mol2caption',
    'Caption2mol',
    'Product_Prediction',
    'Retrosynthesis',
    'Yield_Prediction',
    'Temperature_Prediction',
    'Solvent_Prediction',
]

chembench_datasets = []
for _name in chembench_all_sets:
    # _hint = f'There is a single choice question about {_name.replace("_", " ")}. Answer the question by replying A, B, C or D.'
    _hint = f'There is a single choice question about chemistry. Answer the question by replying A, B, C or D.'
    chembench_infer_cfg = dict(
        ice_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=
                    f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
                ),
                dict(role='BOT', prompt='{target}\n')
            ]),
        ),
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin='</E>',
                round=[
                    dict(
                        role='HUMAN',
                        prompt=
                        f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
                    ),
                ],
            ),
            ice_token='</E>',
        ),
        retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
        inferencer=dict(type=GenInferencer),
    )

    chembench_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_postprocessor=dict(type=first_capital_postprocess))

    chembench_datasets.append(
        dict(
            abbr=f'ChemBench_{_name}',
            type=ChemBenchDataset,
            path='opencompass/ChemBench',
            name=_name,
            reader_cfg=chembench_reader_cfg,
            infer_cfg=chembench_infer_cfg,
            eval_cfg=chembench_eval_cfg,
        ))

del _name, _hint
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen.py
new file mode 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_bustm_gen_634f41 import bustm_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_gen_634f41.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AFQMCDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess

bustm_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

bustm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                '语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?\nA. 无关\nB. 相关\n请从“A”,“B”中进行选择。\n答:',
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

bustm_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_capital_postprocess),
)

bustm_datasets = [
    dict(
        abbr='bustm-dev',
        type=AFQMCDatasetV2,  # bustm shares the same format with AFQMC
        path='./data/FewCLUE/bustm/dev_few_all.json',
        local_mode=True,
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg,
    ),
    dict(
        abbr='bustm-test',
        type=AFQMCDatasetV2,  # bustm shares the same format with AFQMC
        path='./data/FewCLUE/bustm/test_public.json',
        local_mode=True,
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg,
    ),
]
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl.py
new file mode 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_bustm_ppl_e53034 import bustm_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_4b16c0.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

bustm_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

bustm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0:
            dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt='请判断以下两句话说的是否是一个意思:')
                ],
                round=[
                    dict(role='HUMAN', prompt='{sentence1},{sentence2}'),
                    dict(role='BOT', prompt='两句话说的毫不相关。')
                ]),
            1:
            dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt='请判断以下两句话说的是否是一个意思:')
                ],
                round=[
                    dict(role='HUMAN', prompt='{sentence1},{sentence2}'),
                    dict(role='BOT', prompt='两句话说是的一个意思。')
                ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

bustm_datasets = [
    dict(
        type=HFDataset,
        abbr='bustm-dev',
        path='json',
        data_files='./data/FewCLUE/bustm/dev_few_all.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg),
    dict(
        type=HFDataset,
        abbr='bustm-test',
        path='json',
        data_files='./data/FewCLUE/bustm/test_public.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg)
]
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_9ef540.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

bustm_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

bustm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: '{sentence1}。\n{sentence2}。\n两句话说的毫不相关。',
            1: '{sentence1}。\n{sentence2}。\n两句话说的一个意思。'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

bustm_datasets = [
    dict(
        type=HFDataset,
        abbr='bustm-dev',
        path='json',
        data_files='./data/FewCLUE/bustm/dev_few_all.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg),
    dict(
        type=HFDataset,
        abbr='bustm-test',
        path='json',
        data_files='./data/FewCLUE/bustm/test_public.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg)
]
opencompass/configs/datasets/FewCLUE_bustm/FewCLUE_bustm_ppl_e53034.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

bustm_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'],
    output_column='label',
    test_split='train')

bustm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0:
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?'),
                dict(role='BOT', prompt='两句话说的毫不相关。')
            ]),
            1:
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?'),
                dict(role='BOT', prompt='两句话说是的一个意思。')
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

bustm_datasets = [
    dict(
        type=HFDataset,
        abbr='bustm-dev',
        path='json',
        data_files='./data/FewCLUE/bustm/dev_few_all.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg),
    dict(
        type=HFDataset,
        abbr='bustm-test',
        path='json',
        data_files='./data/FewCLUE/bustm/test_public.json',
        split='train',
        reader_cfg=bustm_reader_cfg,
        infer_cfg=bustm_infer_cfg,
        eval_cfg=bustm_eval_cfg)
]
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_gen.py
new file mode 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_chid_gen_0a29a2 import chid_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_gen_0a29a2.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess

chid_reader_cfg = dict(
    input_columns=['content', 'A', 'B', 'C', 'D', 'E', 'F', 'G'],
    output_column='answer',
)

chid_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                '{content}\n请选择______处所填的词\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nF. {F}\nG. {G}\n请从”A“,”B“,”C“,”D“,”E“,”F“,”G“中进行选择。答:',
            ),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

chid_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_capital_postprocess),
)

chid_datasets = [
    dict(
        abbr='chid-dev',
        type=CHIDDatasetV2,
        path='./data/FewCLUE/chid/dev_few_all.json',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg,
    ),
    dict(
        abbr='chid-test',
        type=CHIDDatasetV2,
        path='./data/FewCLUE/chid/test_public.json',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg,
    ),
]
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl.py
new file mode 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_chid_ppl_8f2872 import chid_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_8f2872.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDataset

chid_reader_cfg = dict(
    input_columns=[f'content{i}' for i in range(7)], output_column='answer')

chid_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            i: dict(round=[
                dict(role='HUMAN', prompt=f'以下句子是否通顺?\n{{content{i}}}'),
                dict(role='BOT', prompt='这个句子是通顺的。'),
            ], )
            for i in range(7)
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

chid_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

chid_datasets = [
    dict(
        type=CHIDDataset,
        path='json',
        abbr='chid-dev',
        data_files='./data/FewCLUE/chid/dev_few_all.json',
        split='train',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg),
    dict(
        type=CHIDDataset,
        path='json',
        abbr='chid-test',
        data_files='./data/FewCLUE/chid/test_public.json',
        split='train',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg),
]
opencompass/configs/datasets/FewCLUE_chid/FewCLUE_chid_ppl_acccb5.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDataset

chid_reader_cfg = dict(
    input_columns=[f'content{i}' for i in range(7)], output_column='answer')

chid_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            i: f'以下句子是否通顺?\n{{content{i}}}\n这个句子是通顺的。'
            for i in range(7)
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

chid_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

chid_datasets = [
    dict(
        type=CHIDDataset,
        path='json',
        abbr='chid-dev',
        data_files='./data/FewCLUE/chid/dev_few_all.json',
        split='train',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg),
    dict(
        type=CHIDDataset,
        path='json',
        abbr='chid-test',
        data_files='./data/FewCLUE/chid/test_public.json',
        split='train',
        reader_cfg=chid_reader_cfg,
        infer_cfg=chid_infer_cfg,
        eval_cfg=chid_eval_cfg),
]
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen.py
new file mode 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_cluewsc_gen_c68933 import cluewsc_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_gen_c68933.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess

cluewsc_reader_cfg = dict(
    input_columns=['span1', 'span2', 'text', 'new_text'],
    output_column='label',
)

cluewsc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                '{text}\n此处,“{span2}”是否指代“{span1}“?\nA. 是\nB. 否\n请从”A“,”B“中进行选择。\n答:',
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

cluewsc_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=first_capital_postprocess),
)

cluewsc_datasets = [
    dict(
        abbr='cluewsc-dev',
        type=CluewscDatasetV2,
        path='./data/FewCLUE/cluewsc/dev_few_all.json',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg,
    ),
    dict(
        abbr='cluewsc-test',
        type=CluewscDatasetV2,
        path='./data/FewCLUE/cluewsc/test_public.json',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg,
    ),
]
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl.py
new file mode 100644

from mmengine.config import read_base

with read_base():
    from .FewCLUE_cluewsc_ppl_868415 import cluewsc_datasets  # noqa: F401, F403
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_12e4e0.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDataset

cluewsc_reader_cfg = dict(
    input_columns=['span1', 'span2', 'text', 'new_text'],
    output_column='answer')

cluewsc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0:
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt=
                    "{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"?"
                ),
                dict(role='BOT', prompt='No.')
            ]),
            1:
            dict(round=[
                dict(
                    role='HUMAN',
                    prompt=
                    "{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"?"
                ),
                dict(role='BOT', prompt='Yes.')
            ]),
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

cluewsc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

cluewsc_datasets = [
    dict(
        type=CluewscDataset,
        path='json',
        abbr='cluewsc-dev',
        data_files='./data/FewCLUE/cluewsc/dev_few_all.json',
        split='train',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg),
    dict(
        type=CluewscDataset,
        path='json',
        abbr='cluewsc-test',
        data_files='./data/FewCLUE/cluewsc/test_public.json',
        split='train',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg),
]
opencompass/configs/datasets/FewCLUE_cluewsc/FewCLUE_cluewsc_ppl_4284a0.py
new file mode 100644

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDataset

cluewsc_reader_cfg = dict(
    input_columns=['span1', 'span2', 'text', 'new_text'],
    output_column='answer')

cluewsc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0:
            "{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"? No.",
            1:
            "{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"? Yes.",
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

cluewsc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

cluewsc_datasets = [
    dict(
        type=CluewscDataset,
        path='json',
        abbr='cluewsc-dev',
        data_files='./data/FewCLUE/cluewsc/dev_few_all.json',
        split='train',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg),
    dict(
        type=CluewscDataset,
        path='json',
        abbr='cluewsc-test',
        data_files='./data/FewCLUE/cluewsc/test_public.json',
        split='train',
        reader_cfg=cluewsc_reader_cfg,
        infer_cfg=cluewsc_infer_cfg,
        eval_cfg=cluewsc_eval_cfg),
]
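
The FewCLUE_*_gen.py and FewCLUE_*_ppl.py files in this commit are thin aggregators: each one re-exports a pinned variant of a dataset config through mmengine's read_base(). As a hedged illustration only (this snippet is not part of the commit, and the relative import paths assume a top-level config placed under opencompass/configs/), an evaluation config can gather several of these *_datasets lists in the same way:

from mmengine.config import read_base

with read_base():
    # Relative paths are assumptions about where this config file would live.
    from .datasets.CLUE_ocnli.CLUE_ocnli_ppl_fdc6de import ocnli_datasets
    from .datasets.FewCLUE_bustm.FewCLUE_bustm_gen import bustm_datasets
    from .datasets.FewCLUE_chid.FewCLUE_chid_ppl import chid_datasets

# Concatenating the imported *_datasets lists into a single `datasets` list
# is the usual pattern for a top-level OpenCompass evaluation config.
datasets = [*ocnli_datasets, *bustm_datasets, *chid_datasets]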