Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
Ecological Empowerment
opencompass
Commits
c289ecc0
Commit
c289ecc0
authored
Oct 21, 2025
by
xinghao
Browse files
Initial commit
parents
Pipeline
#3004
canceled with stages
Changes
750
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
427 additions
and
0 deletions
+427
-0
opencompass/configs/datasets/XCOPA/XCOPA_ppl.py
opencompass/configs/datasets/XCOPA/XCOPA_ppl.py
+4
-0
opencompass/configs/datasets/XCOPA/XCOPA_ppl_54058d.py
opencompass/configs/datasets/XCOPA/XCOPA_ppl_54058d.py
+31
-0
opencompass/configs/datasets/XLSum/XLSum_gen.py
opencompass/configs/datasets/XLSum/XLSum_gen.py
+4
-0
opencompass/configs/datasets/XLSum/XLSum_gen_2bb71c.py
opencompass/configs/datasets/XLSum/XLSum_gen_2bb71c.py
+29
-0
opencompass/configs/datasets/Xsum/Xsum_gen.py
opencompass/configs/datasets/Xsum/Xsum_gen.py
+4
-0
opencompass/configs/datasets/Xsum/Xsum_gen_31397e.py
opencompass/configs/datasets/Xsum/Xsum_gen_31397e.py
+39
-0
opencompass/configs/datasets/Xsum/Xsum_gen_8ea5f8.py
opencompass/configs/datasets/Xsum/Xsum_gen_8ea5f8.py
+30
-0
opencompass/configs/datasets/adv_glue/__init__.py
opencompass/configs/datasets/adv_glue/__init__.py
+11
-0
opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen.py
...figs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen.py
+4
-0
opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen_bd8ef0.py
...tasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen_bd8ef0.py
+42
-0
opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen.py
...atasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen.py
+4
-0
opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen_bd8ef0.py
.../adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen_bd8ef0.py
+42
-0
opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen.py
...figs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen.py
+4
-0
opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen_0b7326.py
...tasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen_0b7326.py
+42
-0
opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen.py
...onfigs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen.py
+4
-0
opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen_cdc277.py
...datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen_cdc277.py
+42
-0
opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen.py
...onfigs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen.py
+4
-0
opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen_8cc547.py
...datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen_8cc547.py
+42
-0
opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen.py
...figs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen.py
+4
-0
opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen_ee8d3b.py
...tasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen_ee8d3b.py
+41
-0
No files found.
Too many changes to show.
To preserve performance only
750 of 750+
files are displayed.
Plain diff
Email patch
opencompass/configs/datasets/XCOPA/XCOPA_ppl.py
0 → 100644
View file @
c289ecc0
"""Default XCOPA perplexity-based config: re-export the pinned variant."""
from mmengine.config import read_base

# mmengine's read_base() executes the relative import as a config include,
# pulling XCOPA_datasets into this module's namespace.
with read_base():
    from .XCOPA_ppl_54058d import XCOPA_datasets  # noqa: F401, F403
opencompass/configs/datasets/XCOPA/XCOPA_ppl_54058d.py
0 → 100644
View file @
c289ecc0
"""XCOPA evaluated via perplexity: the model scores two candidate
completions (one per choice) and the lower-PPL candidate is the prediction,
checked against 'label' with plain accuracy."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import XCOPADataset

XCOPA_reader_cfg = dict(
    input_columns=['question', 'premise', 'choice1', 'choice2'],
    output_column='label',
    # NOTE(review): evaluation runs on the 'train' split here — confirm
    # this is intentional and not a leftover from debugging.
    test_split='train')

XCOPA_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # One template per candidate label. FIX: the label-1 template
        # previously opened with 'Passage:' while label-0 used 'Premise:',
        # so the PPL comparison was confounded by a differing prefix.
        # The two candidates must be identical except for the answer choice.
        template={
            0: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice1}.',
            1: 'Premise:{premise}。\nQuestion:{question}。\nAnswer: {choice2}.',
        }),
    retriever=dict(type=ZeroRetriever),  # zero-shot: no in-context examples
    inferencer=dict(type=PPLInferencer))

XCOPA_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

XCOPA_datasets = [
    dict(
        type=XCOPADataset,
        path='xcopa',
        reader_cfg=XCOPA_reader_cfg,
        infer_cfg=XCOPA_infer_cfg,
        eval_cfg=XCOPA_eval_cfg)
]
opencompass/configs/datasets/XLSum/XLSum_gen.py
0 → 100644
View file @
c289ecc0
"""Default XLSum generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config-style include: the relative import below is resolved by mmengine
# and exposes XLSum_datasets here.
with read_base():
    from .XLSum_gen_2bb71c import XLSum_datasets  # noqa: F401, F403
opencompass/configs/datasets/XLSum/XLSum_gen_2bb71c.py
0 → 100644
View file @
c289ecc0
"""XLSum abstractive summarization, zero-shot generation scored with ROUGE."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import RougeEvaluator
from opencompass.datasets import XLSUMDataset, Xsum_postprocess

# Input article lives in 'text'; the reference summary in 'summary'.
XLSum_reader_cfg = dict(input_columns=['text'], output_column='summary')

XLSum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Plain-string template (completion-style prompt, no chat roles).
        template=('Document:{text}\n'
                  'Based on the previous text, provide a brief single summary:')),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

XLSum_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    # Reuses the Xsum postprocessor to trim the raw generation before scoring.
    pred_postprocessor=dict(type=Xsum_postprocess),
)

XLSum_datasets = [
    dict(
        type=XLSUMDataset,
        path='csebuetnlp/xlsum',
        reader_cfg=XLSum_reader_cfg,
        infer_cfg=XLSum_infer_cfg,
        eval_cfg=XLSum_eval_cfg)
]
opencompass/configs/datasets/Xsum/Xsum_gen.py
0 → 100644
View file @
c289ecc0
"""Default Xsum generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Resolved as a config include by mmengine; brings Xsum_datasets into scope.
with read_base():
    from .Xsum_gen_31397e import Xsum_datasets  # noqa: F401, F403
opencompass/configs/datasets/Xsum/Xsum_gen_31397e.py
0 → 100644
View file @
c289ecc0
"""Xsum summarization via chat-style generation, scored with ROUGE."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import RougeEvaluator
from opencompass.datasets import XsumDataset

# Source document arrives in the 'dialogue' column; gold summary in 'summary'.
Xsum_reader_cfg = dict(input_columns=['dialogue'], output_column='summary')

Xsum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Dialogue-format template: a single HUMAN turn carrying the document.
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Document:{dialogue}\nBased on the previous text, provide a brief single summary:'
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

Xsum_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    # Score the BOT turn of the conversation as the prediction.
    pred_role='BOT',
    # String form looks up the registered 'Xsum' postprocessor by name.
    pred_postprocessor=dict(type='Xsum'),
)

Xsum_datasets = [
    dict(
        type=XsumDataset,
        abbr='Xsum',
        path='opencompass/xsum',
        reader_cfg=Xsum_reader_cfg,
        infer_cfg=Xsum_infer_cfg,
        eval_cfg=Xsum_eval_cfg,
    )
]
opencompass/configs/datasets/Xsum/Xsum_gen_8ea5f8.py
0 → 100644
View file @
c289ecc0
"""Xsum summarization via completion-style generation, scored with ROUGE."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import RougeEvaluator
from opencompass.datasets import XsumDataset, Xsum_postprocess

# Document text comes from 'dialogue'; the reference from 'summary'.
Xsum_reader_cfg = dict(input_columns=['dialogue'], output_column='summary')

Xsum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Plain-string prompt (no chat roles) — contrast with the 31397e
        # variant, which wraps the same text in a HUMAN round.
        template=('Document:{dialogue}\n'
                  'Based on the previous text, provide a brief single summary:')),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

Xsum_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    # Callable postprocessor referenced directly rather than by registry name.
    pred_postprocessor=dict(type=Xsum_postprocess),
)

Xsum_datasets = [
    dict(
        type=XsumDataset,
        abbr='Xsum',
        path='opencompass/xsum',
        reader_cfg=Xsum_reader_cfg,
        infer_cfg=Xsum_infer_cfg,
        eval_cfg=Xsum_eval_cfg)
]
opencompass/configs/datasets/adv_glue/__init__.py
0 → 100644
View file @
c289ecc0
"""Aggregate all AdvGLUE task configs into a single `datasets` list."""
from mmengine.config import read_base

with read_base():
    from .adv_glue_sst2.adv_glue_sst2_gen import adv_sst2_datasets
    from .adv_glue_qqp.adv_glue_qqp_gen import adv_qqp_datasets
    from .adv_glue_rte.adv_glue_rte_gen import adv_rte_datasets
    from .adv_glue_qnli.adv_glue_qnli_gen import adv_qnli_datasets
    from .adv_glue_mnli.adv_glue_mnli_gen import adv_mnli_datasets
    from .adv_glue_mnli_mm.adv_glue_mnli_mm_gen import adv_mnli_mm_datasets

# Concatenate every '*_datasets' list imported above into one flat list.
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen.py
0 → 100644
View file @
c289ecc0
"""Default AdvGLUE-MNLI generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config include resolved by mmengine; exposes adv_mnli_datasets here.
with read_base():
    from .adv_glue_mnli_gen_bd8ef0 import adv_mnli_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_mnli/adv_glue_mnli_gen_bd8ef0.py
0 → 100644
View file @
c289ecc0
"""AdvGLUE MNLI (matched): 3-way entailment as an A/B/C multiple-choice
generation task, scored by accuracy drop under adversarial perturbation."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvMnliDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_mnli_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'], output_column='label_option')

adv_mnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes', 'B. maybe' or 'C. no'.
premise: {premise}
hypothesis: {hypothesis}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_mnli_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    # Extract the first A/B/C option letter from the free-form generation.
    pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
)

adv_mnli_datasets = [
    dict(
        abbr='adv_mnli',
        type=AdvMnliDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_mnli_reader_cfg,
        infer_cfg=adv_mnli_infer_cfg,
        eval_cfg=adv_mnli_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen.py
0 → 100644
View file @
c289ecc0
"""Default AdvGLUE-MNLI-mm generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config include resolved by mmengine; exposes adv_mnli_mm_datasets here.
with read_base():
    from .adv_glue_mnli_mm_gen_bd8ef0 import adv_mnli_mm_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_mnli_mm/adv_glue_mnli_mm_gen_bd8ef0.py
0 → 100644
View file @
c289ecc0
"""AdvGLUE MNLI (mismatched): same A/B/C entailment prompt as the matched
split, but backed by AdvMnliMMDataset; scored by accuracy drop."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvMnliMMDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_mnli_mm_reader_cfg = dict(
    input_columns=['premise', 'hypothesis'], output_column='label_option')

adv_mnli_mm_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes', 'B. maybe' or 'C. no'.
premise: {premise}
hypothesis: {hypothesis}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_mnli_mm_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    # Pull the first A/B/C letter out of the model's raw generation.
    pred_postprocessor=dict(type=first_option_postprocess, options='ABC'),
)

adv_mnli_mm_datasets = [
    dict(
        abbr='adv_mnli_mm',
        type=AdvMnliMMDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_mnli_mm_reader_cfg,
        infer_cfg=adv_mnli_mm_infer_cfg,
        eval_cfg=adv_mnli_mm_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen.py
0 → 100644
View file @
c289ecc0
"""Default AdvGLUE-QNLI generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config include resolved by mmengine; exposes adv_qnli_datasets here.
with read_base():
    from .adv_glue_qnli_gen_0b7326 import adv_qnli_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_qnli/adv_glue_qnli_gen_0b7326.py
0 → 100644
View file @
c289ecc0
"""AdvGLUE QNLI: does the sentence answer the question? Binary A/B
multiple-choice generation, scored by accuracy drop."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvQnliDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_qnli_reader_cfg = dict(
    input_columns=['question', 'sentence'], output_column='label_option')

adv_qnli_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the sentence answers the question. The answer should be exactly 'A. yes' or 'B. no'.
question: {question}
sentence: {sentence}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_qnli_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    # Keep only the first A/B letter of the generation for scoring.
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_qnli_datasets = [
    dict(
        abbr='adv_qnli',
        type=AdvQnliDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_qnli_reader_cfg,
        infer_cfg=adv_qnli_infer_cfg,
        eval_cfg=adv_qnli_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen.py
0 → 100644
View file @
c289ecc0
"""Default AdvGLUE-QQP generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config include resolved by mmengine; exposes adv_qqp_datasets here.
with read_base():
    from .adv_glue_qqp_gen_cdc277 import adv_qqp_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_qqp/adv_glue_qqp_gen_cdc277.py
0 → 100644
View file @
c289ecc0
"""AdvGLUE QQP: duplicate-question detection as binary A/B multiple-choice
generation, scored by accuracy drop under adversarial perturbation."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvQqpDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_qqp_reader_cfg = dict(
    input_columns=['question1', 'question2'], output_column='label_option')

adv_qqp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Note the option order: 'A. no' / 'B. yes' (inverted vs QNLI).
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether Question 1 has the same meaning as Question 2. The answer should be exactly 'A. no' or 'B. yes'.
Question 1: {question1}
Question 2: {question2}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_qqp_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    # Reduce the generation to its first A/B option letter.
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_qqp_datasets = [
    dict(
        abbr='adv_qqp',
        type=AdvQqpDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_qqp_reader_cfg,
        infer_cfg=adv_qqp_infer_cfg,
        eval_cfg=adv_qqp_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen.py
0 → 100644
View file @
c289ecc0
"""Default AdvGLUE-RTE generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config include resolved by mmengine; exposes adv_rte_datasets here.
with read_base():
    from .adv_glue_rte_gen_8cc547 import adv_rte_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_rte/adv_glue_rte_gen_8cc547.py
0 → 100644
View file @
c289ecc0
"""AdvGLUE RTE: textual entailment as binary A/B multiple-choice generation,
scored by accuracy drop under adversarial perturbation."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvRteDataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_rte_reader_cfg = dict(
    input_columns=['sentence1', 'sentence2'], output_column='label_option')

adv_rte_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # NOTE(review): the prompt maps sentence1 -> hypothesis and
        # sentence2 -> premise; in GLUE RTE sentence1 is conventionally the
        # premise. Verify this mapping matches AdvRteDataset's columns.
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """Please identify whether the premise entails the hypothesis. The answer should be exactly 'A. yes' or 'B. no'.
hypothesis: {sentence1}
premise: {sentence2}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_rte_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    # Reduce the generation to its first A/B option letter.
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_rte_datasets = [
    dict(
        abbr='adv_rte',
        type=AdvRteDataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_rte_reader_cfg,
        infer_cfg=adv_rte_infer_cfg,
        eval_cfg=adv_rte_eval_cfg,
    )
]
opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen.py
0 → 100644
View file @
c289ecc0
"""Default AdvGLUE-SST2 generation config: re-export the pinned variant."""
from mmengine.config import read_base

# Config include resolved by mmengine; exposes adv_sst2_datasets here.
with read_base():
    from .adv_glue_sst2_gen_ee8d3b import adv_sst2_datasets  # noqa: F401, F403
opencompass/configs/datasets/adv_glue/adv_glue_sst2/adv_glue_sst2_gen_ee8d3b.py
0 → 100644
View file @
c289ecc0
"""AdvGLUE SST-2: binary sentiment as A/B multiple-choice generation,
scored by accuracy drop under adversarial perturbation."""
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import AdvSst2Dataset, AccDropEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

adv_sst2_reader_cfg = dict(
    input_columns=['sentence'], output_column='label_option')

adv_sst2_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                """For the given sentence, label the sentiment of the sentence as positive or negative. The answer should be exactly 'A. negative' or 'B. positive'.
sentence: {sentence}
Answer:"""),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

adv_sst2_eval_cfg = dict(
    evaluator=dict(type=AccDropEvaluator),
    pred_role='BOT',
    # Reduce the generation to its first A/B option letter.
    pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
)

adv_sst2_datasets = [
    dict(
        abbr='adv_sst2',
        type=AdvSst2Dataset,
        path='opencompass/advglue-dev',
        reader_cfg=adv_sst2_reader_cfg,
        infer_cfg=adv_sst2_infer_cfg,
        eval_cfg=adv_sst2_eval_cfg,
    )
]
Prev
1
…
26
27
28
29
30
31
32
33
34
…
38
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment