OpenDAS / opencompass · Commit dc1b82c3 (unverified)
Authored Sep 27, 2023 by Kevin Wang; committed by GitHub on Sep 27, 2023

[SIG] add GLUE_MRPC dataset (#440)
Parent: 14fdecfe
Showing 2 changed files with 55 additions and 0 deletions (+55 / -0):

  configs/datasets/GLUE_MRPC/GLUE_MRPC_ppl.py          (+4 / -0)
  configs/datasets/GLUE_MRPC/GLUE_MRPC_ppl_96564c.py   (+51 / -0)
configs/datasets/GLUE_MRPC/GLUE_MRPC_ppl.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .GLUE_MRPC_ppl_96564c import MRPC_datasets  # noqa: F401, F403
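The wrapper above simply re-exports MRPC_datasets from the versioned config file, which is how OpenCompass dataset configs are usually composed. As a rough illustration (not part of this commit), a top-level evaluation config could pull these datasets in as sketched below; the file name eval_glue_mrpc.py and the relative import path are assumptions based on the repository's usual configs/ layout.

# Hypothetical configs/eval_glue_mrpc.py -- a minimal sketch, not part of this commit.
from mmengine.config import read_base

with read_base():
    # Import the dataset list defined by the wrapper file added in this commit.
    from .datasets.GLUE_MRPC.GLUE_MRPC_ppl import MRPC_datasets

datasets = [*MRPC_datasets]
# models = [...]  # model configs would be added here as usual for an OpenCompass run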
configs/datasets/GLUE_MRPC/GLUE_MRPC_ppl_96564c.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

_hint = "The following are semantic matching questions.\n" \
    "Please determine whether the following two sentences are semantically equivalent: " \
    "0 means not equivalent, 1 means equivalent.\n"

MRPC_infer_cfg = dict(
    ice_template=dict(
        type=PromptTemplate,
        template="Sentence one: {sentence1}\nSentence two: {sentence2}\nResult: {label}",
    ),
    prompt_template=dict(
        type=PromptTemplate,
        template={
            answer:
            f"{_hint}</E>Sentence one: {{sentence1}}\nSentence two: {{sentence2}}\nResult: {answer}"
            for answer in [0, 1]
        },
        ice_token='</E>',
    ),
    retriever=dict(type=FixKRetriever),
    inferencer=dict(type=PPLInferencer, fix_id_list=[0, 1, 2, 3, 4]))

MRPC_eval_cfg = dict(evaluator=dict(type=AccEvaluator), )

MRPC_datasets = []
for _split in ["validation", "test"]:

    MRPC_reader_cfg = dict(
        input_columns=['sentence1', 'sentence2'],
        output_column='label',
        train_split="train",
        test_split=_split)

    MRPC_datasets.append(
        dict(
            abbr=f'MRPC-{_split}',
            type=HFDataset,
            path='glue',
            name='mrpc',
            reader_cfg=MRPC_reader_cfg,
            infer_cfg=MRPC_infer_cfg,
            eval_cfg=MRPC_eval_cfg))
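For reference, the dict-valued prompt_template above gives the PPLInferencer one candidate prompt per label (0 and 1), and the model's perplexity on each filled-in prompt decides the prediction. The snippet below is only an illustration of how those templates render for a single sentence pair; the sample sentences are made up, and in a real run the </E> token is replaced by the five fixed in-context examples selected by FixKRetriever.

# Illustrative only: render the two per-label prompts for one made-up MRPC pair.
_hint = ("The following are semantic matching questions.\n"
         "Please determine whether the following two sentences are semantically equivalent: "
         "0 means not equivalent, 1 means equivalent.\n")

sample = dict(sentence1="The cat sat on the mat.",
              sentence2="A cat was sitting on the mat.")

for answer in [0, 1]:
    prompt = (f"{_hint}Sentence one: {sample['sentence1']}\n"
              f"Sentence two: {sample['sentence2']}\n"
              f"Result: {answer}")
    print(prompt)
    print("---")
# The label whose completed prompt receives the lower perplexity is taken as the prediction,
# and AccEvaluator then compares these predictions against the gold labels.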