gaoqiong / lm-evaluation-harness · Commits

Commit e8702f15
Authored Jul 06, 2023 by haileyschoelkopf

Merge branch 'big-refactor' into unscramble+toxigen

Parents: dcb16263, 45737a38
Changes: 28

Showing 8 changed files with 139 additions and 1 deletion
lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py      +16 −0
lm_eval/tasks/pubmedqa/pubmedqa.yaml               +17 −0
lm_eval/tasks/race/preprocess_race.py              +36 −0
lm_eval/tasks/race/race.yaml                       +14 −0
lm_eval/tasks/super_glue/copa/default.yaml          +1 −1
lm_eval/tasks/swag/swag.yaml                       +20 −0
lm_eval/tasks/winogrande/preprocess_winogrande.py  +21 −0
lm_eval/tasks/winogrande/winogrande.yaml           +14 −0
lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py (new file, 0 → 100644)
def doc_to_text(doc):
    ctxs = "\n".join(doc["context"]["contexts"])
    return "Abstract: {}\nQuestion: {}\nAnswer:".format(
        ctxs,
        doc["question"],
        doc["final_decision"],
    )


def doc_to_target(doc):
    return " {}".format(doc["final_decision"])


def gold_alias(doc):
    dict_to_label = {'yes': 0, 'no': 1, 'maybe': 2}
    return dict_to_label[doc["final_decision"]]
\ No newline at end of file
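For orientation, a minimal sketch of how these helpers behave on a hand-written PubMedQA-style doc. The dict below is invented for illustration; note that str.format simply ignores the unused third positional argument.

# Hypothetical example doc mirroring the pqa_labeled fields used above.
sample_doc = {
    "context": {"contexts": ["Aim of study...", "Methods..."]},
    "question": "Does the intervention improve outcomes?",
    "final_decision": "yes",
}

print(doc_to_text(sample_doc))
# Abstract: Aim of study...
# Methods...
# Question: Does the intervention improve outcomes?
# Answer:

print(doc_to_target(sample_doc))  # " yes"
print(gold_alias(sample_doc))     # 0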
lm_eval/tasks/pubmedqa/pubmedqa.yaml (new file, 0 → 100644)
group:
  - multiple_choice
task: pubmed_qa
dataset_path: pubmed_qa
dataset_name: pqa_labeled
output_type: multiple_choice
training_split: null
validation_split: null
test_split: train
template_aliases: "{% set answer_choices = ['yes', 'no', 'maybe'] %}{% set gold = final_decision %}"
doc_to_text: !function preprocess_pubmedqa.doc_to_text
doc_to_target: !function preprocess_pubmedqa.doc_to_target
gold_alias: !function preprocess_pubmedqa.gold_alias
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
\ No newline at end of file
lm_eval/tasks/race/preprocess_race.py (new file, 0 → 100644)
import ast


def process_ast(string):
    return ast.literal_eval(string)


def last_problem(doc):
    return process_ast(doc["problems"])[-1]


def get_answer_option(problem):
    letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
    answer = letter_to_num[problem["answer"]]
    return problem["options"][answer]


def create_choices(doc):
    problem = last_problem(doc)
    choices = [problem["options"][i] for i in range(4)]
    return choices


def doc_to_text(doc):
    text = "Article: " + doc["article"] + "\n\n"
    for problem in process_ast(doc["problems"])[:-1]:
        if problem["question"][-6:] == "  _  .":
            text += (
                problem["question"][-5:] + get_answer_option(problem) + "\n"
            )
        else:
            question = "Question: " + problem["question"] + "\n"
            answer = "Answer: " + get_answer_option(problem) + "\n"
            text += question + answer
    text += last_problem(doc)["question"]
    return text


def doc_to_target(doc):
    letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
    answer = letter_to_num[last_problem(doc)["answer"]]
    return answer
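A similar sketch for RACE, using an invented doc whose "problems" field is stored as a string and parsed via ast.literal_eval, as the code above expects. All values below are made up.

# Hypothetical RACE-style doc: prior problems become few-shot-style context,
# the last problem becomes the question to answer.
sample_doc = {
    "article": "The school library opens at eight.",
    "problems": str([
        {"question": "When does the library open?",
         "options": ["At seven", "At eight", "At nine", "At ten"],
         "answer": "B"},
        {"question": "Who uses the library most?",
         "options": ["Teachers", "Students", "Parents", "Visitors"],
         "answer": "B"},
    ]),
}

print(doc_to_text(sample_doc))
# Article: The school library opens at eight.
#
# Question: When does the library open?
# Answer: At eight
# Who uses the library most?

print(create_choices(sample_doc))  # ['Teachers', 'Students', 'Parents', 'Visitors']
print(doc_to_target(sample_doc))   # 1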
lm_eval/tasks/race/race.yaml (new file, 0 → 100644)
group:
  - multiple_choice
task: race
dataset_path: bfattori/race
dataset_name: high
output_type: multiple_choice
test_split: test
create_choices: !function preprocess_race.create_choices
doc_to_text: !function preprocess_race.doc_to_text
doc_to_target: !function preprocess_race.doc_to_target
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
\ No newline at end of file
lm_eval/tasks/super_glue/copa/default.yaml (changed, +1 −1)
group:
  - super-glue-lm-eval-v1
task: "copa"
dataset_path: super_glue
dataset_name: copa
...
lm_eval/tasks/swag/swag.yaml (new file, 0 → 100644)
group:
  - multiple_choice
task: swag
dataset_path: swag
dataset_name: regular
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: null
template_aliases: "{% set answer_choices = [ending0, ending1, ending2, ending3] %}{% set gold = label %}"
doc_to_text: "{{startphrase}}"
doc_to_target: "{{answer_choices[gold]}}"
gold_alias: "{{gold}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
\ No newline at end of file
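These YAML fields are Jinja2 template strings evaluated against each dataset row. Roughly, the rendering works like the hand-rolled sketch below; the row values are made up, and jinja2 is used directly here only to approximate what the harness does internally.

from jinja2 import Template

# Hypothetical SWAG row.
row = {
    "startphrase": "A man is sitting on a roof. He",
    "ending0": "starts pulling up roofing tiles.",
    "ending1": "is ripping level tiles off.",
    "ending2": "holds a rubik's cube.",
    "ending3": "begins to sing.",
    "label": 0,
}

# template_aliases defines answer_choices and gold, then the per-field
# templates reference them.
aliases = ("{% set answer_choices = [ending0, ending1, ending2, ending3] %}"
           "{% set gold = label %}")

text = Template(aliases + "{{startphrase}}").render(**row)
target = Template(aliases + "{{answer_choices[gold]}}").render(**row)

print(text)    # A man is sitting on a roof. He
print(target)  # starts pulling up roofing tiles.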
lm_eval/tasks/winogrande/preprocess_winogrande.py (new file, 0 → 100644)
def partial_context(doc, option):
    # Substitute the pronoun in the sentence with the specified option
    # and ignore everything after.
    pronoun_loc = doc["sentence"].index("_")
    return doc["sentence"][:pronoun_loc] + option


def partial_target(doc):
    # The target is everything after the document specified pronoun.
    pronoun_loc = doc["sentence"].index("_") + 1
    return doc["sentence"][pronoun_loc:].strip()


def create_choices(doc):
    choices = []
    for option in [doc["option1"], doc["option2"]]:
        partial_ctx = partial_context(doc, option)
        choices.append(partial_ctx)
    return choices


def gold_alias(doc):
    answer_to_num = {"1": 0, "2": 1}
    return answer_to_num[doc['answer']]
\ No newline at end of file
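And a quick invented Winogrande doc showing how the sentence is split at the "_" placeholder into two per-option contexts plus a shared continuation target.

# Hypothetical Winogrande-style doc.
sample_doc = {
    "sentence": "The trophy didn't fit in the suitcase because _ was too big.",
    "option1": "the trophy",
    "option2": "the suitcase",
    "answer": "1",
}

print(create_choices(sample_doc))
# ["The trophy didn't fit in the suitcase because the trophy",
#  "The trophy didn't fit in the suitcase because the suitcase"]

print(partial_target(sample_doc))  # "was too big."
print(gold_alias(sample_doc))      # 0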
lm_eval/tasks/winogrande/winogrande.yaml (new file, 0 → 100644)
task: winogrande
dataset_path: winogrande
dataset_name: winogrande_xl
output_type: winograd_schema
training_split: train
validation_split: validation
doc_to_target: !function preprocess_winogrande.partial_target
doc_to_text: "{{sentence}}"
create_choices: !function preprocess_winogrande.create_choices
gold_alias: !function preprocess_winogrande.gold_alias
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true