Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
lm-evaluation-harness
Commits
1b467c57
Unverified
Commit
1b467c57
authored
Feb 06, 2021
by
Leo Gao
Committed by
GitHub
Feb 06, 2021
Browse files
Merge pull request #112 from jeffhsu3/pubmedqa
pubmedqa and sciq
parents
5dfbba05
79878d13
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
144 additions
and
1 deletion
+144
-1
lm_eval/tasks/__init__.py
lm_eval/tasks/__init__.py
+5
-0
lm_eval/tasks/pubmedqa.py
lm_eval/tasks/pubmedqa.py
+73
-0
lm_eval/tasks/sciq.py
lm_eval/tasks/sciq.py
+65
-0
scripts/write_out.py
scripts/write_out.py
+1
-1
No files found.
lm_eval/tasks/__init__.py
View file @
1b467c57
...
...
@@ -17,6 +17,8 @@ from . import lambada
from
.
import
race
from
.
import
piqa
from
.
import
triviaqa
from
.
import
pubmedqa
from
.
import
sciq
from
.
import
webqs
...
...
@@ -46,6 +48,9 @@ TASK_REGISTRY = {
"lambada"
:
lambada
.
LAMBADA
,
"piqa"
:
piqa
.
PiQA
,
"pubmedqa"
:
pubmedqa
.
Pubmed_QA
,
"sciq"
:
sciq
.
SciQ
,
#"triviaqa": triviaqa.TriviaQA,
"arc_easy"
:
arc
.
ARCEasy
,
"arc_challenge"
:
arc
.
ARCChallenge
,
...
...
lm_eval/tasks/pubmedqa.py
0 → 100644
View file @
1b467c57
import
numpy
as
np
import
json
import
random
from
.common
import
HFTask
from
lm_eval.base
import
rf
,
mean
class Pubmed_QA(HFTask):
    """PubMedQA (pqa_labeled): yes/no/maybe question answering over PubMed
    abstracts. Scored zero-shot by comparing the log-likelihoods of the
    continuations " yes", " no" and " maybe" against the gold label.
    """

    DATASET_PATH = "pubmed_qa"
    DATASET_NAME = "pqa_labeled"

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def test_docs(self):
        if self.has_test_docs():
            # The HF split is labelled "train" but it is really just for testing.
            return self.data["train"]

    def fewshot_description(self):
        # Average ctx length in the labelled dataset is 238.9; even 2 few-shot
        # examples push it beyond the context window, so no description is used.
        return ""

    def doc_to_text(self, doc):
        """Render a doc as 'abstract: ...\\nquestion: ...\\nanswer:'.

        The abstract is the newline-joined list of context passages.
        NOTE(fix): the original also passed doc["final_decision"] (the gold
        label) as an unused third format argument; dropped so that a future
        edit to the template cannot silently leak the answer into the prompt.
        """
        ctxs = "\n".join(doc["context"]["contexts"])
        return "abstract: {}\nquestion: {}\nanswer:".format(ctxs, doc["question"])

    def doc_to_target(self, doc):
        # Target is the gold decision ("yes"/"no"/"maybe"), space-prefixed to
        # match the " yes"/" no"/" maybe" continuations scored below.
        return " {}".format(doc["final_decision"])

    def fewshot_examples(self, k):
        # Only test docs exist for this task, so few-shot examples are sampled
        # from the test docs as well.
        if self._training_docs is None:
            self._training_docs = list(self.test_docs())
        return random.sample(self._training_docs, k)

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns
        an iterable of Requests which will be sent to the LM.
        """
        ll_yes, _ = rf.loglikelihood(ctx, " yes")
        ll_no, _ = rf.loglikelihood(ctx, " no")
        ll_maybe, _ = rf.loglikelihood(ctx, " maybe")
        return ll_yes, ll_no, ll_maybe

    def process_results(self, doc, results):
        """Score one doc: prediction is the highest-likelihood option,
        accuracy is exact match against the gold decision.
        """
        gold = doc["final_decision"]
        ll_yes, ll_no, ll_maybe = results
        # argmax over (ll_yes, ll_no, ll_maybe) — order must match the
        # label list below and the request order in construct_requests.
        pred = np.argmax(results)
        return {
            "acc": ["yes", "no", "maybe"][pred] == gold,
        }

    def aggregation(self):
        # Per-doc accuracies are averaged.
        return {"acc": mean}

    def higher_is_better(self):
        return {"acc": True}
lm_eval/tasks/sciq.py
0 → 100644
View file @
1b467c57
import
os
import
json
from
..utils
import
sh
from
lm_eval.base
import
MultipleChoiceTask
,
rf
,
mean
import
zipfile
class SciQ(MultipleChoiceTask):
    """SciQ: crowdsourced multiple-choice science exam questions, each with a
    supporting passage. Choices are the three distractors plus the correct
    answer, which is always placed last (gold index 3).
    """

    def download(self):
        """Fetch and extract the SciQ archive into data/sciq/ (first run only)."""
        if not os.path.exists("data/sciq"):
            # FIX: os.mkdir raised FileNotFoundError when the parent "data"
            # directory did not exist yet; makedirs creates the whole path.
            os.makedirs("data/sciq", exist_ok=True)
            sh("wget https://ai2-public-datasets.s3.amazonaws.com/sciq/SciQ.zip -O data/sciq/SciQ.zip")
            with zipfile.ZipFile("data/sciq/SciQ.zip", "r") as zf:
                zf.extractall("data/sciq/")

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _convert_standard(self, doc):
        """Map a raw SciQ record to the MultipleChoiceTask doc format.

        The correct answer is appended after the three distractors, so the
        gold index is always 3.
        """
        choices = [
            doc["distractor1"],
            doc["distractor2"],
            doc["distractor3"],
            doc["correct_answer"],
        ]
        return {
            "source": doc["support"],
            "query": doc["question"],
            "choices": choices,
            "gold": 3,
        }

    def load_docs(self, textfilename):
        """Yield converted docs from one SciQ JSON split file."""
        with open(textfilename, "r") as j:
            # json.load reads the file object directly; equivalent to the
            # previous json.loads(j.read()) without the intermediate string.
            docs = json.load(j)
        for record in docs:
            yield self._convert_standard(record)

    def fewshot_description(self):
        # NOTE(review): comment inherited from pubmedqa — the "238.9 average
        # ctx length" figure presumably refers to that dataset, not SciQ.
        # Either way, no few-shot description is used here.
        return ""

    def training_docs(self):
        return self.load_docs("data/sciq/SciQ dataset-2 3/train.json")

    def validation_docs(self):
        return self.load_docs("data/sciq/SciQ dataset-2 3/valid.json")

    def test_docs(self):
        return self.load_docs("data/sciq/SciQ dataset-2 3/test.json")

    def doc_to_text(self, doc):
        # Prompt is the supporting passage followed by the question.
        return "{}\n{}".format(doc["source"], doc["query"])
\ No newline at end of file
scripts/write_out.py
View file @
1b467c57
...
...
@@ -44,7 +44,7 @@ def main():
if
set
==
'test'
and
task
.
has_test_docs
():
docs
=
task
.
test_docs
()
iters
.
append
(
docs
)
docs
=
join_iters
(
iters
)
with
open
(
os
.
path
.
join
(
args
.
output_base_path
,
task_name
),
"w"
)
as
f
:
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment