Unverified commit 1b467c57 authored by Leo Gao, committed by GitHub

Merge pull request #112 from jeffhsu3/pubmedqa

pubmedqa and sciq
parents 5dfbba05 79878d13
@@ -17,6 +17,8 @@ from . import lambada
from . import race
from . import piqa
from . import triviaqa
from . import pubmedqa
from . import sciq
from . import webqs
@@ -46,6 +48,9 @@ TASK_REGISTRY = {
    "lambada": lambada.LAMBADA,
    "piqa": piqa.PiQA,
    "pubmedqa": pubmedqa.Pubmed_QA,
    "sciq": sciq.SciQ,
    #"triviaqa": triviaqa.TriviaQA,
    "arc_easy": arc.ARCEasy,
    "arc_challenge": arc.ARCChallenge,
...
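With the registry entries above, both new tasks resolve by name. A minimal sketch of looking them up, assuming lm_eval.tasks exposes TASK_REGISTRY as shown in this hunk and that instantiation is allowed to download data:

from lm_eval.tasks import TASK_REGISTRY

for name in ("pubmedqa", "sciq"):
    # Instantiating a task triggers its download()/setup in this harness.
    task = TASK_REGISTRY[name]()
    print(name, task.has_test_docs())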
import numpy as np
import random

from .common import HFTask
from lm_eval.base import rf, mean


class Pubmed_QA(HFTask):
    DATASET_PATH = "pubmed_qa"
    DATASET_NAME = "pqa_labeled"

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def test_docs(self):
        if self.has_test_docs():
            # The HF split is labelled "train", but it is really just for testing.
            return self.data["train"]

    def fewshot_description(self):
        # The average context length in the labelled dataset is 238.9;
        # even 2 few-shot examples push the prompt beyond the context window.
        return ""

    def doc_to_text(self, doc):
        ctxs = "\n".join(doc["context"]["contexts"])
        return "abstract: {}\nquestion: {}\nanswer:".format(
            ctxs,
            doc["question"],
        )

    def doc_to_target(self, doc):
        return " {}".format(doc["final_decision"])

    def fewshot_examples(self, k):
        # Since this task only has test docs, sample few-shot examples from them.
        if self._training_docs is None:
            self._training_docs = list(self.test_docs())
        return random.sample(self._training_docs, k)

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns
        an iterable of Requests which will be sent to the LM.
        """
        ll_yes, _ = rf.loglikelihood(ctx, " yes")
        ll_no, _ = rf.loglikelihood(ctx, " no")
        ll_maybe, _ = rf.loglikelihood(ctx, " maybe")
        return ll_yes, ll_no, ll_maybe

    def process_results(self, doc, results):
        gold = doc["final_decision"]
        # Predict whichever of "yes"/"no"/"maybe" got the highest loglikelihood.
        pred = np.argmax(results)
        return {
            "acc": ["yes", "no", "maybe"][pred] == gold,
        }

    def aggregation(self):
        return {
            "acc": mean,
        }

    def higher_is_better(self):
        return {
            "acc": True,
        }
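A toy illustration of the scoring path above: construct_requests returns three loglikelihoods, and process_results argmaxes them against the gold label. The numbers here are made up.

import numpy as np

results = (-1.2, -3.4, -2.8)  # hypothetical (ll_yes, ll_no, ll_maybe) from the LM
pred = ["yes", "no", "maybe"][np.argmax(results)]
print(pred == "yes")  # accuracy contribution for a doc whose final_decision is "yes"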
import json
import os
import zipfile

from ..utils import sh
from lm_eval.base import MultipleChoiceTask


class SciQ(MultipleChoiceTask):
    def download(self):
        if not os.path.exists('data/sciq'):
            # makedirs also creates the parent "data" directory if it is missing.
            os.makedirs('data/sciq')
            sh("wget https://ai2-public-datasets.s3.amazonaws.com/sciq/SciQ.zip -O data/sciq/SciQ.zip")
            with zipfile.ZipFile("data/sciq/SciQ.zip", "r") as zf:
                zf.extractall("data/sciq/")

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _convert_standard(self, doc):
        # The correct answer is always placed last, so gold is index 3; order
        # does not matter for loglikelihood-scored multiple choice.
        choices = [
            doc["distractor1"],
            doc["distractor2"],
            doc["distractor3"],
            doc["correct_answer"],
        ]
        out_doc = {
            "source": doc["support"],
            "query": doc["question"],
            "choices": choices,
            "gold": 3,
        }
        return out_doc

    def load_docs(self, textfilename):
        with open(textfilename, 'r') as j:
            docs = json.load(j)
        for record in docs:
            yield self._convert_standard(record)

    def fewshot_description(self):
        return ""

    def training_docs(self):
        return self.load_docs("data/sciq/SciQ dataset-2 3/train.json")

    def validation_docs(self):
        return self.load_docs("data/sciq/SciQ dataset-2 3/valid.json")

    def test_docs(self):
        return self.load_docs("data/sciq/SciQ dataset-2 3/test.json")

    def doc_to_text(self, doc):
        return "{}\n{}".format(doc["source"], doc["query"])
\ No newline at end of file
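For reference, a hypothetical SciQ record (field names as read by _convert_standard above) and the doc it produces; calling the method unbound avoids SciQ.__init__, which would kick off the download.

record = {
    "question": "What force pulls objects toward the ground?",
    "distractor1": "friction",
    "distractor2": "magnetism",
    "distractor3": "inertia",
    "correct_answer": "gravity",
    "support": "Gravity attracts objects with mass toward one another.",
}
doc = SciQ._convert_standard(None, record)  # self is unused in the method
print(doc["query"])                  # -> "What force pulls objects toward the ground?"
print(doc["choices"][doc["gold"]])   # -> "gravity"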
@@ -44,7 +44,7 @@ def main():
        if set == 'test' and task.has_test_docs():
            docs = task.test_docs()
            iters.append(docs)
    docs = join_iters(iters)
    with open(os.path.join(args.output_base_path, task_name), "w") as f:
...