"src/include/amd_inline_asm.hip.hpp" did not exist on "0d6aa311e944172dbe1bc636e74b858e8b2ed450"
Unverified commit 2d61b3ce authored by Stella Biderman, committed by GitHub
Browse files

Merge pull request #103 from EleutherAI/piqa

Implement PiQA
parents a2f5b74b 63854c10
......@@ -59,7 +59,6 @@ class LM(abc.ABC):
class Dataset(abc.ABC):
@abc.abstractmethod
def __init__(self):
self.download()
self._traindocs = None
......
......@@ -14,6 +14,7 @@ from . import naturalqs
from . import sat
from . import arithmetic
from . import lambada
from . import piqa
TASK_REGISTRY = {
# GLUE
......@@ -39,6 +40,7 @@ TASK_REGISTRY = {
# Order by benchmark/genre?
"lambada": lambada.LAMBADA,
"piqa": piqa.PiQA,
# "arc_easy": arc.ARCEasy, # not implemented yet
# "arc_challenge": arc.ARCChallenge, # not implemented yet
......
import json
import random
from lm_eval.base import Dataset
from lm_eval.base import Dataset, rf, mean
from ..utils import sh
import os
class PiQA(Dataset):
def __init__(self):
    # Make sure the PiQA data files exist locally before any docs are read.
    self.download()
def download(self):
    """Fetch the PiQA train/valid/test files into data/piqa.

    Skips the download entirely when the data/piqa directory already
    exists, so repeated runs do not re-fetch the files.
    """
    if not os.path.exists('data/piqa'):
        #TODO: use best_download
        sh("""
            mkdir -p data/piqa
            wget https://yonatanbisk.com/piqa/data/train.jsonl -O data/piqa/piqa-train.jsonl
            wget https://yonatanbisk.com/piqa/data/train-labels.lst -O data/piqa/piqa-train-labels.lst
            wget https://yonatanbisk.com/piqa/data/valid.jsonl -O data/piqa/piqa-valid.jsonl
            wget https://yonatanbisk.com/piqa/data/valid-labels.lst -O data/piqa/piqa-valid-labels.lst
            wget https://yonatanbisk.com/piqa/data/tests.jsonl -O data/piqa/piqa-test.jsonl
            """)
def has_training_docs(self):
    """PiQA ships a labeled training split."""
    return True
......@@ -25,11 +24,11 @@ class PiQA(Dataset):
return True
def has_test_docs(self):
    # Test-set labels are not published, so the test split is disabled
    # (test_docs is commented out elsewhere in this class).
    return False
def load_docs(self, textfilename, labelfilename):
    """Load a PiQA split from disk.

    :param textfilename: path to a .jsonl file, one JSON object per line.
    :param labelfilename: path to a label file (one label per line), or
        None when the split has no labels.
    :returns: an iterable of (json_dict, label_str) pairs when labels are
        available, otherwise a list of json_dicts. Labels are stripped of
        trailing whitespace/newlines.
    """
    with open(textfilename, 'r') as textfile:
        docs = [json.loads(line) for line in textfile]
    if labelfilename is None:
        return docs
    with open(labelfilename, 'r') as labelfile:
        labels = [label.strip() for label in labelfile]
    return zip(docs, labels)
......@@ -39,62 +38,40 @@ class PiQA(Dataset):
def validation_docs(self):
    """Return (json_dict, label) pairs for the validation split."""
    return self.load_docs('data/piqa/piqa-valid.jsonl', 'data/piqa/piqa-valid-labels.lst')

# Disabled: the published test split has no labels, so it cannot be scored.
#def test_docs(self):
#    return self.load_docs('data/piqa/piqa-test.jsonl', None)
def fewshot_description(self):
    """Natural-language description prepended to few-shot prompts.

    Currently empty. TODO: figure out fewshot description.
    """
    return ""
def doc_to_text(self, doc):
    """Return the prompt for a document: the goal sentence.

    :param doc: a (json_dict, label) pair as produced by load_docs.
    """
    return doc[0]['goal']
def doc_to_target(self, doc):
    """Return the gold completion for a document.

    :param doc: a (json_dict, label) pair; the label is '0' when sol1 is
        correct and '1' when sol2 is correct.
    NOTE(review): the goal is included here even though doc_to_text already
    emits it — confirm against how fewshot examples are assembled.
    """
    #TODO: check if oa uses newline
    rightanswer = int(doc[1]) + 1
    return '\n' + ''.join([doc[0]['goal'], ' ', doc[0]['sol' + str(rightanswer)]])
def construct_requests(self, doc, ctx):
    """Uses RequestFactory to construct Requests and returns an iterable of
    Requests which will be sent to the LM.

    :param doc:
        The document as returned from training_docs, validation_docs, or test_docs.
    :param ctx: str
        The context string, generated by fewshot_context. This includes the natural
        language description, as well as the few shot examples, and the question
        part of the document for `doc`.
    """
    # Score each candidate solution as a continuation of the same context;
    # only the loglikelihood (first tuple element) is kept for comparison.
    ll_1, _ = rf.loglikelihood(ctx, doc[0]['sol1'])
    ll_2, _ = rf.loglikelihood(ctx, doc[0]['sol2'])
    return ll_1, ll_2
def process_results(self, doc, results):
    """Take a single document and the LM results and evaluates, returning a
    dict where keys are the names of submetrics and values are the values of
    the metric for that one document

    :param doc:
        The document as returned from training_docs, validation_docs, or test_docs.
    :param results:
        The results of the requests created in construct_requests.
    """
    ll_1, ll_2 = results
    # Gold label '0' means sol1 is correct; the model is right when the
    # higher-loglikelihood solution matches the gold label.
    return {
        'acc': (ll_1 > ll_2) == (int(doc[1]) == 0)
    }
def aggregation(self):
    """
    :returns: {str: [float] -> float}
        A dictionary where keys are the names of submetrics and values are
        functions that aggregate a list of metrics
    """
    # Per-document accuracies are averaged with the shared `mean` helper.
    return {
        'acc': mean
    }
def higher_is_better(self):
    """
    :returns: {str: bool}
        A dictionary where keys are the names of submetrics and values are
        whether a higher value of the submetric is better
    """
    return {
        'acc': True
    }
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment