Unverified commit 6caa0afd, authored by Leo Gao and committed by GitHub

Merge pull request #300 from jon-tow/hf-dataset-refactor

Refactor `Task` downloading to use `HuggingFace.datasets`
parents 7064d6b9 9434722c
......@@ -10,7 +10,6 @@ Homepage: https://math-qa.github.io/math-QA/
"""
import re
from lm_eval.base import MultipleChoiceTask
from . common import HFTask
_CITATION = """
......@@ -25,7 +24,7 @@ _CITATION = """
"""
class MathQA(HFTask, MultipleChoiceTask):
class MathQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "math_qa"
DATASET_NAME = None
......@@ -39,13 +38,23 @@ class MathQA(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return True
def _convert_standard(self, doc):
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
answer_idx = ['a', 'b', 'c', 'd', 'e'].index(doc['correct'])
choices = [c[4:].rstrip(" ,") for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc['options'])]
out_doc = {
"query": "Question: " + doc['Problem'] +"\nAnswer:",
"query": "Question: " + doc['Problem'] + "\nAnswer:",
"choices": choices,
"gold": answer_idx,
}
......
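For orientation, here is a minimal standalone sketch of the pattern the files in this PR adopt, written against the MathQA fields shown above (`Problem`, `options`, `correct`). The class name is illustrative and this is not the harness's actual `Task` base class; downloading and caching are assumed to be handled entirely by `datasets.load_dataset`.

```python
# Minimal sketch (illustrative class, not the harness's base class): downloading is
# delegated to `datasets.load_dataset`, splits are served from `self.dataset`, and
# per-task conversion lives in `_process_doc`.
import re

import datasets


class MathQASketch:
    DATASET_PATH = "math_qa"
    DATASET_NAME = None

    def __init__(self):
        # `load_dataset` handles download + caching, replacing the old HFTask mixin.
        self.dataset = datasets.load_dataset(self.DATASET_PATH, self.DATASET_NAME)
        self._training_docs = None

    def _process_doc(self, doc):
        # Same option parsing as above, e.g. "a ) 38 , b ) 27.675 , ... , e ) none of these"
        choices = [c[4:].rstrip(" ,")
                   for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc["options"])]
        return {
            "query": "Question: " + doc["Problem"] + "\nAnswer:",
            "choices": choices,
            "gold": ["a", "b", "c", "d", "e"].index(doc["correct"]),
        }

    def training_docs(self):
        # Cache the processed train split so repeated few-shot sampling is cheap.
        if self._training_docs is None:
            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])
```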
......@@ -20,9 +20,8 @@ of a question's options. See section 4 of the paper for details.
Homepage: https://leaderboard.allenai.org/mctaco/submissions/public
"""
import numpy as np
from lm_eval.base import rf
from collections import defaultdict
from . common import HFTask
from lm_eval.base import rf, Task
_CITATION = """
......@@ -35,7 +34,7 @@ _CITATION = """
"""
class MCTACO(HFTask):
class MCTACO(Task):
VERSION = 0
DATASET_PATH = "mc_taco"
DATASET_NAME = None
......@@ -49,6 +48,12 @@ class MCTACO(HFTask):
def has_test_docs(self):
return True
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return self.dataset["test"]
def doc_to_text(self, doc):
return f"{doc['sentence']}\nQuestion: {doc['question']}\n"\
f"Answer: {doc['answer']}\nPlausible:"
......
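As an illustration only, a made-up `mc_taco`-style doc pushed through the prompt format from `doc_to_text` above; each (sentence, question, candidate answer) triple is rendered and then judged for plausibility.

```python
# Illustrative doc in the mc_taco shape used by `doc_to_text` above (values are made up).
doc = {
    "sentence": "He ate lunch at noon and went back to work.",
    "question": "How long did it take him to eat lunch?",
    "answer": "30 minutes",
}
print(f"{doc['sentence']}\nQuestion: {doc['question']}\nAnswer: {doc['answer']}\nPlausible:")
```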
......@@ -7,14 +7,11 @@ modified from Chinese high school English listening comprehension test data.
Homepage: https://github.com/Nealcly/MuTual
"""
import json
import zipfile
import shutil
import numpy as np
from pathlib import Path
import inspect
import lm_eval.datasets.mutual.mutual
from lm_eval.base import Task, rf
from lm_eval.metrics import mean
from best_download import download_file
_CITATION = """
......@@ -30,29 +27,10 @@ _CITATION = """
class MuTualBase(Task):
VERSION = 1
BASE_PATH = Path("data/mutual")
DATASET_PATH = inspect.getfile(lm_eval.datasets.mutual.mutual)
DATASET_NAME = None
CHOICES = ['A', 'B', 'C', 'D']
def __init__(self):
super().__init__()
def download(self):
if self.BASE_PATH.exists():
return
Path.mkdir(self.BASE_PATH, parents=True)
master_zip = Path("data/master.zip")
download_file(
"https://github.com/Nealcly/MuTual/archive/master.zip",
local_file=str(master_zip),
expected_checksum="bb325cf6c672f0f02699993a37138b0fa0af6fcfc77ec81dfbe46add4d7b29f9")
with zipfile.ZipFile(master_zip, 'r') as zip:
zip.extractall("data")
Path("data/MuTual-master/data").rename(str(self.BASE_PATH))
# Remove left over files and directories.
master_zip.unlink()
shutil.rmtree("data/MuTual-master")
def has_training_docs(self):
return True
......@@ -62,18 +40,11 @@ class MuTualBase(Task):
def has_test_docs(self):
return False
def _load_docs(self, path):
for file in sorted(path.iterdir()):
if file.suffix != ".txt":
continue
with open(file, 'r', encoding='utf-8') as f:
yield json.load(f)
def training_docs(self):
return self._load_docs(self.BASE_PATH / self.DATASET_NAME / "train")
return self.dataset["train"]
def validation_docs(self):
return self._load_docs(self.BASE_PATH / self.DATASET_NAME / "dev")
return self.dataset["validation"]
def test_docs(self):
return NotImplemented
......@@ -134,8 +105,8 @@ class MuTualBase(Task):
class MuTual(MuTualBase):
DATASET_NAME = Path("mutual")
DATASET_NAME = "mutual"
class MuTualPlus(MuTualBase):
DATASET_NAME = Path("mutual_plus")
DATASET_NAME = "mutual_plus"
......@@ -15,8 +15,7 @@ not even bother with the train set.
Homepage: https://ai.google.com/research/NaturalQuestions
"""
import random
from . common import HFTask
from lm_eval.base import Task
from itertools import islice
......@@ -30,7 +29,7 @@ _CITATION = """
"""
class NaturalQs(HFTask):
class NaturalQs(Task):
VERSION = 0
DATASET_PATH = "natural_questions"
DATASET_NAME = None
......@@ -47,7 +46,12 @@ class NaturalQs(HFTask):
def training_docs(self):
# Cache training for faster few-shot.
# Data is too large to fit in memory.
return self.data["train"]
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def fewshot_examples(self, k, rnd):
# Data is too large to fit in memory. We just sample from the first bit.
......
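The `fewshot_examples` override hinted at above samples from only the first slice of the (very large) train split. A hedged sketch of that idea using `islice`, as imported at the top of the file; the prefix size and helper are illustrative, not the harness's exact code:

```python
# Sketch: materialize only a small prefix of a huge split and sample few-shot
# examples from it, instead of loading everything into memory.
import random
from itertools import islice


def fewshot_examples(docs, k, rnd):
    head = list(islice(docs, 0, 500))  # 500 is an arbitrary, illustrative cap
    return rnd.sample(head, k)


examples = fewshot_examples(iter(range(10_000)), k=3, rnd=random.Random(0))
print(examples)
```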
......@@ -15,7 +15,6 @@ based algorithm and a word co-occurrence algorithm.
Homepage: https://allenai.org/data/open-book-qa
"""
from lm_eval.base import MultipleChoiceTask
from .common import HFTask
_CITATION = """
......@@ -28,7 +27,7 @@ _CITATION = """
"""
class OpenBookQA(HFTask, MultipleChoiceTask):
class OpenBookQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "openbookqa"
DATASET_NAME = "main"
......@@ -42,7 +41,18 @@ class OpenBookQA(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return True
def _convert_standard(self, doc):
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
out_doc = {
"id": doc["id"],
"query": doc["question_stem"],
......
......@@ -10,15 +10,9 @@ math, computer science, and philosophy papers.
Homepage: https://pile.eleuther.ai/
"""
import os
import lm_dataformat
import abc
import numpy as np
from lm_eval.base import rf, PerplexityTask
from ..metrics import mean, matthews_corrcoef, f1_score
from ..utils import general_detokenize
from best_download import download_file
import inspect
import lm_eval.datasets.pile.pile
from lm_eval.base import PerplexityTask
_CITATION = """
......@@ -31,32 +25,10 @@ _CITATION = """
"""
class PilePerplexityTask(PerplexityTask, abc.ABC):
class PilePerplexityTask(PerplexityTask):
VERSION = 1
PILE_SET_NAME = None
VAL_PATH = 'data/pile/val.jsonl.zst'
TEST_PATH = 'data/pile/test.jsonl.zst'
def download(self):
# TODO: separate pile val/test out by component so we don't have to scan the entire file once per set
if not os.path.exists("data/pile/test.jsonl.zst"):
# todo use new best_download fallback api
os.makedirs("data/pile/", exist_ok=True)
download_file("http://eaidata.bmk.sh/data/pile/val.jsonl.zst", local_file=self.VAL_PATH, expected_checksum="264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92")
download_file("http://eaidata.bmk.sh/data/pile/test.jsonl.zst", local_file=self.TEST_PATH, expected_checksum="0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e")
def validation_docs(self):
rdr = lm_dataformat.Reader(self.VAL_PATH)
for doc, metadata in rdr.stream_data(get_meta=True):
if metadata["pile_set_name"] == self.PILE_SET_NAME:
yield doc
def test_docs(self):
rdr = lm_dataformat.Reader(self.TEST_PATH)
for doc, metadata in rdr.stream_data(get_meta=True):
if metadata["pile_set_name"] == self.PILE_SET_NAME:
yield doc
DATASET_PATH = inspect.getfile(lm_eval.datasets.pile.pile)
DATASET_NAME = None
def has_validation_docs(self):
return True
......@@ -64,90 +36,98 @@ class PilePerplexityTask(PerplexityTask, abc.ABC):
def has_test_docs(self):
return True
def validation_docs(self):
for doc in self.dataset["validation"]:
yield doc["text"]
def test_docs(self):
for doc in self.dataset["test"]:
yield doc["text"]
class PileArxiv(PilePerplexityTask):
PILE_SET_NAME = "ArXiv"
DATASET_NAME = "pile_arxiv"
class PileBooks3(PilePerplexityTask):
PILE_SET_NAME = "Books3"
DATASET_NAME = "pile_books3"
class PileBookCorpus2(PilePerplexityTask):
PILE_SET_NAME = "BookCorpus2"
DATASET_NAME = "pile_bookcorpus2"
class PileDmMathematics(PilePerplexityTask):
PILE_SET_NAME = "DM Mathematics"
DATASET_NAME = "pile_dm-mathematics"
class PileEnron(PilePerplexityTask):
PILE_SET_NAME = "Enron Emails"
DATASET_NAME = "pile_enron"
class PileEuroparl(PilePerplexityTask):
PILE_SET_NAME = "EuroParl"
DATASET_NAME = "pile_europarl"
class PileFreeLaw(PilePerplexityTask):
PILE_SET_NAME = "FreeLaw"
DATASET_NAME = "pile_freelaw"
class PileGithub(PilePerplexityTask):
PILE_SET_NAME = "Github"
DATASET_NAME = "pile_github"
class PileGutenberg(PilePerplexityTask):
PILE_SET_NAME = "Gutenberg (PG-19)"
DATASET_NAME = "pile_gutenberg"
class PileHackernews(PilePerplexityTask):
PILE_SET_NAME = "HackerNews"
DATASET_NAME = "pile_hackernews"
class PileNIHExporter(PilePerplexityTask):
PILE_SET_NAME = "NIH ExPorter"
DATASET_NAME = "pile_nih-exporter"
class PileOpenSubtitles(PilePerplexityTask):
PILE_SET_NAME = "OpenSubtitles"
DATASET_NAME = "pile_opensubtitles"
class PileOpenWebText2(PilePerplexityTask):
PILE_SET_NAME = "OpenWebText2"
DATASET_NAME = "pile_openwebtext2"
class PilePhilPapers(PilePerplexityTask):
PILE_SET_NAME = "PhilPapers"
DATASET_NAME = "pile_philpapers"
class PilePileCc(PilePerplexityTask):
PILE_SET_NAME = "Pile-CC"
DATASET_NAME = "pile_pile-cc"
class PilePubmedAbstracts(PilePerplexityTask):
PILE_SET_NAME = "PubMed Abstracts"
DATASET_NAME = "pile_pubmed-abstracts"
class PilePubmedCentral(PilePerplexityTask):
PILE_SET_NAME = "PubMed Central"
DATASET_NAME = "pile_pubmed-central"
class PileStackExchange(PilePerplexityTask):
PILE_SET_NAME = "StackExchange"
DATASET_NAME = "pile_stackexchange"
class PileUspto(PilePerplexityTask):
PILE_SET_NAME = "USPTO Backgrounds"
DATASET_NAME = "pile_upsto"
class PileUbuntuIrc(PilePerplexityTask):
PILE_SET_NAME = "Ubuntu IRC"
DATASET_NAME = "pile_ubuntu-irc"
class PileWikipedia(PilePerplexityTask):
PILE_SET_NAME = "Wikipedia (en)"
DATASET_NAME = "pile_wikipedia"
class PileYoutubeSubtitles(PilePerplexityTask):
PILE_SET_NAME = "YoutubeSubtitles"
DATASET_NAME = "pile_youtubesubtitles"
......@@ -9,10 +9,7 @@ actually learning about the world?
Homepage: https://yonatanbisk.com/piqa/
"""
import numpy as np
from lm_eval.base import MultipleChoiceTask, rf
from ..metrics import mean
from . common import HFTask
from lm_eval.base import MultipleChoiceTask
_CITATION = """
......@@ -29,7 +26,7 @@ _CITATION = """
"""
class PiQA(HFTask, MultipleChoiceTask):
class PiQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "piqa"
DATASET_NAME = None
......@@ -43,7 +40,15 @@ class PiQA(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return False
def _convert_standard(self, doc):
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def _process_doc(self, doc):
out_doc = {
"goal": doc["goal"],
"choices": [doc["sol1"], doc["sol2"]],
......
......@@ -15,7 +15,6 @@ have been trained on data not specifically collected to succeed on PROST."
Homepage: https://github.com/nala-cub/prost
"""
from lm_eval.base import MultipleChoiceTask
from . common import HFTask
_CITATION = """
......@@ -36,7 +35,7 @@ _CITATION = """
"""
class PROST(HFTask, MultipleChoiceTask):
class PROST(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "corypaik/prost"
DATASET_NAME = None
......@@ -50,6 +49,9 @@ class PROST(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return True
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None):
assert num_fewshot == 0, 'PROST is designed to probe models in a zero-shot fashion only.'
return super().fewshot_context(
......@@ -59,7 +61,7 @@ class PROST(HFTask, MultipleChoiceTask):
description=description
)
def _convert_standard(self, doc):
def _process_doc(self, doc):
out_doc = {
"query": f"{doc['context']}\nQuestion: {doc['ex_question']}\nAnswer:",
"choices": [doc['A'], doc['B'], doc['C'], doc['D']],
......
......@@ -16,9 +16,8 @@ and (4) a yes/no/maybe answer which summarizes the conclusion.
Homepage: https://pubmedqa.github.io/
"""
import numpy as np
from .common import HFTask
from lm_eval.base import rf
from ..metrics import mean
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
......@@ -32,7 +31,7 @@ _CITATION = """
"""
class Pubmed_QA(HFTask):
class Pubmed_QA(Task):
VERSION = 0
DATASET_PATH = "pubmed_qa"
DATASET_NAME = "pqa_labeled"
......@@ -49,7 +48,7 @@ class Pubmed_QA(HFTask):
def test_docs(self):
if self.has_test_docs():
# HF is labelled as train but its really just for testing
return self.data["train"]
return self.dataset["train"]
def doc_to_text(self, doc):
ctxs = "\n".join(doc["context"]["contexts"])
......
......@@ -13,9 +13,6 @@ and Entrance Exam.
Homepage: http://nlp.uned.es/clef-qa/repository/qa4mre.php
"""
import os
import xml.etree.ElementTree as ET
from best_download import download_file
from lm_eval.base import MultipleChoiceTask
......@@ -31,35 +28,8 @@ _CITATION = """
class QA4MRE(MultipleChoiceTask):
VERSION = 0
YEAR = None
def download(self):
year = self.YEAR
lang = "EN"
base_path = (
"http://nlp.uned.es/clef-qa/repository/js/scripts/downloadFile.php?"
"file=/var/www/html/nlp/clef-qa/repository/resources/QA4MRE/"
)
# TODO: add side tasks?
variable_year_path = {
2011: '2011/Training_Data/Goldstandard/',
2012: '2012/Main_Task/Training_Data/Goldstandard/Used_in_Evaluation/',
2013: '2013/Main_Task/Training_Data/Goldstandard/'
}
sha256sums = {
2011 : "6d2524952a3a015f2a82df785b85b5578681e3602ec276b4e72c01f4ebc50034",
2012 : "f9edaf408f8ac93f89a643a0d0b19263a1bb5ce64f19b2af10df279a656dfb24",
2013 : "c60e5aa4ec77e0493ef0b11d46bd1d74d58a499a3a2f871b8cf3af9536f0f094",
}
vpath = variable_year_path[year]
url_path = f"{base_path}{vpath}QA4MRE-{year}-{lang}_GS.xml"
if not os.path.exists("data/qa4mre"):
os.makedirs("data/qa4mre", exist_ok=True)
if not os.path.isfile(f"data/qa4mre/QA4MRE-{year}-{lang}"):
download_file(
url_path,
local_file=f"data/qa4mre/QA4MRE-{year}-{lang}_GS.xml",
expected_checksum=sha256sums[year],
)
DATASET_PATH = "qa4mre"
DATASET_NAME = None
def has_training_docs(self):
return False
......@@ -70,39 +40,31 @@ class QA4MRE(MultipleChoiceTask):
def has_test_docs(self):
return True
def _convert_standard(self, question):
choices = [i.text for i in question.iter('answer')]
def test_docs(self):
# `qa4mre` only has train data so we use it for the test docs.
return map(self._process_doc, self.dataset["train"])
def _process_doc(self, doc):
choices = doc["answer_options"]["answer_str"]
out_doc = {
"query" : question.find('q_str').text,
"choices": choices,
"gold" : int(question.find("./answer[@correct='Yes']").attrib["a_id"]) - 1,
"source": doc["document_str"].strip().replace("\'", "'"),
"query": doc["question_str"],
"choices": choices,
"gold": int(doc["correct_answer_id"]) - 1,
}
return out_doc
def load_docs(self, textfilename, tfds=False):
tree = ET.parse(textfilename)
root = tree.getroot()
# TODO: context is much larger than the context sometimes
# at the moment, it just gets left-truncated by LM automatically, and maybe that's good enough?
for reading_test in root.iter('reading-test'):
src = reading_test[0].text
src = src.strip().replace("\'", "'")
for qid, question in enumerate(reading_test.iter('q')):
out_doc = self._convert_standard(question)
out_doc['source'] = src
yield out_doc
def test_docs(self):
return self.load_docs(f"data/qa4mre/QA4MRE-{self.YEAR}-EN_GS.xml")
def doc_to_text(self, doc):
return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"])
class QA4MRE_2011(QA4MRE):
YEAR = 2011
DATASET_NAME = "2011.main.EN"
class QA4MRE_2012(QA4MRE):
YEAR = 2012
DATASET_NAME = "2012.main.EN"
class QA4MRE_2013(QA4MRE):
YEAR = 2013
DATASET_NAME = "2013.main.EN"
......@@ -11,13 +11,10 @@ provide supporting evidence to answers.
Homepage: https://allenai.org/data/qasper
"""
from collections import Counter
from math import exp
import random
import re
import string
from lm_eval.base import rf
from lm_eval.base import rf, Task
from lm_eval.metrics import f1_score, mean
from .common import HFTask
_CITATION = """
......@@ -104,11 +101,20 @@ def token_f1_score(prediction, ground_truth):
return f1
class QASPER(HFTask):
class QASPER(Task):
VERSION = 0
DATASET_PATH = "qasper"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def doc_to_text(self, doc):
return (
"TITLE: "
......@@ -130,14 +136,14 @@ class QASPER(HFTask):
return " " + answer
def training_docs(self):
for doc in self.data["train"]:
yield from self.process_doc(doc)
for doc in self.dataset["train"]:
yield from self._process_doc(doc)
def validation_docs(self):
for doc in self.data["train"]:
yield from self.process_doc(doc)
for doc in self.dataset["validation"]:
yield from self._process_doc(doc)
def process_doc(self, doc):
def _process_doc(self, doc):
"""Given a `doc`, flatten it out so that each JSON blob
contains exactly one question and one answer. Logic taken from
the reference implementation available at
......
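The renamed `_process_doc` flattens each paper into one JSON blob per question. A hedged sketch of that flattening idea on a toy record; the field names here are illustrative and simpler than the real QASPER schema:

```python
# Sketch: one paper with several questions becomes several single-question docs.
def flatten(paper):
    for question, answer in zip(paper["questions"], paper["answers"]):
        yield {
            "title": paper["title"],
            "abstract": paper["abstract"],
            "question": question,
            "answer": answer,
        }


paper = {"title": "T", "abstract": "A", "questions": ["q1", "q2"], "answers": ["a1", "a2"]}
print(list(flatten(paper)))  # two docs, one per question
```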
......@@ -10,10 +10,9 @@ a teacher who answers the questions by providing short excerpts (spans) from the
Homepage: https://quac.ai/
"""
import json
import os
import inspect
import lm_eval.datasets.quac.quac
from lm_eval.base import Task
from ..utils import sh
_CITATION = """
......@@ -28,18 +27,8 @@ _CITATION = """
class QuAC(Task):
VERSION = 0
def __init__(self):
super().__init__()
def download(self):
if not os.path.exists('data/quac'):
# TODO: convert to use best_download
sh("""
mkdir -p data/quac
wget https://s3.amazonaws.com/my89public/quac/train_v0.2.json -O data/quac/train_v0.2.json
wget https://s3.amazonaws.com/my89public/quac/val_v0.2.json -O data/quac/val_v0.2.json
""")
DATASET_PATH = inspect.getfile(lm_eval.datasets.quac.quac)
DATASET_NAME = None
def has_training_docs(self):
return True
......@@ -51,28 +40,20 @@ class QuAC(Task):
return False
def training_docs(self):
myjson = json.load(open('data/quac/train_v0.2.json'))['data']
return self.load_doc(myjson)
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
myjson = json.load(open('data/quac/val_v0.2.json'))['data']
return self.load_doc(myjson)
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
raise NotImplementedError("QuAC has no test docs.")
def load_doc(self, myjson):
docs = []
for item in myjson:
title = item['title'] + ' - ' + item['section_title']
paragraph = item['paragraphs'][0]['context'].replace("CANNOTANSWER", "")
qas = item['paragraphs'][0]['qas']
qa_pairs = [(qa['question'], qa['answers'][0]['text']) for qa in qas]
for (question, answer) in qa_pairs:
doc = { 'title': title, 'paragraph': paragraph, 'question': question, 'answer': answer }
docs.append(doc)
return docs
def _process_doc(self, doc):
doc["title"] = doc['title'] + ' - ' + doc['section_title']
return doc
def doc_to_text(self, doc):
return 'TITLE: ' + doc['title'] + '\n' + 'PARAGRAPH: ' + doc['paragraph'] + '\n\n' + 'Q: ' + doc['question'] + '\n\n' + 'A: '
......@@ -88,7 +69,7 @@ class QuAC(Task):
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError('Evaluation not implemented')
......
......@@ -12,9 +12,8 @@ Homepage: https://www.cs.cmu.edu/~glai1/data/race/
import collections
import datasets
import numpy as np
from lm_eval.base import rf
from ..metrics import mean
from . common import HFTask
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
......@@ -35,16 +34,14 @@ class each:
return list(map(self.f, other))
class RACE(HFTask):
VERSION = 0
class RACE(Task):
VERSION = 1
DATASET_PATH = "race"
DATASET_NAME = "high"
cache = {}
letter_to_num = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
assert datasets.__version__ == "1.15.1", "RACE requires datasets==1.15.1!"
def has_training_docs(self):
return True
......
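A side note on the hard version pin asserted above: an equivalent check that also reports the installed version could look like the following sketch, using `packaging`, which other files in this diff already import:

```python
# Sketch: the same datasets==1.15.1 requirement as the assert above, but with the
# installed version included in the error message.
import datasets
from packaging import version

installed = version.parse(datasets.__version__)
required = version.parse("1.15.1")
assert installed == required, f"RACE requires datasets=={required}, found {installed}"
```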
......@@ -7,7 +7,8 @@ multiple-choice analogy questions; 5 choices per question.
Homepage: https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art)
"""
import os
import inspect
import lm_eval.datasets.sat_analogies.sat_analogies
from lm_eval.base import MultipleChoiceTask
......@@ -25,20 +26,18 @@ _CITATION = """
"""
class SATAnalogies(MultipleChoiceTask):
class SATAnalogies(MultipleChoiceTask):
VERSION = 0
NEEDS_MANUAL_DL = True
def __init__(self):
super().__init__()
DATASET_PATH = inspect.getfile(lm_eval.datasets.sat_analogies.sat_analogies)
DATASET_NAME = None
def download(self):
# We should be using a checksum here.
# The canonical sha256 hash is below:
# 9dece377d8d57253ef8c78370ff15de0bb1d9e90a82c815a67ba1e621e921bfc
if not os.path.exists('data/sat/SAT-package-V3.txt'):
raise NotImplementedError('SAT Analogies dataset is not provided. Follow instructions on https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art) to locate.')
def __init__(self, data_dir: str):
"""
SAT Analog Questions is not publicly available. You must request the data
by emailing Peter Turney and then download it to a local directory path
which should be passed into the `data_dir` arg.
"""
super().__init__(data_dir=data_dir)
def has_training_docs(self):
return False
......@@ -51,38 +50,20 @@ class SATAnalogies(MultipleChoiceTask):
def training_docs(self):
return []
def test_docs(self):
return []
def validation_docs(self):
data = []
return map(self._process_doc, self.dataset["validation"])
with open("data/sat/SAT-package-V3.txt", "r") as f:
record = []
for line in f:
line = line.strip()
if len(line) == 0 and record:
data.append(record)
record = []
elif len(line) > 0 and line[0] == '#':
continue
else:
record.append(line)
data.append(record)
for record in data:
source = record[-8]
query = record[-7]
choices = record[-6:-1]
answer_key = record[-1]
def test_docs(self):
return []
doc = {
'source': source,
'query': query.split(' ')[:2],
'choices': ["{} is to {}".format(*c.split(' ')[:2]) for c in choices],
'gold': ['a','b','c','d','e'].index(answer_key.strip()),
}
yield doc
def _process_doc(self, doc):
return {
'source': doc['source'],
'query': doc['stem'].split(' ')[:2],
'choices': ["{} is to {}".format(*c.split(' ')[:2]) for c in doc["choices"]],
'gold': ['a', 'b', 'c', 'd', 'e'].index(doc['solution'].strip()),
}
def doc_to_text(self, doc):
return "{} is to {} as".format(*doc['query'])
......@@ -9,11 +9,7 @@ with supporting evidence for the correct answer is provided.
Homepage: https://allenai.org/data/sciq
"""
import os
import json
import zipfile
from lm_eval.base import MultipleChoiceTask
from best_download import download_file
_CITATION = """
......@@ -28,17 +24,8 @@ _CITATION = """
class SciQ(MultipleChoiceTask):
VERSION = 0
# Multiple languages and multiple years
def download(self):
if not os.path.exists('data/sciq'):
os.makedirs('data/sciq', exist_ok=True)
download_file(
'https://ai2-public-datasets.s3.amazonaws.com/sciq/SciQ.zip',
local_file='data/sciq/SciQ.zip',
expected_checksum='7f3312f6ac6b09970b32942d106a8c44ec0dad46a0369f17d635aff8e348a87c',
)
with zipfile.ZipFile("data/sciq/SciQ.zip", "r") as zf:
zf.extractall("data/sciq/")
DATASET_PATH = "sciq"
DATASET_NAME = None
def has_training_docs(self):
return True
......@@ -49,36 +36,32 @@ class SciQ(MultipleChoiceTask):
def has_test_docs(self):
return True
def _convert_standard(self, doc):
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
choices = [
doc["distractor1"],
doc["distractor2"],
doc["distractor1"],
doc["distractor2"],
doc["distractor3"],
doc["correct_answer"],
]
src = doc['support']
out_doc = {
"source" : src,
"query" : doc['question'],
"choices" : choices,
"gold" : 3,
"source": src,
"query": doc['question'],
"choices": choices,
"gold": 3,
}
return out_doc
def load_docs(self, textfilename):
with open(textfilename, 'r') as j:
docs = json.loads(j.read())
for record in docs:
yield self._convert_standard(record)
def training_docs(self):
return self.load_docs("data/sciq/SciQ dataset-2 3/train.json")
def validation_docs(self):
return self.load_docs("data/sciq/SciQ dataset-2 3/valid.json")
def test_docs(self):
return self.load_docs("data/sciq/SciQ dataset-2 3/test.json")
def doc_to_text(self, doc):
return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"]).strip()
......@@ -15,9 +15,7 @@ Homepage: https://rajpurkar.github.io/SQuAD-explorer/
"""
import datasets
from math import exp
from lm_eval.base import rf
from lm_eval.metrics import f1_score, mean
from . common import HFTask
from lm_eval.base import rf, Task
from functools import partial
from packaging import version
......@@ -45,7 +43,7 @@ def _squad_agg(key, items):
return _squad_metric(predictions=predictions, references=references)[key]
class SQuAD2(HFTask):
class SQuAD2(Task):
VERSION = 1
DATASET_PATH = "squad_v2"
DATASET_NAME = None
......@@ -63,10 +61,10 @@ class SQuAD2(HFTask):
return False
def training_docs(self):
return self.data["train"]
return self.dataset["train"]
def validation_docs(self):
return self.data["validation"]
return self.dataset["validation"]
def doc_to_text(self, doc):
return 'Title: ' + doc['title'] + '\n\n' + 'Background: ' + doc['context'] + '\n\n' + 'Question: ' + doc['question'] + '\n\n' + 'Answer:'
......
......@@ -8,8 +8,9 @@ to choose the correct ending to a four-sentence story.
Homepage: https://cs.rochester.edu/nlp/rocstories/
"""
import csv
from lm_eval.base import Task
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
......@@ -34,11 +35,16 @@ _CITATION = """
class StoryCloze(Task):
VERSION = 0
NEEDS_MANUAL_DL = True
DATASET_PATH = "story_cloze"
DATASET_NAME = None
def download(self):
#TODO: replace with Eye link
pass
def __init__(self, data_dir: str):
"""
StoryCloze is not publicly available. You must download the data by
following https://cs.rochester.edu/nlp/rocstories/ and pass the folder
path into the `data_dir` arg.
"""
super().__init__(data_dir=data_dir)
def has_training_docs(self):
return False
......@@ -52,40 +58,46 @@ class StoryCloze(Task):
def training_docs(self):
pass
def load_doc(self, filename):
with open(filename, newline='') as file:
filereader = csv.reader(file)
return list(filereader)
def validation_docs(self):
return self.load_doc("data/storycloze/cloze_test_val__winter2018-cloze_test_ALL_val - 1 - 1.csv")
return self.dataset["validation"]
def test_docs(self):
return self.load_doc("data/storycloze/cloze_test_test__winter2018-cloze_test_ALL_test - 1.csv")
return self.dataset["test"]
def doc_to_text(self, doc):
return ' '.join([*doc[1:5]])
return ' '.join([
doc["input_sentence_1"],
doc["input_sentence_2"],
doc["input_sentence_3"],
doc["input_sentence_4"],
])
def doc_to_target(self, doc):
return " " + doc[int(doc[-1]) - 4]
clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
# `- 1` because the `answer_right_ending` index is 1-based.
return " " + clozes[doc["answer_right_ending"] - 1]
def construct_requests(self, doc, ctx):
""" Uses RequestFactory to construct Requests and returns an iterable of
""" Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError('Evaluation not implemented')
clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
lls = [
rf.loglikelihood(ctx, " {}".format(choice))[0]
for choice in clozes
]
return lls
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
......@@ -93,23 +105,36 @@ class StoryCloze(Task):
:param results:
The results of the requests created in construct_requests.
"""
# TODO: implement evaluation.
raise NotImplementedError('Evaluation not implemented')
gold = doc["answer_right_ending"] - 1
acc = 1. if np.argmax(results) == gold else 0.
return {
"acc": acc
}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
# TODO: implement evaluation.
raise NotImplementedError('Evaluation not implemented')
return {
"acc": mean
}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
# TODO: implement evaluation.
raise NotImplementedError('Evaluation not implemented')
return {
"acc": True
}
class StoryCloze2016(StoryCloze):
DATASET_NAME = "2016"
class StoryCloze2018(StoryCloze):
DATASET_NAME = "2018"
......@@ -12,10 +12,9 @@ TODO: WSC requires free-form generation.
import numpy as np
import sklearn
import transformers.data.metrics.squad_metrics as squad_metrics
from . common import HFTask, yesno
from lm_eval.base import rf
from ..metrics import mean, acc_all, metric_max_over_ground_truths
from ..utils import general_detokenize
from lm_eval.base import rf, Task
from lm_eval.metrics import mean, acc_all, metric_max_over_ground_truths, yesno
from lm_eval.utils import general_detokenize
_CITATION = """
......@@ -33,7 +32,7 @@ _CITATION = """
"""
class BoolQ(HFTask):
class BoolQ(Task):
VERSION = 1
DATASET_PATH = "super_glue"
DATASET_NAME = "boolq"
......@@ -47,6 +46,14 @@ class BoolQ(HFTask):
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return f"{doc['passage']}\nQuestion: {doc['question']}?\nAnswer:"
......@@ -81,7 +88,7 @@ class BoolQ(HFTask):
}
class CommitmentBank(HFTask):
class CommitmentBank(Task):
VERSION = 1
DATASET_PATH = "super_glue"
DATASET_NAME = "cb"
......@@ -95,6 +102,14 @@ class CommitmentBank(HFTask):
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format(
doc["premise"],
......@@ -148,7 +163,7 @@ class CommitmentBank(HFTask):
}
class Copa(HFTask):
class Copa(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "copa"
......@@ -162,6 +177,14 @@ class Copa(HFTask):
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
# Drop the period
connector = {
......@@ -208,7 +231,7 @@ class Copa(HFTask):
return choice[0].lower() + choice[1:]
class MultiRC(HFTask):
class MultiRC(Task):
VERSION = 1
DATASET_PATH = "super_glue"
DATASET_NAME = "multirc"
......@@ -222,6 +245,14 @@ class MultiRC(HFTask):
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:"
......@@ -260,7 +291,7 @@ class MultiRC(HFTask):
}
class ReCoRD(HFTask):
class ReCoRD(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "record"
......@@ -279,13 +310,13 @@ class ReCoRD(HFTask):
# Each doc consists of multiple answer candidates, each of which is scored yes/no.
if self._training_docs is None:
self._training_docs = []
for doc in self.data["train"]:
for doc in self.dataset["train"]:
self._training_docs.append(self._process_doc(doc))
return self._training_docs
def validation_docs(self):
# See: training_docs
for doc in self.data["validation"]:
for doc in self.dataset["validation"]:
yield self._process_doc(doc)
@classmethod
......@@ -349,7 +380,7 @@ class ReCoRD(HFTask):
}
class WordsInContext(HFTask):
class WordsInContext(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "wic"
......@@ -363,6 +394,14 @@ class WordsInContext(HFTask):
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the" \
" two sentences above?\nAnswer:".format(
......@@ -401,7 +440,7 @@ class WordsInContext(HFTask):
}
class SGWinogradSchemaChallenge(HFTask):
class SGWinogradSchemaChallenge(Task):
VERSION = 0
# Note: This implementation differs from Fig G.32 because this is the SuperGLUE,
# binary version of the task.
......@@ -423,11 +462,14 @@ class SGWinogradSchemaChallenge(HFTask):
# GPT-3 Paper's format only uses positive examples for fewshot "training"
self._training_docs = [
doc for doc in
self.data["train"]
self.dataset["train"]
if doc["label"]
]
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
raw_passage = doc["text"]
# NOTE: HuggingFace span indices are word-based not character-based.
......
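The `yesno` helper now comes from `lm_eval.metrics` rather than the old `common` module. As an illustration only, a made-up BoolQ doc rendered with the prompt from `doc_to_text` above, with a local stand-in `yesno` assumed to map a truthy label to the literal target strings:

```python
# Sketch: BoolQ prompt rendering plus the yes/no target. `yesno` is a stand-in for the
# helper imported from lm_eval.metrics; values in `doc` are made up.
def yesno(label):
    return "yes" if label else "no"


doc = {
    "passage": "Aurorae are produced when charged particles hit the upper atmosphere.",
    "question": "are auroras caused by particles from the sun",
    "label": 1,
}
print(f"{doc['passage']}\nQuestion: {doc['question']}?\nAnswer:")
print(" " + yesno(doc["label"]))  # " yes"
```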
......@@ -90,7 +90,7 @@ class GeneralTranslationTask(Task):
super().__init__()
def download(self):
def download(self, data_dir=None, cache_dir=None, download_mode=None):
# This caches in the users home dir automatically
self.src_file, self.ref_file = \
sacrebleu.download_test_set(self.sacrebleu_dataset, self.sacrebleu_language_pair)
......
......@@ -9,13 +9,10 @@ high quality distant supervision for answering the questions.
Homepage: https://nlp.cs.washington.edu/triviaqa/
"""
import os
import json
import jsonlines
import inspect
import lm_eval.datasets.triviaqa.triviaqa
from lm_eval.base import Task, rf
from ..metrics import mean
from ..utils import sh
from best_download import download_file
from lm_eval.metrics import mean
_CITATION = """
......@@ -33,14 +30,8 @@ _CITATION = """
class TriviaQA(Task):
VERSION = 0
def download(self):
if not os.path.exists('data/triviaqa/unfiltered-web-train.jsonl'):
os.makedirs("data/triviaqa/", exist_ok=True)
download_file("http://eaidata.bmk.sh/data/triviaqa-unfiltered.tar.gz", local_file="data/triviaqa/triviaqa-unfiltered.tar.gz", expected_checksum="adc19b42769062d241a8fbe834c56e58598d9322eb6c614e9f33a68a2cf5523e")
sh("""
cd data/triviaqa/
tar -xf triviaqa-unfiltered.tar.gz
""")
DATASET_PATH = inspect.getfile(lm_eval.datasets.triviaqa.triviaqa)
DATASET_NAME = None
def has_training_docs(self):
return True
......@@ -52,19 +43,19 @@ class TriviaQA(Task):
return False
def training_docs(self):
return jsonlines.open('data/triviaqa/unfiltered-web-train.jsonl')
return self.dataset['train']
def validation_docs(self):
return jsonlines.open('data/triviaqa/unfiltered-web-dev.jsonl')
return self.dataset['validation']
def test_docs(self):
raise NotImplementedError()
def doc_to_text(self, doc):
return f"Question: {doc['Question']}\nAnswer:"
return f"Question: {doc['question']}\nAnswer:"
def doc_to_target(self, doc):
return " " + doc['Answer']['Value']
return " " + doc['answer']['value']
def _remove_prefixes(self, aliases):
# Optimization: Remove any alias that has a strict prefix elsewhere in the list
......@@ -74,12 +65,11 @@ class TriviaQA(Task):
for alias in aliases[1:]:
if not alias.startswith(ret[-1]):
ret.append(alias)
return ret
def construct_requests(self, doc, ctx):
ret = []
for alias in self._remove_prefixes(doc['Answer']['Aliases']):
for alias in self._remove_prefixes(doc['answer']['aliases']):
_, is_prediction = rf.loglikelihood(ctx, " " + alias)
ret.append(is_prediction)
return ret
......
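A hedged sketch of the alias prefix-pruning in `_remove_prefixes` above, as a standalone function; the setup lines elided from the hunk are assumed here to sort the aliases and seed `ret` with the first one:

```python
# Sketch: drop any alias that has a strict prefix earlier in the (sorted) list, so only
# one log-likelihood request is constructed per prefix family.
def remove_prefixes(aliases):
    aliases = sorted(aliases)
    ret = [aliases[0]]
    for alias in aliases[1:]:
        if not alias.startswith(ret[-1]):
            ret.append(alias)
    return ret


print(remove_prefixes(["Washington", "Washington, D.C.", "District of Columbia"]))
# ['District of Columbia', 'Washington']
```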