Unverified commit e0cfeb90 authored by Jonathan Tow, committed by GitHub

Merge branch 'master' into researcher2

parents f9b81151 6caa0afd
"""
LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning
https://arxiv.org/pdf/2007.08124.pdf
LogiQA is a dataset for testing human logical reasoning. It consists of 8,678 QA
instances, covering multiple types of deductive reasoning. Results show that state-
of-the-art neural models perform far worse than the human ceiling. The dataset can
also serve as a benchmark for reinvestigating logical AI under the deep learning
NLP setting.
Homepage: https://github.com/lgw863/LogiQA-dataset
"""
import inspect
import lm_eval.datasets.logiqa.logiqa
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@misc{liu2020logiqa,
title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
year={2020},
eprint={2007.08124},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
class LogiQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = inspect.getfile(lm_eval.datasets.logiqa.logiqa)
DATASET_NAME = None
def has_training_docs(self):
return True
@@ -30,7 +41,18 @@ class LogiQA(MultipleChoiceTask):
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
def format_example(doc, choices):
"""
Passage: <passage>
@@ -42,7 +64,7 @@ class LogiQA(MultipleChoiceTask):
D. <choice4>
Answer:
"""
prompt = "Passage: " + doc["context"] + "\n"
prompt += "Question: " + doc["question"] + "\nChoices:\n"
for choice, option in zip(choices, doc["options"]):
prompt += f"{choice.upper()}. {option}\n"
@@ -53,34 +75,9 @@ class LogiQA(MultipleChoiceTask):
"passage": doc["passage"], # Used for decontamination
"query": format_example(doc, choices),
"choices": doc["options"],
"gold": choices.index(doc["label"])
}
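# Illustrative sketch only (not part of the merged code): with a hypothetical
# document, `format_example` renders a prompt of the shape described in the
# docstring above, e.g.
#
#   Passage: All metals conduct electricity. Copper is a metal.
#   Question: Which of the following can be concluded?
#   Choices:
#   A. Copper conducts electricity.
#   B. Copper is an insulator.
#   C. All conductors are metals.
#   D. Copper is not a metal.
#   Answer:
#
# MultipleChoiceTask then compares per-choice loglikelihoods, with `gold`
# giving the index of the correct option in `choices`.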
def doc_to_text(self, doc):
return doc["query"]
...
"""
MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms
https://arxiv.org/pdf/1905.13319.pdf
MathQA is a large-scale dataset of 37k English multiple-choice math word problems
covering multiple math domain categories by modeling operation programs corresponding
to word problems in the AQuA dataset (Ling et al., 2017).
Homepage: https://math-qa.github.io/math-QA/
"""
import re
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@misc{amini2019mathqa,
title={MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms},
author={Aida Amini and Saadia Gabriel and Peter Lin and Rik Koncel-Kedziorski and Yejin Choi and Hannaneh Hajishirzi},
year={2019},
eprint={1905.13319},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
class MathQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "math_qa"
DATASET_NAME = None
@@ -17,13 +38,23 @@ class MathQA(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
answer_idx = ['a', 'b', 'c', 'd', 'e'].index(doc['correct'])
choices = [c[4:].rstrip(" ,") for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc['options'])]
out_doc = {
"query": "Question: " + doc['Problem'] + "\nAnswer:",
"choices": choices,
"gold": answer_idx,
}
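# A hypothetical example of what the options regex above extracts (values
# invented for illustration):
#
#   doc['options'] = "a ) 24 , b ) 36 , c ) 48 , d ) 60 , e ) none of these"
#   re.findall(...) -> ['a ) 24 , ', 'b ) 36 , ', 'c ) 48 , ', 'd ) 60 , ', 'e ) none of these']
#   choices         -> ['24', '36', '48', '60', 'none of these']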
...
@@ -3,6 +3,12 @@
A Study of Temporal Commonsense Understanding
https://arxiv.org/pdf/1909.03065.pdf
MC-TACO is a dataset of 13k question-answer pairs that require temporal commonsense
comprehension. The dataset contains five temporal properties, (1) duration (how long
an event takes), (2) temporal ordering (typical order of events), (3) typical time
(when an event occurs), (4) frequency (how often an event occurs), and (5) stationarity
(whether a state is maintained for a very long time or indefinitely).
WARNING: Running this task with a `--limit` arg will give misleading results! The
corresponding dataset is structured such that each multiple-choice-question gathered
by the authors is split into question-option pairs, where each such pair gets
@@ -11,6 +17,14 @@ shuffles these documents, setting `--limit` will likely "cut off" certain candid
answers. This is a problem because the task's metrics require an exhaustive evaluation
of a question's options. See section 4 of the paper for details.
Homepage: https://leaderboard.allenai.org/mctaco/submissions/public
"""
import numpy as np
from collections import defaultdict
from lm_eval.base import rf, Task
_CITATION = """
@inproceedings{ZKNR19,
author = {Ben Zhou, Daniel Khashabi, Qiang Ning and Dan Roth},
title = {“Going on a vacation” takes longer than “Going for a walk”: A Study of Temporal Commonsense Understanding },
@@ -19,13 +33,8 @@ of a question's options. See section 4 of the paper for details.
}
"""
class MCTACO(Task):
VERSION = 0
DATASET_PATH = "mc_taco"
DATASET_NAME = None
@@ -39,6 +48,12 @@ class MCTACO(HFTask):
def has_test_docs(self):
return True
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return self.dataset["test"]
def doc_to_text(self, doc):
return f"{doc['sentence']}\nQuestion: {doc['question']}\n"\
f"Answer: {doc['answer']}\nPlausible:"
...
@@ -2,6 +2,19 @@
MuTual: A Dataset for Multi-Turn Dialogue Reasoning
https://www.aclweb.org/anthology/2020.acl-main.130/
MuTual is a retrieval-based dataset for multi-turn dialogue reasoning, which is
modified from Chinese high school English listening comprehension test data.
Homepage: https://github.com/Nealcly/MuTual
"""
import numpy as np
import inspect
import lm_eval.datasets.mutual.mutual
from lm_eval.base import Task, rf
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{mutual,
title = "MuTual: A Dataset for Multi-Turn Dialogue Reasoning",
author = "Cui, Leyang and Wu, Yu and Liu, Shujie and Zhang, Yue and Zhou, Ming" ,
@@ -10,41 +23,14 @@ https://www.aclweb.org/anthology/2020.acl-main.130/
publisher = "Association for Computational Linguistics",
}
"""
class MuTualBase(Task):
VERSION = 1
DATASET_PATH = inspect.getfile(lm_eval.datasets.mutual.mutual)
DATASET_NAME = None
CHOICES = ['A', 'B', 'C', 'D']
def has_training_docs(self):
return True
@@ -54,18 +40,11 @@ class MuTualBase(Task):
def has_test_docs(self):
return False
def training_docs(self):
return self.dataset["train"]
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return NotImplemented
@@ -132,8 +111,8 @@ class MuTualBase(Task):
class MuTual(MuTualBase):
DATASET_NAME = "mutual"
class MuTualPlus(MuTualBase):
DATASET_NAME = "mutual_plus"
"""
Natural Questions: a Benchmark for Question Answering Research
https://storage.googleapis.com/pub-tools-public-publication-data/pdf/1f7b46b5378d757553d3e92ead36bda2e4254244.pdf
The Natural Questions (NQ) corpus is a question-answering dataset that contains
questions from real users and requires QA systems to read and comprehend an entire
Wikipedia article that may or may not contain the answer to the question. The
inclusion of real user questions, and the requirement that solutions should read
an entire page to find the answer, cause NQ to be a more realistic and challenging
task than prior QA datasets.
TODO: NaturalQS has a *really* large train set that huggingface just automatically
downloads even if you don't use it. We should try and only download the val set and
not even bother with the train set.
Homepage: https://ai.google.com/research/NaturalQuestions
"""
from lm_eval.base import Task
from itertools import islice
_CITATION = """
@article{47761,
title={Natural Questions: a Benchmark for Question Answering Research},
author={Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year={2019},
journal={Transactions of the Association of Computational Linguistics}
}
"""
class NaturalQs(Task):
VERSION = 0
DATASET_PATH = "natural_questions" DATASET_PATH = "natural_questions"
DATASET_NAME = None DATASET_NAME = None
...@@ -24,7 +46,12 @@ class NaturalQs(HFTask): ...@@ -24,7 +46,12 @@ class NaturalQs(HFTask):
def training_docs(self): def training_docs(self):
# Cache training for faster few-shot. # Cache training for faster few-shot.
# Data is too large to fit in memory. # Data is too large to fit in memory.
return self.data["train"] if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def fewshot_examples(self, k, rnd):
# Data is too large to fit in memory. We just sample from the first bit.
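# A minimal sketch of the kind of sampling the elided body could do (assumed,
# not taken from the diff): draw the k examples from a bounded prefix of the
# training split so the full set never has to be materialized.
#
#   head = list(islice(self.training_docs(), 0, 1000))
#   return rnd.sample(head, k)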
...
"""
Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering
https://arxiv.org/pdf/1809.02789.pdf
OpenBookQA is a question-answering dataset modeled after open book exams for
assessing human understanding of a subject. It consists of 5,957 multiple-choice
elementary-level science questions (4,957 train, 500 dev, 500 test), which probe
the understanding of a small “book” of 1,326 core science facts and the application
of these facts to novel situations. For training, the dataset includes a mapping
from each question to the core science fact it was designed to probe. Answering
OpenBookQA questions requires additional broad common knowledge, not contained
in the book. The questions, by design, are answered incorrectly by both a retrieval-
based algorithm and a word co-occurrence algorithm.
Homepage: https://allenai.org/data/open-book-qa
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{OpenBookQA2018,
title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
booktitle={EMNLP},
year={2018}
}
"""
class OpenBookQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "openbookqa"
DATASET_NAME = "main"
@@ -16,7 +41,18 @@ class OpenBookQA(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
out_doc = {
"id": doc["id"],
"query": doc["question_stem"],
...
"""
The Pile: An 800GB Dataset of Diverse Text for Language Modeling
https://arxiv.org/pdf/2101.00027.pdf

The Pile is an 825 GiB diverse, open source language modelling data set that consists
of 22 smaller, high-quality datasets combined together. To score well on Pile
BPB (bits per byte), a model must be able to understand many disparate domains
including books, github repositories, webpages, chat logs, and medical, physics,
math, computer science, and philosophy papers.
Homepage: https://pile.eleuther.ai/
"""
import inspect
import lm_eval.datasets.pile.pile
from lm_eval.base import PerplexityTask
_CITATION = """
@article{pile,
title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
journal={arXiv preprint arXiv:2101.00027},
year={2020}
}
"""
class PilePerplexityTask(PerplexityTask):
VERSION = 1
DATASET_PATH = inspect.getfile(lm_eval.datasets.pile.pile)
DATASET_NAME = None
def has_validation_docs(self):
return True
@@ -42,90 +36,98 @@ class PilePerplexityTask(PerplexityTask, abc.ABC):
def has_test_docs(self):
return True
def validation_docs(self):
for doc in self.dataset["validation"]:
yield doc["text"]
def test_docs(self):
for doc in self.dataset["test"]:
yield doc["text"]
class PileArxiv(PilePerplexityTask):
DATASET_NAME = "pile_arxiv"
class PileBooks3(PilePerplexityTask):
DATASET_NAME = "pile_books3"
class PileBookCorpus2(PilePerplexityTask):
DATASET_NAME = "pile_bookcorpus2"
class PileDmMathematics(PilePerplexityTask):
DATASET_NAME = "pile_dm-mathematics"
class PileEnron(PilePerplexityTask):
DATASET_NAME = "pile_enron"
class PileEuroparl(PilePerplexityTask):
DATASET_NAME = "pile_europarl"
class PileFreeLaw(PilePerplexityTask):
DATASET_NAME = "pile_freelaw"
class PileGithub(PilePerplexityTask):
DATASET_NAME = "pile_github"
class PileGutenberg(PilePerplexityTask):
DATASET_NAME = "pile_gutenberg"
class PileHackernews(PilePerplexityTask):
DATASET_NAME = "pile_hackernews"
class PileNIHExporter(PilePerplexityTask):
DATASET_NAME = "pile_nih-exporter"
class PileOpenSubtitles(PilePerplexityTask):
DATASET_NAME = "pile_opensubtitles"
class PileOpenWebText2(PilePerplexityTask):
DATASET_NAME = "pile_openwebtext2"
class PilePhilPapers(PilePerplexityTask):
DATASET_NAME = "pile_philpapers"
class PilePileCc(PilePerplexityTask):
DATASET_NAME = "pile_pile-cc"
class PilePubmedAbstracts(PilePerplexityTask):
DATASET_NAME = "pile_pubmed-abstracts"
class PilePubmedCentral(PilePerplexityTask):
DATASET_NAME = "pile_pubmed-central"
class PileStackExchange(PilePerplexityTask):
DATASET_NAME = "pile_stackexchange"
class PileUspto(PilePerplexityTask):
DATASET_NAME = "pile_upsto"
class PileUbuntuIrc(PilePerplexityTask):
DATASET_NAME = "pile_ubuntu-irc"
class PileWikipedia(PilePerplexityTask):
DATASET_NAME = "pile_wikipedia"
class PileYoutubeSubtitles(PilePerplexityTask):
DATASET_NAME = "pile_youtubesubtitles"
"""
PIQA: Reasoning about Physical Commonsense in Natural Language
https://arxiv.org/pdf/1911.11641.pdf

Physical Interaction: Question Answering (PIQA) is a physical commonsense
reasoning task and a corresponding benchmark dataset. PIQA was designed to investigate
the physical knowledge of existing models. To what extent are current approaches
actually learning about the world?

Homepage: https://yonatanbisk.com/piqa/
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Bisk2020,
author = {Yonatan Bisk and Rowan Zellers and
Ronan Le Bras and Jianfeng Gao
and Yejin Choi},
title = {PIQA: Reasoning about Physical Commonsense in
Natural Language},
booktitle = {Thirty-Fourth AAAI Conference on
Artificial Intelligence},
year = {2020},
}
"""
class PiQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "piqa"
DATASET_NAME = None
@@ -18,7 +40,15 @@ class PiQA(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def _process_doc(self, doc):
out_doc = {
"goal": doc["goal"],
"choices": [doc["sol1"], doc["sol2"]],
...
@@ -2,27 +2,40 @@
PROST: Physical Reasoning about Objects Through Space and Time
https://arxiv.org/pdf/2106.03634.pdf
PROST, Physical Reasoning about Objects Through Space and Time, is a dataset
consisting of 18,736 multiple-choice questions made from 14 manually curated
templates, covering 10 physical reasoning concepts. All questions are designed
to probe both causal and masked language models in a zero-shot setting.
NOTE: PROST is limited to the zero-shot setting to adhere to authors' intentions
as discussed in section 7 of the paper: "We hope that the community will use
this dataset in the intended way: in a zero-shot setting to probe models which
have been trained on data not specifically collected to succeed on PROST."

Homepage: https://github.com/nala-cub/prost
""" """
from lm_eval.base import MultipleChoiceTask from lm_eval.base import MultipleChoiceTask
from . common import HFTask
class PROST(HFTask, MultipleChoiceTask): _CITATION = """
@inproceedings{aroca-ouellette-etal-2021-prost,
title = "{PROST}: {P}hysical Reasoning about Objects through Space and Time",
author = "Aroca-Ouellette, St{\'e}phane and
Paik, Cory and
Roncone, Alessandro and
Kann, Katharina",
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-acl.404",
pages = "4597--4608",
}
"""
class PROST(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "corypaik/prost"
DATASET_NAME = None
@@ -36,6 +49,9 @@ class PROST(HFTask, MultipleChoiceTask):
def has_test_docs(self):
return True
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None):
assert num_fewshot == 0, 'PROST is designed to probe models in a zero-shot fashion only.'
return super().fewshot_context(
@@ -45,7 +61,7 @@ class PROST(HFTask, MultipleChoiceTask):
description=description
)
def _process_doc(self, doc):
out_doc = {
"query": f"{doc['context']}\nQuestion: {doc['ex_question']}\nAnswer:",
"choices": [doc['A'], doc['B'], doc['C'], doc['D']],
...
"""
PubMedQA: A Dataset for Biomedical Research Question Answering
https://arxiv.org/pdf/1909.06146.pdf
PubMedQA is a novel biomedical question answering (QA) dataset collected from
PubMed abstracts. The task of PubMedQA is to answer research questions with
yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after
coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA
has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA
instances. Each PubMedQA instance is composed of (1) a question which is either
an existing research article title or derived from one, (2) a context which is
the corresponding abstract without its conclusion, (3) a long answer, which is
the conclusion of the abstract and, presumably, answers the research question,
and (4) a yes/no/maybe answer which summarizes the conclusion.
Homepage: https://pubmedqa.github.io/
"""
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{jin2019pubmedqa,
title={PubMedQA: A Dataset for Biomedical Research Question Answering},
author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua},
booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
pages={2567--2577},
year={2019}
}
"""
class Pubmed_QA(Task):
VERSION = 0
DATASET_PATH = "pubmed_qa"
DATASET_NAME = "pqa_labeled"
@@ -21,7 +48,7 @@ class Pubmed_QA(HFTask):
def test_docs(self):
if self.has_test_docs():
# HF is labelled as train but it's really just for testing
return self.dataset["train"]
def doc_to_text(self, doc):
ctxs = "\n".join(doc["context"]["contexts"])
...
"""
QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation
https://www.cs.cmu.edu/~./hovy/papers/13CLEF-QA4MRE.pdf
The (English only) QA4MRE challenge which was run as a Lab at CLEF 2011-2013.
The main objective of this exercise is to develop a methodology for evaluating
Machine Reading systems through Question Answering and Reading Comprehension
Tests. Systems should be able to extract knowledge from large volumes of text
and use this knowledge to answer questions. Four different tasks have been
organized during these years: Main Task, Processing Modality and Negation for
Machine Reading, Machine Reading of Biomedical Texts about Alzheimer's disease,
and Entrance Exam.
Homepage: http://nlp.uned.es/clef-qa/repository/qa4mre.php
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Peas2013QA4MRE2O,
title={QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation},
author={Anselmo Pe{\~n}as and Eduard H. Hovy and Pamela Forner and {\'A}lvaro Rodrigo and Richard F. E. Sutcliffe and Roser Morante},
booktitle={CLEF},
year={2013}
}
"""
class QA4MRE(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "qa4mre"
DATASET_NAME = None
def has_training_docs(self):
return False
@@ -45,30 +40,19 @@ class QA4MRE(MultipleChoiceTask):
def has_test_docs(self):
return True
def test_docs(self):
# `qa4mre` only has train data so we use it for the test docs.
return map(self._process_doc, self.dataset["train"])
def _process_doc(self, doc):
choices = doc["answer_options"]["answer_str"]
out_doc = {
"source": doc["document_str"].strip().replace("\'", "'"),
"query": doc["question_str"],
"choices": choices,
"gold": int(doc["correct_answer_id"]) - 1,
}
return out_doc
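# For illustration only (hypothetical values): a doc with
# doc["correct_answer_id"] == "3" and five answer strings yields gold == 2,
# i.e. the 1-based id from the source data becomes a 0-based index into `choices`.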
def doc_to_text(self, doc):
return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"])
@@ -81,10 +65,12 @@ class QA4MRE(MultipleChoiceTask):
class QA4MRE_2011(QA4MRE):
DATASET_NAME = "2011.main.EN"
class QA4MRE_2012(QA4MRE):
DATASET_NAME = "2012.main.EN"
class QA4MRE_2013(QA4MRE):
DATASET_NAME = "2013.main.EN"
@@ -2,34 +2,42 @@
A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers
https://arxiv.org/abs/2105.03011
QASPER is a dataset of 5,049 questions over 1,585 Natural Language Processing papers.
Each question is written by an NLP practitioner who read only the title and abstract
of the corresponding paper, and the question seeks information present in the full
text. The questions are then answered by a separate set of NLP practitioners who also
provide supporting evidence to answers.
Homepage: https://allenai.org/data/qasper
"""
from collections import Counter
import re
import string
from lm_eval.base import rf, Task
from lm_eval.metrics import f1_score, mean
_CITATION = """
@article{DBLP:journals/corr/abs-2105-03011,
author = {Pradeep Dasigi and
Kyle Lo and
Iz Beltagy and
Arman Cohan and
Noah A. Smith and
Matt Gardner},
title = {A Dataset of Information-Seeking Questions and Answers Anchored in
Research Papers},
journal = {CoRR},
volume = {abs/2105.03011},
year = {2021},
url = {https://arxiv.org/abs/2105.03011},
eprinttype = {arXiv},
eprint = {2105.03011},
timestamp = {Fri, 14 May 2021 12:13:30 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
def normalize_answer(s):
@@ -93,11 +101,20 @@ def token_f1_score(prediction, ground_truth):
return f1
class QASPER(Task):
VERSION = 0
DATASET_PATH = "qasper"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def doc_to_text(self, doc):
return (
"TITLE: "
@@ -119,14 +136,14 @@ class QASPER(HFTask):
return " " + answer
def training_docs(self):
for doc in self.dataset["train"]:
yield from self._process_doc(doc)
def validation_docs(self):
for doc in self.dataset["validation"]:
yield from self._process_doc(doc)
def _process_doc(self, doc):
"""Given a `doc`, flatten it out so that each JSON blob
contains exactly one question and one answer. Logic taken from
the reference implementation available at
...
@@ -2,34 +2,33 @@
QuAC: Question Answering in Context
https://arxiv.org/abs/1808.07036
Question Answering in Context (QuAC) is a dataset for modeling, understanding, and
participating in information seeking dialog. Data instances consist of an interactive
dialog between two crowd workers: (1) a student who poses a sequence of freeform
questions to learn as much as possible about a hidden Wikipedia text, and (2)
a teacher who answers the questions by providing short excerpts (spans) from the text.
Homepage: https://quac.ai/
"""
import inspect
import lm_eval.datasets.quac.quac
from lm_eval.base import Task
_CITATION = """
@article{choi2018quac,
title={Quac: Question answering in context},
author={Choi, Eunsol and He, He and Iyyer, Mohit and Yatskar, Mark and Yih, Wen-tau and Choi, Yejin and Liang, Percy and Zettlemoyer, Luke},
journal={arXiv preprint arXiv:1808.07036},
year={2018}
}
"""
class QuAC(Task):
VERSION = 0
DATASET_PATH = inspect.getfile(lm_eval.datasets.quac.quac)
DATASET_NAME = None
def has_training_docs(self):
return True
@@ -41,28 +40,20 @@ class QuAC(Task):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
raise NotImplementedError("QuAC has no test docs.")
def _process_doc(self, doc):
doc["title"] = doc['title'] + ' - ' + doc['section_title']
return doc
def doc_to_text(self, doc):
return 'TITLE: ' + doc['title'] + '\n' + 'PARAGRAPH: ' + doc['paragraph'] + '\n\n' + 'Q: ' + doc['question'] + '\n\n' + 'A: '
@@ -84,7 +75,7 @@ class QuAC(Task):
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError('Evaluation not implemented')
...
"""
RACE: Large-scale ReAding Comprehension Dataset From Examinations
https://arxiv.org/pdf/1704.04683.pdf
RACE is a large-scale reading comprehension dataset with more than 28,000 passages
and nearly 100,000 questions. The dataset is collected from English examinations
in China, which are designed for middle school and high school students. The dataset
can be served as the training and test sets for machine comprehension.
Homepage: https://www.cs.cmu.edu/~glai1/data/race/
"""
import collections
import datasets
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
@article{lai2017large,
title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
journal={arXiv preprint arXiv:1704.04683},
year={2017}
}
"""
class each: class each:
@@ -14,16 +34,14 @@ class each:
return list(map(self.f, other))
class RACE(Task):
VERSION = 1
DATASET_PATH = "race"
DATASET_NAME = "high"
cache = {}
letter_to_num = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
def has_training_docs(self):
return True
...
"""
Similarity of Semantic Relations
https://arxiv.org/pdf/cs/0608100.pdf
SAT (Scholastic Aptitude Test) Analogy Questions is a dataset comprising 374
multiple-choice analogy questions; 5 choices per question.
Homepage: https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art)
"""
import inspect
import lm_eval.datasets.sat_analogies.sat_analogies
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@article{article,
author = {Turney, Peter},
year = {2006},
month = {09},
pages = {379-416},
title = {Similarity of Semantic Relations},
volume = {32},
journal = {Computational Linguistics},
doi = {10.1162/coli.2006.32.3.379}
}
"""
class SATAnalogies(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = inspect.getfile(lm_eval.datasets.sat_analogies.sat_analogies)
DATASET_NAME = None
def __init__(self, data_dir: str):
"""
SAT Analogy Questions is not publicly available. You must request the data
by emailing Peter Turney and then download it to a local directory path
which should be passed into the `data_dir` arg.
"""
super().__init__(data_dir=data_dir)
def has_training_docs(self):
return False
@@ -28,38 +50,20 @@ class SATAnalogies(MultipleChoiceTask):
def training_docs(self):
return []
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return []
def _process_doc(self, doc):
return {
'source': doc['source'],
'query': doc['stem'].split(' ')[:2],
'choices': ["{} is to {}".format(*c.split(' ')[:2]) for c in doc["choices"]],
'gold': ['a', 'b', 'c', 'd', 'e'].index(doc['solution'].strip()),
}
def doc_to_text(self, doc):
return "{} is to {} as".format(*doc['query'])
...
"""
Crowdsourcing Multiple Choice Science Questions
https://aclanthology.org/W17-4413.pdf
The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics,
Chemistry and Biology, among others. The questions are in multiple-choice format
with 4 answer options each. For the majority of the questions, an additional paragraph
with supporting evidence for the correct answer is provided.
Homepage: https://allenai.org/data/sciq
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Welbl2017CrowdsourcingMC,
title={Crowdsourcing Multiple Choice Science Questions},
author={Johannes Welbl and Nelson F. Liu and Matt Gardner},
booktitle={NUT@EMNLP},
year={2017}
}
"""
class SciQ(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "sciq"
DATASET_NAME = None
def has_training_docs(self):
return True
@@ -28,36 +36,32 @@ class SciQ(MultipleChoiceTask):
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
choices = [
doc["distractor1"],
doc["distractor2"],
doc["distractor3"],
doc["correct_answer"],
]
src = doc['support']
out_doc = {
"source": src,
"query": doc['question'],
"choices": choices,
"gold": 3,
}
return out_doc
def doc_to_text(self, doc):
return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"]).strip()
...
"""
Know What You Don’t Know: Unanswerable Questions for SQuAD
https://arxiv.org/pdf/1806.03822.pdf
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset,
consisting of questions posed by crowdworkers on a set of Wikipedia articles,
where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable
questions written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but
also determine when no answer is supported by the paragraph and abstain from answering.
Homepage: https://rajpurkar.github.io/SQuAD-explorer/
"""
import datasets
from math import exp
from lm_eval.base import rf, Task
from lm_eval.metrics import f1_score, mean
from functools import partial
from packaging import version
_CITATION = """
@misc{rajpurkar2018know,
title={Know What You Don't Know: Unanswerable Questions for SQuAD},
author={Pranav Rajpurkar and Robin Jia and Percy Liang},
year={2018},
eprint={1806.03822},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
def _squad_metric(predictions, references):
squad_metric = datasets.load_metric("squad_v2")
return squad_metric.compute(predictions=predictions, references=references)
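# Hedged usage sketch (input format assumed from the `squad_v2` metric; the id
# and values below are invented):
#
#   preds = [{"id": "56ddde6b9a695914005b9628",
#             "prediction_text": "Normandy",
#             "no_answer_probability": 0.0}]
#   refs  = [{"id": "56ddde6b9a695914005b9628",
#             "answers": {"text": ["Normandy"], "answer_start": [159]}}]
#   _squad_metric(predictions=preds, references=refs)  # -> dict with "exact", "f1", ...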
@@ -18,7 +43,7 @@ def _squad_agg(key, items):
return _squad_metric(predictions=predictions, references=references)[key]
class SQuAD2(Task):
VERSION = 1
DATASET_PATH = "squad_v2"
DATASET_NAME = None
@@ -36,10 +61,10 @@ class SQuAD2(HFTask):
return False
def training_docs(self):
return self.dataset["train"]
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return 'Title: ' + doc['title'] + '\n\n' + 'Background: ' + doc['context'] + '\n\n' + 'Question: ' + doc['question'] + '\n\n' + 'Answer:'
...
"""
A Corpus and Cloze Evaluation for Deeper Understanding of Commonsense Stories
https://arxiv.org/pdf/1604.01696.pdf
'Story Cloze Test' (2018) is a commonsense reasoning framework for evaluating story
understanding, story generation, and script learning. This test requires a system
to choose the correct ending to a four-sentence story.
Homepage: https://cs.rochester.edu/nlp/rocstories/
"""
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{sharma-etal-2018-tackling,
title = "Tackling the Story Ending Biases in The Story Cloze Test",
author = "Sharma, Rishi and
Allen, James and
Bakhshandeh, Omid and
Mostafazadeh, Nasrin",
booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-2119",
doi = "10.18653/v1/P18-2119",
pages = "752--757",
abstract = "The Story Cloze Test (SCT) is a recent framework for evaluating story comprehension and script learning. There have been a variety of models tackling the SCT so far. Although the original goal behind the SCT was to require systems to perform deep language understanding and commonsense reasoning for successful narrative understanding, some recent models could perform significantly better than the initial baselines by leveraging human-authorship biases discovered in the SCT dataset. In order to shed some light on this issue, we have performed various data analysis and analyzed a variety of top performing models presented for this task. Given the statistics we have aggregated, we have designed a new crowdsourcing scheme that creates a new SCT dataset, which overcomes some of the biases. We benchmark a few models on the new dataset and show that the top-performing model on the original SCT dataset fails to keep up its performance. Our findings further signify the importance of benchmarking NLP systems on various evolving test sets.",
}
"""
class StoryCloze(Task): class StoryCloze(Task):
VERSION = 0 VERSION = 0
NEEDS_MANUAL_DL = True DATASET_PATH = "story_cloze"
DATASET_NAME = None
def download(self): def __init__(self, data_dir: str):
#TODO: replace with Eye link """
pass StoryCloze is not publicly available. You must download the data by
following https://cs.rochester.edu/nlp/rocstories/ and pass the folder
path into the `data_dir` arg.
"""
super().__init__(data_dir=data_dir)
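A minimal usage sketch, not part of the diff: it assumes the ROCStories CSVs have already been obtained from the homepage above, and the folder path is purely illustrative.

# The HF "story_cloze" builder cannot download the data itself, so the manually
# downloaded folder is forwarded to `datasets.load_dataset` via `data_dir`.
task = StoryCloze2016(data_dir="/path/to/rocstories")  # subclass defined at the end of this file
doc = next(iter(task.validation_docs()))
print(task.doc_to_text(doc) + task.doc_to_target(doc))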
def has_training_docs(self): def has_training_docs(self):
return False return False
...@@ -22,19 +58,19 @@ class StoryCloze(Task): ...@@ -22,19 +58,19 @@ class StoryCloze(Task):
def training_docs(self): def training_docs(self):
pass pass
def load_doc(self, filename):
with open(filename, newline='') as file:
filereader = csv.reader(file)
return list(filereader)
def validation_docs(self): def validation_docs(self):
return self.load_doc("data/storycloze/cloze_test_val__winter2018-cloze_test_ALL_val - 1 - 1.csv") return self.dataset["validation"]
def test_docs(self): def test_docs(self):
return self.load_doc("data/storycloze/cloze_test_test__winter2018-cloze_test_ALL_test - 1.csv") return self.dataset["test"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
return ' '.join([*doc[1:5]]) return ' '.join([
doc["input_sentence_1"],
doc["input_sentence_2"],
doc["input_sentence_3"],
doc["input_sentence_4"],
])
def should_decontaminate(self): def should_decontaminate(self):
return True return True
...@@ -43,25 +79,31 @@ class StoryCloze(Task): ...@@ -43,25 +79,31 @@ class StoryCloze(Task):
return doc["context"] return doc["context"]
def doc_to_target(self, doc): def doc_to_target(self, doc):
return " " + doc[int(doc[-1]) - 4] clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
# `- 1` because the `answer_right_ending` index is 1-based.
return " " + clozes[doc["answer_right_ending"] - 1]
def construct_requests(self, doc, ctx): def construct_requests(self, doc, ctx):
""" Uses RequestFactory to construct Requests and returns an iterable of """ Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM. Requests which will be sent to the LM.
:param doc: :param doc:
The document as returned from training_docs, validation_docs, or test_docs. The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str :param ctx: str
The context string, generated by fewshot_context. This includes the natural The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question language description, as well as the few shot examples, and the question
part of the document for `doc`. part of the document for `doc`.
""" """
# TODO: implement evaluation. clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
raise NotImplementedError('Evaluation not implemented') lls = [
rf.loglikelihood(ctx, " {}".format(choice))[0]
for choice in clozes
]
return lls
def process_results(self, doc, results): def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a """Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of dict where keys are the names of submetrics and values are the values of
the metric for that one document the metric for that one document
:param doc: :param doc:
...@@ -69,23 +111,36 @@ class StoryCloze(Task): ...@@ -69,23 +111,36 @@ class StoryCloze(Task):
:param results: :param results:
The results of the requests created in construct_requests. The results of the requests created in construct_requests.
""" """
# TODO: implement evaluation. gold = doc["answer_right_ending"] - 1
raise NotImplementedError('Evaluation not implemented') acc = 1. if np.argmax(results) == gold else 0.
return {
"acc": acc
}
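To illustrate the scoring path with invented numbers: `construct_requests` yields one loglikelihood per candidate ending, and the document counts as correct when the gold ending scores highest.

results = [-12.3, -9.8]   # loglikelihoods of sentence_quiz1 / sentence_quiz2
gold = 2 - 1              # answer_right_ending == 2, shifted to 0-based
acc = 1. if np.argmax(results) == gold else 0.   # argmax(results) == 1 == gold -> acc == 1.0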
def aggregation(self): def aggregation(self):
""" """
:returns: {str: [float] -> float} :returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics functions that aggregate a list of metrics
""" """
# TODO: implement evaluation. return {
raise NotImplementedError('Evaluation not implemented') "acc": mean
}
def higher_is_better(self): def higher_is_better(self):
""" """
:returns: {str: bool} :returns: {str: bool}
A dictionary where keys are the names of submetrics and values are A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better whether a higher value of the submetric is better
""" """
# TODO: implement evaluation. return {
raise NotImplementedError('Evaluation not implemented') "acc": True
}
class StoryCloze2016(StoryCloze):
DATASET_NAME = "2016"
class StoryCloze2018(StoryCloze):
DATASET_NAME = "2018"
""" """
To-do: SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems
- WSC requires free-form generation https://w4ngatang.github.io/static/papers/superglue.pdf
- ReCoRD
SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language
understanding tasks.
Homepage: https://super.gluebenchmark.com/
TODO: WSC requires free-form generation.
""" """
import numpy as np import numpy as np
import sklearn import sklearn
import transformers.data.metrics.squad_metrics as squad_metrics import transformers.data.metrics.squad_metrics as squad_metrics
from . common import HFTask, yesno from lm_eval.base import rf, Task
from lm_eval.base import rf from lm_eval.metrics import mean, acc_all, metric_max_over_ground_truths, yesno
from ..metrics import mean, acc_all, metric_max_over_ground_truths from lm_eval.utils import general_detokenize
from ..utils import general_detokenize
_CITATION = """
@inproceedings{NEURIPS2019_4496bf24,
author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
pages = {},
publisher = {Curran Associates, Inc.},
title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
volume = {32},
year = {2019}
}
"""
class BoolQ(HFTask): class BoolQ(Task):
VERSION = 1 VERSION = 1
DATASET_PATH = "super_glue" DATASET_PATH = "super_glue"
DATASET_NAME = "boolq" DATASET_NAME = "boolq"
...@@ -26,6 +46,14 @@ class BoolQ(HFTask): ...@@ -26,6 +46,14 @@ class BoolQ(HFTask):
def has_test_docs(self): def has_test_docs(self):
return False return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
return f"{doc['passage']}\nQuestion: {doc['question']}?\nAnswer:" return f"{doc['passage']}\nQuestion: {doc['question']}?\nAnswer:"
...@@ -66,7 +94,7 @@ class BoolQ(HFTask): ...@@ -66,7 +94,7 @@ class BoolQ(HFTask):
} }
class CommitmentBank(HFTask): class CommitmentBank(Task):
VERSION = 1 VERSION = 1
DATASET_PATH = "super_glue" DATASET_PATH = "super_glue"
DATASET_NAME = "cb" DATASET_NAME = "cb"
...@@ -80,6 +108,14 @@ class CommitmentBank(HFTask): ...@@ -80,6 +108,14 @@ class CommitmentBank(HFTask):
def has_test_docs(self): def has_test_docs(self):
return False return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format( return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format(
doc["premise"], doc["premise"],
...@@ -133,7 +169,7 @@ class CommitmentBank(HFTask): ...@@ -133,7 +169,7 @@ class CommitmentBank(HFTask):
} }
class Copa(HFTask): class Copa(Task):
VERSION = 0 VERSION = 0
DATASET_PATH = "super_glue" DATASET_PATH = "super_glue"
DATASET_NAME = "copa" DATASET_NAME = "copa"
...@@ -147,6 +183,14 @@ class Copa(HFTask): ...@@ -147,6 +183,14 @@ class Copa(HFTask):
def has_test_docs(self): def has_test_docs(self):
return False return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
# Drop the period # Drop the period
connector = { connector = {
...@@ -193,7 +237,7 @@ class Copa(HFTask): ...@@ -193,7 +237,7 @@ class Copa(HFTask):
return choice[0].lower() + choice[1:] return choice[0].lower() + choice[1:]
class MultiRC(HFTask): class MultiRC(Task):
VERSION = 1 VERSION = 1
DATASET_PATH = "super_glue" DATASET_PATH = "super_glue"
DATASET_NAME = "multirc" DATASET_NAME = "multirc"
...@@ -207,6 +251,14 @@ class MultiRC(HFTask): ...@@ -207,6 +251,14 @@ class MultiRC(HFTask):
def has_test_docs(self): def has_test_docs(self):
return False return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:" return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:"
...@@ -245,7 +297,7 @@ class MultiRC(HFTask): ...@@ -245,7 +297,7 @@ class MultiRC(HFTask):
} }
class ReCoRD(HFTask): class ReCoRD(Task):
VERSION = 0 VERSION = 0
DATASET_PATH = "super_glue" DATASET_PATH = "super_glue"
DATASET_NAME = "record" DATASET_NAME = "record"
...@@ -264,13 +316,13 @@ class ReCoRD(HFTask): ...@@ -264,13 +316,13 @@ class ReCoRD(HFTask):
# Each doc consists of multiple answer candidates, each of which is scored yes/no. # Each doc consists of multiple answer candidates, each of which is scored yes/no.
if self._training_docs is None: if self._training_docs is None:
self._training_docs = [] self._training_docs = []
for doc in self.data["train"]: for doc in self.dataset["train"]:
self._training_docs.append(self._process_doc(doc)) self._training_docs.append(self._process_doc(doc))
return self._training_docs return self._training_docs
def validation_docs(self): def validation_docs(self):
# See: training_docs # See: training_docs
for doc in self.data["validation"]: for doc in self.dataset["validation"]:
yield self._process_doc(doc) yield self._process_doc(doc)
@classmethod @classmethod
...@@ -334,7 +386,7 @@ class ReCoRD(HFTask): ...@@ -334,7 +386,7 @@ class ReCoRD(HFTask):
} }
class WordsInContext(HFTask): class WordsInContext(Task):
VERSION = 0 VERSION = 0
DATASET_PATH = "super_glue" DATASET_PATH = "super_glue"
DATASET_NAME = "wic" DATASET_NAME = "wic"
...@@ -348,6 +400,14 @@ class WordsInContext(HFTask): ...@@ -348,6 +400,14 @@ class WordsInContext(HFTask):
def has_test_docs(self): def has_test_docs(self):
return False return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
return "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the" \ return "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the" \
" two sentences above?\nAnswer:".format( " two sentences above?\nAnswer:".format(
...@@ -386,7 +446,7 @@ class WordsInContext(HFTask): ...@@ -386,7 +446,7 @@ class WordsInContext(HFTask):
} }
class SGWinogradSchemaChallenge(HFTask): class SGWinogradSchemaChallenge(Task):
VERSION = 0 VERSION = 0
# Note: This implementation differs from Fig G.32 because this is the SuperGLUE, # Note: This implementation differs from Fig G.32 because this is the SuperGLUE,
# binary version of the task. # binary version of the task.
...@@ -408,11 +468,14 @@ class SGWinogradSchemaChallenge(HFTask): ...@@ -408,11 +468,14 @@ class SGWinogradSchemaChallenge(HFTask):
# GPT-3 Paper's format only uses positive examples for fewshot "training" # GPT-3 Paper's format only uses positive examples for fewshot "training"
self._training_docs = [ self._training_docs = [
doc for doc in doc for doc in
self.data["train"] self.dataset["train"]
if doc["label"] if doc["label"]
] ]
return self._training_docs return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc): def doc_to_text(self, doc):
raw_passage = doc["text"] raw_passage = doc["text"]
# NOTE: HuggingFace span indices are word-based not character-based. # NOTE: HuggingFace span indices are word-based not character-based.
......
"""
NOTE: This file implements translation tasks using datasets from WMT conferences,
provided by sacrebleu. Traditionally they are evaluated with BLEU scores. TER
and CHRF are other options.
We defer citations and descriptions of the many translation tasks used
here to the SacreBLEU repo from which we've obtained the datasets:
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
Homepage: https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
"""
import pycountry import pycountry
from pprint import pprint from pprint import pprint
from sacrebleu import sacrebleu from sacrebleu import sacrebleu
...@@ -6,13 +17,21 @@ from lm_eval.base import Task, rf ...@@ -6,13 +17,21 @@ from lm_eval.base import Task, rf
from typing import List from typing import List
_CITATION = """
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
""" """
This file implements translation tasks using datasets from WMT conferences, provided by sacrebleu.
Traditionally they are evaluated with BLEU scores. TER and CHRF are other options.
See sacrebleu.DATASETS for all available datasets. There are a lot!
"""
sacrebleu_datasets = sacrebleu.DATASETS sacrebleu_datasets = sacrebleu.DATASETS
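A small exploration sketch, not part of the diff, assuming the sacrebleu 1.x layout this file targets, where each `DATASETS` entry is a dict whose language-pair keys (e.g. "en-de") sit alongside a few metadata fields:

from sacrebleu import sacrebleu

print(len(sacrebleu.DATASETS))           # "There are a lot!"
print(sorted(sacrebleu.DATASETS)[:5])    # dataset names such as "iwslt17", "wmt14", ...
# Keep only the keys that look like language pairs for one dataset.
pairs = [k for k in sacrebleu.DATASETS["wmt14"] if "-" in k]
print(pairs[:5])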
...@@ -71,7 +90,7 @@ class GeneralTranslationTask(Task): ...@@ -71,7 +90,7 @@ class GeneralTranslationTask(Task):
super().__init__() super().__init__()
def download(self): def download(self, data_dir=None, cache_dir=None, download_mode=None):
# This caches in the users home dir automatically # This caches in the users home dir automatically
self.src_file, self.ref_file = \ self.src_file, self.ref_file = \
sacrebleu.download_test_set(self.sacrebleu_dataset, self.sacrebleu_language_pair) sacrebleu.download_test_set(self.sacrebleu_dataset, self.sacrebleu_language_pair)
......