Commit d2a9b759 authored by haileyschoelkopf's avatar haileyschoelkopf

in-place replace main with lm-eval2, keeping old git history

parent 814940e8
@@ -12,8 +12,9 @@ in the broader discourse.
 Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
 """
-from lm_eval.base import Task, rf
-from lm_eval.metrics import mean, perplexity
+from lm_eval.api.task import Task
+from lm_eval.api.instance import LoglikelihoodInstance
+from lm_eval.api.metrics import mean, perplexity

 _CITATION = """
@@ -31,6 +32,8 @@ _CITATION = """
 class LambadaBase(Task):
     VERSION = None
+    OUTPUT_TYPE = "loglikelihood"
+
     def training_docs(self):
         if self.has_training_docs():
             return self.dataset["train"]
@@ -55,12 +58,14 @@ class LambadaBase(Task):
     def doc_to_target(self, doc):
         return " " + doc["text"].rsplit(" ", 1)[1]

-    def construct_requests(self, doc, ctx):
-        ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc))
-        return ll, is_greedy
+    def construct_requests(self, doc, ctx, **kwargs):
+        return LoglikelihoodInstance(doc=doc, arguments=(ctx, self.doc_to_target(doc)), **kwargs)

     def process_results(self, doc, results):
+        # TODO: this ^ is a hack. filters should make it so that we only have one response per request that we score
+        results = results[0]  # TODO: recheck this. currently a list of [(ll, is_greedy)] is passed in
         ll, is_greedy = results

         return {"ppl": ll, "acc": int(is_greedy)}
@@ -75,7 +80,7 @@ class LambadaBase(Task):
 class LambadaStandard(LambadaBase):
     """The LAMBADA task using the standard original LAMBADA dataset."""

-    VERSION = 0
+    VERSION = "2.0"
     DATASET_PATH = "lambada"

     def has_training_docs(self):
@@ -95,7 +100,7 @@ class LambadaOpenAI(LambadaBase):
     Reference: https://github.com/openai/gpt-2/issues/131#issuecomment-497136199
     """

-    VERSION = 0
+    VERSION = "2.0"
     DATASET_PATH = "EleutherAI/lambada_openai"

     def has_training_docs(self):
...
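For readers tracking the migration: the old API returned `rf.loglikelihood` results directly from `construct_requests`, while the new API defers execution behind `Instance` objects that the evaluator resolves before calling `process_results`. A minimal sketch of the new flow, assuming only the names visible in this diff (`SketchTask` itself is illustrative, not part of the commit):

from lm_eval.api.task import Task
from lm_eval.api.instance import LoglikelihoodInstance

class SketchTask(Task):  # illustrative subclass, not part of the commit
    OUTPUT_TYPE = "loglikelihood"

    def construct_requests(self, doc, ctx, **kwargs):
        # Requests are now deferred: the evaluator collects Instance objects,
        # runs them against the model, and only then calls process_results.
        return LoglikelihoodInstance(
            doc=doc, arguments=(ctx, self.doc_to_target(doc)), **kwargs
        )

    def process_results(self, doc, results):
        (ll, is_greedy) = results[0]  # responses arrive as a list, per the TODO above
        return {"ppl": ll, "acc": int(is_greedy)}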
"""
The LAMBADA dataset: Word prediction requiring a broad discourse context∗
https://arxiv.org/pdf/1606.06031.pdf
Cloze-style LAMBADA dataset.
LAMBADA is a dataset to evaluate the capabilities of computational models for text
understanding by means of a word prediction task. LAMBADA is a collection of narrative
passages sharing the characteristic that human subjects are able to guess their last
word if they are exposed to the whole passage, but not if they only see the last
sentence preceding the target word. To succeed on LAMBADA, computational models
cannot simply rely on local context, but must be able to keep track of information
in the broader discourse.
Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
"""
from lm_eval.tasks.lambada import LambadaOpenAI, LambadaStandard
_CITATION = """
@misc{paperno2016lambada,
author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
title={The LAMBADA dataset},
DOI={10.5281/zenodo.2630551},
publisher={Zenodo},
year={2016},
month={Aug}
}
"""
class LambadaStandardCloze(LambadaStandard):
"""Cloze-style LambadaStandard."""
VERSION = 0
def doc_to_text(self, doc):
return doc["text"].rsplit(" ", 1)[0] + " ____. ->"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["text"]
def doc_to_target(self, doc):
return " " + doc["text"].rsplit(" ", 1)[1]
class LambadaOpenAICloze(LambadaOpenAI):
"""Cloze-style LambadaOpenAI."""
VERSION = 0
def doc_to_text(self, doc):
return doc["text"].rsplit(" ", 1)[0] + " ____. ->"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["text"]
def doc_to_target(self, doc):
return " " + doc["text"].rsplit(" ", 1)[1]
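# Illustrative check of the cloze formatting above (toy passage, not from the dataset):
_text = "He opened the door and saw the sea"
assert _text.rsplit(" ", 1)[0] + " ____. ->" == "He opened the door and saw the ____. ->"
assert " " + _text.rsplit(" ", 1)[1] == " sea"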
"""
The LAMBADA (OpenAI) dataset: Word prediction requiring a broad discourse context∗
https://arxiv.org/pdf/1606.06031.pdf
The LAMBADA OpenAI dataset machine-translated to other languages.
LAMBADA is a dataset to evaluate the capabilities of computational models for text
understanding by means of a word prediction task. LAMBADA is a collection of narrative
passages sharing the characteristic that human subjects are able to guess their last
word if they are exposed to the whole passage, but not if they only see the last
sentence preceding the target word. To succeed on LAMBADA, computational models
cannot simply rely on local context, but must be able to keep track of information
in the broader discourse.
Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
Reference (OpenAI): https://github.com/openai/gpt-2/issues/131#issuecomment-497136199
"""
from .lambada import LambadaOpenAI
_CITATION = """
@misc{paperno2016lambada,
author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
title={The LAMBADA dataset},
DOI={10.5281/zenodo.2630551},
publisher={Zenodo},
year={2016},
month={Aug}
}
"""
class LambadaOpenAIMultilingualEnglish(LambadaOpenAI):
VERSION = 0
DATASET_NAME = "en"
class LambadaOpenAIMultilingualFrench(LambadaOpenAI):
VERSION = 0
DATASET_NAME = "fr"
class LambadaOpenAIMultilingualGerman(LambadaOpenAI):
VERSION = 0
DATASET_NAME = "de"
class LambadaOpenAIMultilingualItalian(LambadaOpenAI):
VERSION = 0
DATASET_NAME = "it"
class LambadaOpenAIMultilingualSpanish(LambadaOpenAI):
VERSION = 0
DATASET_NAME = "es"
LANG_CLASSES = [
LambadaOpenAIMultilingualEnglish,
LambadaOpenAIMultilingualFrench,
LambadaOpenAIMultilingualGerman,
LambadaOpenAIMultilingualItalian,
LambadaOpenAIMultilingualSpanish,
]
def construct_tasks():
tasks = {}
for lang_class in LANG_CLASSES:
tasks[f"lambada_openai_mt_{lang_class.DATASET_NAME}"] = lang_class
return tasks
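# Hedged usage sketch (nothing beyond this file assumed): construct_tasks()
# returns a name -> Task-class mapping keyed by language code.
tasks = construct_tasks()
assert set(tasks) == {
    "lambada_openai_mt_en",
    "lambada_openai_mt_fr",
    "lambada_openai_mt_de",
    "lambada_openai_mt_it",
    "lambada_openai_mt_es",
}
assert tasks["lambada_openai_mt_de"] is LambadaOpenAIMultilingualGerman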
"""
LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning
https://arxiv.org/pdf/2007.08124.pdf
LogiQA is a dataset for testing human logical reasoning. It consists of 8,678 QA
instances, covering multiple types of deductive reasoning. Results show that
state-of-the-art neural models perform far worse than the human ceiling. The
dataset can also serve as a benchmark for re-investigating logical AI under the
deep learning NLP setting.
Homepage: https://github.com/lgw863/LogiQA-dataset
"""
import inspect
import lm_eval.datasets.logiqa.logiqa
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@misc{liu2020logiqa,
title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
year={2020},
eprint={2007.08124},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
class LogiQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = inspect.getfile(lm_eval.datasets.logiqa.logiqa)
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
def format_example(doc, choices):
"""
Passage: <passage>
Question: <question>
Choices:
A. <choice1>
B. <choice2>
C. <choice3>
D. <choice4>
Answer:
"""
prompt = "Passage: " + doc["context"] + "\n"
prompt += "Question: " + doc["question"] + "\nChoices:\n"
for choice, option in zip(choices, doc["options"]):
prompt += f"{choice.upper()}. {option}\n"
prompt += "Answer:"
return prompt
choices = ["a", "b", "c", "d"]
return {
"passage": doc["context"], # Used for decontamination
"query": format_example(doc, choices),
"choices": doc["options"],
"gold": choices.index(doc["label"]),
}
def doc_to_text(self, doc):
return doc["query"]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["passage"]
"""
MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms
https://arxiv.org/pdf/1905.13319.pdf
MathQA is a large-scale dataset of 37k English multiple-choice math word problems
covering multiple math domain categories by modeling operation programs corresponding
to word problems in the AQuA dataset (Ling et al., 2017).
Homepage: https://math-qa.github.io/math-QA/
"""
import re
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@misc{amini2019mathqa,
title={MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms},
author={Aida Amini and Saadia Gabriel and Peter Lin and Rik Koncel-Kedziorski and Yejin Choi and Hannaneh Hajishirzi},
year={2019},
eprint={1905.13319},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
class MathQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "math_qa"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
answer_idx = ["a", "b", "c", "d", "e"].index(doc["correct"])
choices = [
c[4:].rstrip(" ,")
for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc["options"])
]
out_doc = {
"query": "Question: " + doc["Problem"] + "\nAnswer:",
"choices": choices,
"gold": answer_idx,
}
return out_doc
def doc_to_text(self, doc):
return doc["query"]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["query"]
"""
“Going on a vacation” takes longer than “Going for a walk”:
A Study of Temporal Commonsense Understanding
https://arxiv.org/pdf/1909.03065.pdf
MC-TACO is a dataset of 13k question-answer pairs that require temporal commonsense
comprehension. The dataset contains five temporal properties, (1) duration (how long
an event takes), (2) temporal ordering (typical order of events), (3) typical time
(when an event occurs), (4) frequency (how often an event occurs), and (5) stationarity
(whether a state is maintained for a very long time or indefinitely).
WARNING: Running this task with a `--limit` arg will give misleading results! The
corresponding dataset is structured such that each multiple-choice-question gathered
by the authors is split into question-option pairs, where each such pair gets
siloed into an individual document for plausibility testing. Because the harness
shuffles these documents, setting `--limit` will likely "cut off" certain candidate
answers. This is a problem because the task's metrics require an exhaustive evaluation
of a question's options. See section 4 of the paper for details.
Homepage: https://leaderboard.allenai.org/mctaco/submissions/public
"""
import numpy as np
from collections import defaultdict
from lm_eval.base import rf, Task
_CITATION = """
@inproceedings{ZKNR19,
author = {Ben Zhou, Daniel Khashabi, Qiang Ning and Dan Roth},
title = {“Going on a vacation” takes longer than “Going for a walk”: A Study of Temporal Commonsense Understanding },
booktitle = {EMNLP},
year = {2019},
}
"""
class MCTACO(Task):
VERSION = 0
DATASET_PATH = "mc_taco"
DATASET_NAME = None
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return self.dataset["test"]
def doc_to_text(self, doc):
return (
f"{doc['sentence']}\nQuestion: {doc['question']}\n"
f"Answer: {doc['answer']}\nPlausible:"
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["question"] + " " + doc["sentence"]
def doc_to_target(self, doc):
return " " + ["no", "yes"][doc["label"]]
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
ll_no, _ = rf.loglikelihood(ctx, " no")
ll_yes, _ = rf.loglikelihood(ctx, " yes")
return ll_no, ll_yes
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
ll_no, ll_yes = results
gold = doc["label"]
pred = int(ll_yes > ll_no)
question_id = self._question2id(doc)
items = (gold, pred, question_id)
return {"em": items, "f1": items}
def _question2id(self, doc):
"""Returns an identifier for the question in the given document."""
return " ".join([doc["sentence"], doc["question"]])
def aggregation(self):
return {
"f1": f1,
"em": exact_match,
}
def higher_is_better(self):
return {
"f1": True,
"em": True,
}
def exact_match(items):
"""
Counts a question as correct if the model accurately classifies the plausibility
of an answer for all candidate answers. See section 4 "Evaluation Metrics" in the paper.
"""
results = list(zip(*items))
accuracies = defaultdict(list)
for gold, pred, question in zip(results[0], results[1], results[2]):
accuracies[question].append(pred == gold)
return np.mean([int(all(accs)) for accs in accuracies.values()])
def f1(items):
"""See section 4 "Evaluation Metrics" in the paper about the F1 metric used."""
results = list(zip(*items))
# Group the positive ("yes" = 1) golds and predictions by question.
gold_positives, pred_positives = defaultdict(list), defaultdict(list)
for gold, pred, question in zip(results[0], results[1], results[2]):
gold_positives[question].append(gold)
pred_positives[question].append(pred)
f1 = []
for question in gold_positives.keys():
gp, pp = sum(gold_positives[question]), sum(pred_positives[question])
tp = sum(np.logical_and(gold_positives[question], pred_positives[question]))
p = tp / pp if pp > 0.0 else 1.0
r = tp / gp if gp > 0.0 else 1.0
if p + r > 0.0:
f1.append(2.0 * (p * r) / (p + r))
return np.mean(f1)
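# Minimal sanity check for the grouped metrics above (toy items, not real data).
# Each item is (gold, pred, question_id); a question counts as exactly matched
# only if every one of its candidate answers is classified correctly.
_items = [
    (1, 1, "q1"), (0, 0, "q1"),  # q1: both candidates classified correctly
    (1, 0, "q2"), (0, 0, "q2"),  # q2: the positive candidate is missed
]
assert exact_match(_items) == 0.5
assert f1(_items) == 0.5  # q1 gets F1=1.0; q2 gets F1=0.0 (no true positives)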
"""
MuTual: A Dataset for Multi-Turn Dialogue Reasoning
https://www.aclweb.org/anthology/2020.acl-main.130/
MuTual is a retrieval-based dataset for multi-turn dialogue reasoning, which is
modified from Chinese high school English listening comprehension test data.
Homepage: https://github.com/Nealcly/MuTual
"""
import numpy as np
import inspect
import lm_eval.datasets.mutual.mutual
from lm_eval.base import Task, rf
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{mutual,
title = "MuTual: A Dataset for Multi-Turn Dialogue Reasoning",
author = "Cui, Leyang and Wu, Yu and Liu, Shujie and Zhang, Yue and Zhou, Ming" ,
booktitle = "Proceedings of the 58th Conference of the Association for Computational Linguistics",
year = "2020",
publisher = "Association for Computational Linguistics",
}
"""
class MuTualBase(Task):
VERSION = 1
DATASET_PATH = inspect.getfile(lm_eval.datasets.mutual.mutual)
DATASET_NAME = None
CHOICES = ["A", "B", "C", "D"]
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
return self.dataset["train"]
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return NotImplemented
def doc_to_text(self, doc):
return self.detokenize(doc["article"])
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["article"]
def doc_to_target(self, doc):
return " " + self.detokenize(doc["options"][self.CHOICES.index(doc["answers"])])
def construct_requests(self, doc, ctx):
lls = []
for option in doc["options"]:
lls.append(rf.loglikelihood(ctx, f" {self.detokenize(option)}")[0])
return lls
def detokenize(self, text):
text = text.replace(" '", "'")
text = text.replace(" \n", "\n")
text = text.replace("\n ", "\n")
text = text.replace(" n't", "n't")
text = text.replace("`` ", '"')
text = text.replace("''", '"')
# punctuation
text = text.replace(" :", ":")
text = text.replace(" ;", ";")
text = text.replace(" !", "!")
text = text.replace(" ?", "?")
text = text.replace(" ,", ",")
text = text.replace(" .", ".")
return text
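    # Illustrative behavior on PTB-style tokenized input (hedged example;
    # the input shape is assumed from the dataset, not shown in this file):
    #   detokenize("he said `` hello '' , did n't he ?")
    #   -> 'he said "hello", didn't he?'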
def process_results(self, doc, results):
gold = self.CHOICES.index(doc["answers"])
r4_1 = np.argmax(results) == gold # r4_1 = accuracy
ranks = sorted(results, reverse=True)
        r4_2 = (ranks.index(results[gold]) == 1) + r4_1  # gold ranked within the top 2
mrr = 1.0 / (ranks.index(results[gold]) + 1) # `+ 1` for index offset
return {"r@1": r4_1, "r@2": r4_2, "mrr": mrr}
def aggregation(self):
return {"r@1": mean, "r@2": mean, "mrr": mean}
def higher_is_better(self):
return {"r@1": True, "r@2": True, "mrr": True}
class MuTual(MuTualBase):
DATASET_NAME = "mutual"
class MuTualPlus(MuTualBase):
DATASET_NAME = "mutual_plus"
"""
Natural Questions: a Benchmark for Question Answering Research
https://storage.googleapis.com/pub-tools-public-publication-data/pdf/1f7b46b5378d757553d3e92ead36bda2e4254244.pdf
The Natural Questions (NQ) corpus is a question-answering dataset that contains
questions from real users and requires QA systems to read and comprehend an entire
Wikipedia article that may or may not contain the answer to the question. The
inclusion of real user questions, and the requirement that solutions should read
an entire page to find the answer, cause NQ to be a more realistic and challenging
task than prior QA datasets.
TODO: NaturalQs has a *really* large train set that HuggingFace automatically
downloads even if you don't use it. We should try to download only the
validation set and skip the train set entirely.
Homepage: https://ai.google.com/research/NaturalQuestions
"""
from lm_eval.base import Task
from itertools import islice
_CITATION = """
@article{47761,
title={Natural Questions: a Benchmark for Question Answering Research},
author={Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year={2019},
journal={Transactions of the Association of Computational Linguistics}
}
"""
class NaturalQs(Task):
VERSION = 0
DATASET_PATH = "natural_questions"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
# Cache training for faster few-shot.
# Data is too large to fit in memory.
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def fewshot_examples(self, k, rnd):
# Data is too large to fit in memory. We just sample from the first bit.
if self._training_docs is None:
self._training_docs = list(islice(self.training_docs(), 0, 100000))
return rnd.sample(self._training_docs, k)
def doc_to_text(self, doc):
return "Q: " + doc["question"]["text"] + "\n\n" + "A:"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["question"]["text"]
def doc_to_target(self, doc):
# There's a short answer and a long answer. Based on the paper, I'm using the long answer.
# short_answer = doc["annotations"]["short_answers"][0]["text"]
long_answer_start = doc["annotations"]["long_answer"][0]["start_token"]
long_answer_end = doc["annotations"]["long_answer"][0]["end_token"]
long_answer_span = doc["document"]["tokens"]["token"][
long_answer_start:long_answer_end
]
long_answer_is_html = doc["document"]["tokens"]["is_html"][
long_answer_start:long_answer_end
]
long_answer_chars = [
tok
for (tok, is_html) in zip(long_answer_span, long_answer_is_html)
if not is_html
]
long_answer = " ".join(long_answer_chars)
return long_answer # Replace with short_answer[0] for short answer
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
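# Toy illustration of the long-answer extraction in doc_to_target above:
# HTML tokens are masked out and the remaining tokens are joined with spaces.
_tokens = ["<P>", "The", "answer", "</P>"]
_is_html = [True, False, False, True]
assert " ".join(t for t, h in zip(_tokens, _is_html) if not h) == "The answer"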
"""
Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering
https://arxiv.org/pdf/1809.02789.pdf
OpenBookQA is a question-answering dataset modeled after open book exams for
assessing human understanding of a subject. It consists of 5,957 multiple-choice
elementary-level science questions (4,957 train, 500 dev, 500 test), which probe
the understanding of a small “book” of 1,326 core science facts and the application
of these facts to novel situations. For training, the dataset includes a mapping
from each question to the core science fact it was designed to probe. Answering
OpenBookQA questions requires additional broad common knowledge, not contained
in the book. The questions, by design, are answered incorrectly by both a retrieval-
based algorithm and a word co-occurrence algorithm.
Homepage: https://allenai.org/data/open-book-qa
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{OpenBookQA2018,
title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
booktitle={EMNLP},
year={2018}
}
"""
class OpenBookQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "openbookqa"
DATASET_NAME = "main"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
out_doc = {
"id": doc["id"],
"query": doc["question_stem"],
"choices": doc["choices"]["text"],
"gold": ["A", "B", "C", "D"].index(doc["answerKey"].strip()),
}
return out_doc
def doc_to_text(self, doc):
return doc["query"]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["query"]
"""
The Pile: An 800GB Dataset of Diverse Text for Language Modeling
https://arxiv.org/pdf/2101.00027.pdf
The Pile is an 825 GiB diverse, open-source language modelling dataset consisting
of 22 smaller, high-quality datasets combined together. To score well on Pile
BPB (bits per byte), a model must be able to understand many disparate domains,
including books, GitHub repositories, webpages, chat logs, and medical, physics,
math, computer science, and philosophy papers.
Homepage: https://pile.eleuther.ai/
"""
import inspect
import lm_eval.datasets.pile.pile
from lm_eval.base import PerplexityTask
_CITATION = """
@article{pile,
title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
journal={arXiv preprint arXiv:2101.00027},
year={2020}
}
"""
class PilePerplexityTask(PerplexityTask):
VERSION = 1
DATASET_PATH = inspect.getfile(lm_eval.datasets.pile.pile)
DATASET_NAME = None
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def validation_docs(self):
for doc in self.dataset["validation"]:
yield doc["text"]
def test_docs(self):
for doc in self.dataset["test"]:
yield doc["text"]
class PileArxiv(PilePerplexityTask):
DATASET_NAME = "pile_arxiv"
class PileBooks3(PilePerplexityTask):
DATASET_NAME = "pile_books3"
class PileBookCorpus2(PilePerplexityTask):
DATASET_NAME = "pile_bookcorpus2"
class PileDmMathematics(PilePerplexityTask):
DATASET_NAME = "pile_dm-mathematics"
class PileEnron(PilePerplexityTask):
DATASET_NAME = "pile_enron"
class PileEuroparl(PilePerplexityTask):
DATASET_NAME = "pile_europarl"
class PileFreeLaw(PilePerplexityTask):
DATASET_NAME = "pile_freelaw"
class PileGithub(PilePerplexityTask):
DATASET_NAME = "pile_github"
class PileGutenberg(PilePerplexityTask):
DATASET_NAME = "pile_gutenberg"
class PileHackernews(PilePerplexityTask):
DATASET_NAME = "pile_hackernews"
class PileNIHExporter(PilePerplexityTask):
DATASET_NAME = "pile_nih-exporter"
class PileOpenSubtitles(PilePerplexityTask):
DATASET_NAME = "pile_opensubtitles"
class PileOpenWebText2(PilePerplexityTask):
DATASET_NAME = "pile_openwebtext2"
class PilePhilPapers(PilePerplexityTask):
DATASET_NAME = "pile_philpapers"
class PilePileCc(PilePerplexityTask):
DATASET_NAME = "pile_pile-cc"
class PilePubmedAbstracts(PilePerplexityTask):
DATASET_NAME = "pile_pubmed-abstracts"
class PilePubmedCentral(PilePerplexityTask):
DATASET_NAME = "pile_pubmed-central"
class PileStackExchange(PilePerplexityTask):
DATASET_NAME = "pile_stackexchange"
class PileUspto(PilePerplexityTask):
DATASET_NAME = "pile_upsto"
class PileUbuntuIrc(PilePerplexityTask):
DATASET_NAME = "pile_ubuntu-irc"
class PileWikipedia(PilePerplexityTask):
DATASET_NAME = "pile_wikipedia"
class PileYoutubeSubtitles(PilePerplexityTask):
DATASET_NAME = "pile_youtubesubtitles"
"""
PIQA: Reasoning about Physical Commonsense in Natural Language
https://arxiv.org/pdf/1911.11641.pdf
Physical Interaction: Question Answering (PIQA) is a physical commonsense
reasoning and a corresponding benchmark dataset. PIQA was designed to investigate
the physical knowledge of existing models. To what extent are current approaches
actually learning about the world?
Homepage: https://yonatanbisk.com/piqa/
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Bisk2020,
author = {Yonatan Bisk and Rowan Zellers and
Ronan Le Bras and Jianfeng Gao
and Yejin Choi},
title = {PIQA: Reasoning about Physical Commonsense in
Natural Language},
booktitle = {Thirty-Fourth AAAI Conference on
Artificial Intelligence},
year = {2020},
}
"""
class PiQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "piqa"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def _process_doc(self, doc):
out_doc = {
"goal": doc["goal"],
"choices": [doc["sol1"], doc["sol2"]],
"gold": doc["label"],
}
return out_doc
def doc_to_text(self, doc):
return "Question: " + doc["goal"] + "\nAnswer:"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["goal"]
"""
PROST: Physical Reasoning about Objects Through Space and Time
https://arxiv.org/pdf/2106.03634.pdf
PROST, Physical Reasoning about Objects Through Space and Time, is a dataset
consisting of 18,736 multiple-choice questions made from 14 manually curated
templates, covering 10 physical reasoning concepts. All questions are designed
to probe both causal and masked language models in a zero-shot setting.
NOTE: PROST is limited to the zero-shot setting to adhere to authors' intentions
as discussed in section 7 of the paper: "We hope that the community will use
this dataset in the intended way: in a zero-shot setting to probe models which
have been trained on data not specifically collected to succeed on PROST."
Homepage: https://github.com/nala-cub/prost
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{aroca-ouellette-etal-2021-prost,
title = "{PROST}: {P}hysical Reasoning about Objects through Space and Time",
author = "Aroca-Ouellette, St{\'e}phane and
Paik, Cory and
Roncone, Alessandro and
Kann, Katharina",
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-acl.404",
pages = "4597--4608",
}
"""
class PROST(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "corypaik/prost"
DATASET_NAME = None
def has_training_docs(self):
return False
def has_validation_docs(self):
return False
def has_test_docs(self):
return True
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def fewshot_context(
self, doc, num_fewshot, provide_description=None, rnd=None, description=None
):
assert (
num_fewshot == 0
), "PROST is designed to probe models in a zero-shot fashion only."
return super().fewshot_context(
doc=doc, num_fewshot=num_fewshot, rnd=rnd, description=description
)
def _process_doc(self, doc):
out_doc = {
"query": f"{doc['context']}\nQuestion: {doc['ex_question']}\nAnswer:",
"choices": [doc["A"], doc["B"], doc["C"], doc["D"]],
"gold": doc["label"],
}
return out_doc
def doc_to_text(self, doc):
return doc["query"]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["query"]
"""
PubMedQA: A Dataset for Biomedical Research Question Answering
https://arxiv.org/pdf/1909.06146.pdf
PubMedQA is a novel biomedical question answering (QA) dataset collected from
PubMed abstracts. The task of PubMedQA is to answer research questions with
yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after
coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA
has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA
instances. Each PubMedQA instance is composed of (1) a question which is either
an existing research article title or derived from one, (2) a context which is
the corresponding abstract without its conclusion, (3) a long answer, which is
the conclusion of the abstract and, presumably, answers the research question,
and (4) a yes/no/maybe answer which summarizes the conclusion.
Homepage: https://pubmedqa.github.io/
"""
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{jin2019pubmedqa,
title={PubMedQA: A Dataset for Biomedical Research Question Answering},
author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua},
booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
pages={2567--2577},
year={2019}
}
"""
class Pubmed_QA(Task):
VERSION = 0
DATASET_PATH = "pubmed_qa"
DATASET_NAME = "pqa_labeled"
def has_training_docs(self):
return False
def has_validation_docs(self):
return False
def has_test_docs(self):
return True
def test_docs(self):
if self.has_test_docs():
            # The HF split is labelled "train", but it is really only used for testing.
return self.dataset["train"]
def doc_to_text(self, doc):
ctxs = "\n".join(doc["context"]["contexts"])
return "Abstract: {}\nQuestion: {}\nAnswer:".format(
ctxs, doc["question"], doc["final_decision"]
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["question"] + " " + "\n".join(doc["context"]["contexts"])
def doc_to_target(self, doc):
return " {}".format(doc["final_decision"])
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns
an iterable of Requests which will be sent to the LM.
"""
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
ll_maybe, _ = rf.loglikelihood(ctx, " maybe")
return ll_yes, ll_no, ll_maybe
def process_results(self, doc, results):
gold = doc["final_decision"]
ll_yes, ll_no, ll_maybe = results
pred = np.argmax(results)
return {
"acc": ["yes", "no", "maybe"][pred] == gold,
}
def aggregation(self):
return {"acc": mean}
def higher_is_better(self):
return {"acc": True}
"""
QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation
https://www.cs.cmu.edu/~./hovy/papers/13CLEF-QA4MRE.pdf
The (English only) QA4MRE challenge which was run as a Lab at CLEF 2011-2013.
The main objective of this exercise is to develop a methodology for evaluating
Machine Reading systems through Question Answering and Reading Comprehension
Tests. Systems should be able to extract knowledge from large volumes of text
and use this knowledge to answer questions. Four different tasks have been
organized during these years: Main Task, Processing Modality and Negation for
Machine Reading, Machine Reading of Biomedical Texts about Alzheimer's disease,
and Entrance Exam.
Homepage: http://nlp.uned.es/clef-qa/repository/qa4mre.php
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Peas2013QA4MRE2O,
title={QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation},
author={Anselmo Pe{\~n}as and Eduard H. Hovy and Pamela Forner and {\'A}lvaro Rodrigo and Richard F. E. Sutcliffe and Roser Morante},
booktitle={CLEF},
year={2013}
}
""" # noqa: W605
class QA4MRE(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "qa4mre"
DATASET_NAME = None
def has_training_docs(self):
return False
def has_validation_docs(self):
return False
def has_test_docs(self):
return True
def test_docs(self):
# `qa4mre` only has train data so we use it for the test docs.
return map(self._process_doc, self.dataset["train"])
def _process_doc(self, doc):
choices = doc["answer_options"]["answer_str"]
out_doc = {
"source": doc["document_str"].strip().replace("'", "'"),
"query": doc["question_str"],
"choices": choices,
"gold": int(doc["correct_answer_id"]) - 1,
}
return out_doc
def doc_to_text(self, doc):
return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"])
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["source"] + " " + doc["query"]
class QA4MRE_2011(QA4MRE):
DATASET_NAME = "2011.main.EN"
class QA4MRE_2012(QA4MRE):
DATASET_NAME = "2012.main.EN"
class QA4MRE_2013(QA4MRE):
DATASET_NAME = "2013.main.EN"
"""
A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers
https://arxiv.org/abs/2105.03011
QASPER is a dataset of 5,049 questions over 1,585 Natural Language Processing papers.
Each question is written by an NLP practitioner who read only the title and abstract
of the corresponding paper, and the question seeks information present in the full
text. The questions are then answered by a separate set of NLP practitioners who also
provide supporting evidence to answers.
Homepage: https://allenai.org/data/qasper
"""
from collections import Counter
import re
import string
from lm_eval.base import rf, Task
from lm_eval.metrics import f1_score, mean
_CITATION = """
@article{DBLP:journals/corr/abs-2105-03011,
author = {Pradeep Dasigi and
Kyle Lo and
Iz Beltagy and
Arman Cohan and
Noah A. Smith and
Matt Gardner},
title = {A Dataset of Information-Seeking Questions and Answers Anchored in
Research Papers},
journal = {CoRR},
volume = {abs/2105.03011},
year = {2021},
url = {https://arxiv.org/abs/2105.03011},
eprinttype = {arXiv},
eprint = {2105.03011},
timestamp = {Fri, 14 May 2021 12:13:30 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
def normalize_answer(s):
"""
Taken from the official evaluation script for v1.1 of the SQuAD dataset.
Lower text and remove punctuation, articles and extra whitespace.
"""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
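# Quick illustration of the SQuAD-style normalization above:
assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
assert normalize_answer("An answer.") == "answer"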
def categorise_answer(answer_blob):
if answer_blob["unanswerable"]:
answer = "unanswerable"
answer_type = "unanswerable"
return answer, answer_type
elif answer_blob["yes_no"]:
answer = "yes"
answer_type = "bool"
return answer, answer_type
elif answer_blob["free_form_answer"]:
answer = answer_blob["free_form_answer"]
answer_type = "free form answer"
return answer, answer_type
elif answer_blob["extractive_spans"]:
answer = answer_blob["extractive_spans"]
answer_type = "extractive_spans"
return answer, answer_type
elif answer_blob["yes_no"] is False:
answer = "no"
answer_type = "bool"
return answer, answer_type
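# Illustration of the categorisation order above (hypothetical answer blob):
_blob = {
    "unanswerable": False,
    "yes_no": True,
    "free_form_answer": "",
    "extractive_spans": [],
}
assert categorise_answer(_blob) == ("yes", "bool")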
def token_f1_score(prediction, ground_truth):
"""
Taken from the official evaluation script for v1.1 of the SQuAD dataset.
"""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
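# Illustration: normalize_answer strips articles, so these match exactly...
assert token_f1_score("a cat sat", "the cat sat") == 1.0
# ...while partial overlap (1 shared token of 2 on each side) gives F1 = 0.5.
assert token_f1_score("cat sat", "cat ran") == 0.5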
class QASPER(Task):
VERSION = 0
DATASET_PATH = "qasper"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def doc_to_text(self, doc):
return (
"TITLE: "
+ doc["title"]
+ "\n"
+ "ABSTRACT: "
+ doc["abstract"]
+ "\n\n"
+ "Q: "
+ doc["question"]
+ "\n\n"
+ "A:"
)
def doc_to_target(self, doc):
answer = doc["answer"]
if isinstance(answer, list):
answer = ", ".join(answer)
return " " + answer
def training_docs(self):
for doc in self.dataset["train"]:
yield from self._process_doc(doc)
def validation_docs(self):
for doc in self.dataset["validation"]:
yield from self._process_doc(doc)
def _process_doc(self, doc):
"""Given a `doc`, flatten it out so that each JSON blob
contains exactly one question and one answer. Logic taken from
the reference implementation available at
https://github.com/allenai/qasper-led-baseline/blob/main/scripts/evaluator.py
"""
obs_list = []
for question, answer_list in zip(doc["qas"]["question"], doc["qas"]["answers"]):
for answer_blob in answer_list["answer"]:
answer, answer_type = categorise_answer(answer_blob)
obs_list.append(
{
"title": doc["title"],
"abstract": doc["abstract"],
"question": question,
"answer": answer,
"answer_type": answer_type,
}
)
return obs_list
def process_results(self, doc, results):
# TODO: Calculate a score for extractive spans once a request type for generating
# extractive spans is available
if not results:
return {}
elif len(results) == 1:
[res] = results
elif len(results) == 2:
[ll_yes, ll_no] = results
# TODO: Handle unanswerability first
# unanswerable_gold = doc["answer_type"] == "unanswerable"
# unanswerable_pred = exp(logprob_unanswerable)
# res_dict["f1_unanswerable"] = (unanswerable_gold, unanswerable_pred)
res_dict = {}
# Handle yes/no questions
if doc["answer_type"] == "bool":
gold = 1 if doc["answer"] == "yes" else 0
pred = ll_yes > ll_no
res_dict["f1_yesno"] = (gold, pred)
# Handle completions
if doc["answer_type"] == "free form answer":
res_dict["f1_abstractive"] = token_f1_score(res, doc["answer"])
# TODO: Handle extraction
# if doc["answer_type"] == "extractive_spans":
# res_dict["f1_extractive"] = 0
return res_dict
def aggregation(self):
return {
"f1_yesno": f1_score,
"f1_abstractive": mean,
}
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# unanswerable = rf.loglikelihood(ctx, " " + "unanswerable")
if doc["answer_type"] in ("free form answer"):
return [rf.greedy_until(ctx, ["\n"])]
elif doc["answer_type"] in ("bool"):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return [ll_yes, ll_no]
else:
return []
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {
"f1_yesno": True,
"f1_abstractive": True,
}
"""
QuAC: Question Answering in Context
https://arxiv.org/abs/1808.07036
Question Answering in Context (QuAC) is a dataset for modeling, understanding, and
participating in information seeking dialog. Data instances consist of an interactive
dialog between two crowd workers: (1) a student who poses a sequence of freeform
questions to learn as much as possible about a hidden Wikipedia text, and (2)
a teacher who answers the questions by providing short excerpts (spans) from the text.
Homepage: https://quac.ai/
"""
import inspect
import lm_eval.datasets.quac.quac
from lm_eval.base import Task
_CITATION = """
@article{choi2018quac,
title={Quac: Question answering in context},
author={Choi, Eunsol and He, He and Iyyer, Mohit and Yatskar, Mark and Yih, Wen-tau and Choi, Yejin and Liang, Percy and Zettlemoyer, Luke},
journal={arXiv preprint arXiv:1808.07036},
year={2018}
}
"""
class QuAC(Task):
VERSION = 0
DATASET_PATH = inspect.getfile(lm_eval.datasets.quac.quac)
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
raise NotImplementedError("QuAC has no test docs.")
def _process_doc(self, doc):
doc["title"] = doc["title"] + " - " + doc["section_title"]
return doc
def doc_to_text(self, doc):
return (
"TITLE: "
+ doc["title"]
+ "\n"
+ "PARAGRAPH: "
+ doc["paragraph"]
+ "\n\n"
+ "Q: "
+ doc["question"]
+ "\n\n"
+ "A: "
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["paragraph"]
def doc_to_target(self, doc):
return doc["answer"]
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
"""
RACE: Large-scale ReAding Comprehension Dataset From Examinations
https://arxiv.org/pdf/1704.04683.pdf
RACE is a large-scale reading comprehension dataset with more than 28,000 passages
and nearly 100,000 questions. The dataset is collected from English examinations
in China, which are designed for middle school and high school students. The dataset
can be served as the training and test sets for machine comprehension.
Homepage: https://www.cs.cmu.edu/~glai1/data/race/
"""
import collections
import datasets
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
@article{lai2017large,
title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
journal={arXiv preprint arXiv:1704.04683},
year={2017}
}
"""
class each:
    """Pipeline helper: `xs >> each(f)` evaluates to `list(map(f, xs))`."""

    def __init__(self, f):
        self.f = f

    def __rrshift__(self, other):
        return list(map(self.f, other))
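# Illustrative check of the pipeline helper above: `>>` dispatches to __rrshift__.
assert ([1, 2, 3] >> each(lambda x: x * 2)) == [2, 4, 6]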
class RACE(Task):
VERSION = 1
DATASET_PATH = "race"
DATASET_NAME = "high"
cache = {}
letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
    def _collate_data(self, split):
        if split in self.cache:
            return self.cache[split]
        # One big issue with HF's implementation of this dataset: it makes a
        # separate document for each question, whereas the GPT-3 paper builds
        # one document per passage.
        r = collections.defaultdict(list)
        for item in datasets.load_dataset(
            path=self.DATASET_PATH, name=self.DATASET_NAME
        )[split]:
            r[item["article"]].append(item)
        res = list(
            r.values()
            >> each(
                lambda x: {
                    "article": x[0]["article"],
                    "problems": x
                    >> each(
                        lambda y: {
                            "question": y["question"],
                            "answer": y["answer"],
                            "options": y["options"],
                        }
                    ),
                }
            )
        )
        self.cache[split] = res
        return res
def training_docs(self):
return self._collate_data("train")
def validation_docs(self):
return self._collate_data("validation")
def test_docs(self):
return self._collate_data("test")
@classmethod
def get_answer_option(cls, problem):
answer = cls.letter_to_num[problem["answer"]]
return problem["options"][answer]
@classmethod
def last_problem(cls, doc):
return doc["problems"][-1]
def doc_to_text(self, doc):
text = "Article: " + doc["article"] + "\n\n"
for problem in doc["problems"][:-1]:
if problem["question"][-6:] == " _ .":
text += (
problem["question"][-5:] + self.get_answer_option(problem) + "\n"
)
else:
question = "Question: " + problem["question"] + "\n"
answer = "Answer: " + self.get_answer_option(problem) + "\n"
text += question + answer
text += self.last_problem(doc)["question"]
return text
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["article"]
def doc_to_target(self, doc):
return " " + self.get_answer_option(self.last_problem(doc))
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
problem = self.last_problem(doc)
ll_choices = [
rf.loglikelihood(ctx, " " + problem["options"][i])[0] for i in range(4)
]
return ll_choices
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
gold = self.letter_to_num[self.last_problem(doc)["answer"]]
pred = np.argmax(results)
return {"acc": int(pred == gold)}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {"acc": mean}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {"acc": True}
"""
Similarity of Semantic Relations
https://arxiv.org/pdf/cs/0608100.pdf
SAT (Scholastic Aptitude Test) Analogy Questions is a dataset comprising 374
multiple-choice analogy questions; 5 choices per question.
Homepage: https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art)
"""
import inspect
import lm_eval.datasets.sat_analogies.sat_analogies
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@article{article,
author = {Turney, Peter},
year = {2006},
month = {09},
pages = {379-416},
title = {Similarity of Semantic Relations},
volume = {32},
journal = {Computational Linguistics},
doi = {10.1162/coli.2006.32.3.379}
}
"""
class SATAnalogies(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = inspect.getfile(lm_eval.datasets.sat_analogies.sat_analogies)
DATASET_NAME = None
def __init__(self, data_dir: str):
"""
        SAT Analogy Questions is not publicly available. You must request the data
by emailing Peter Turney and then download it to a local directory path
which should be passed into the `data_dir` arg.
"""
super().__init__(data_dir=data_dir)
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
return []
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return []
def _process_doc(self, doc):
return {
"source": doc["source"],
"query": doc["stem"].split(" ")[:2],
"choices": [
"{} is to {}".format(*c.split(" ")[:2]) for c in doc["choices"]
],
"gold": ["a", "b", "c", "d", "e"].index(doc["solution"].strip()),
}
def doc_to_text(self, doc):
return "{} is to {} as".format(*doc["query"])
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["source"] + "\n" + " ".join(doc["query"])
"""
Crowdsourcing Multiple Choice Science Questions
https://aclanthology.org/W17-4413.pdf
The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics,
Chemistry and Biology, among others. The questions are in multiple-choice format
with 4 answer options each. For the majority of the questions, an additional paragraph
with supporting evidence for the correct answer is provided.
Homepage: https://allenai.org/data/sciq
"""
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Welbl2017CrowdsourcingMC,
title={Crowdsourcing Multiple Choice Science Questions},
author={Johannes Welbl and Nelson F. Liu and Matt Gardner},
booktitle={NUT@EMNLP},
year={2017}
}
"""
class SciQ(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "sciq"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
choices = [
doc["distractor1"],
doc["distractor2"],
doc["distractor3"],
doc["correct_answer"],
]
src = doc["support"]
out_doc = {
"source": src,
"query": doc["question"],
"choices": choices,
"gold": 3,
}
return out_doc
def doc_to_text(self, doc):
return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"]).strip()
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["source"] + " " + doc["query"]
"""
Know What You Don’t Know: Unanswerable Questions for SQuAD
https://arxiv.org/pdf/1806.03822.pdf
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset,
consisting of questions posed by crowdworkers on a set of Wikipedia articles,
where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable
questions written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but
also determine when no answer is supported by the paragraph and abstain from answering.
Homepage: https://rajpurkar.github.io/SQuAD-explorer/
"""
import datasets
from math import exp
from lm_eval.base import rf, Task
from functools import partial
from packaging import version
_CITATION = """
@misc{rajpurkar2018know,
title={Know What You Don't Know: Unanswerable Questions for SQuAD},
author={Pranav Rajpurkar and Robin Jia and Percy Liang},
year={2018},
eprint={1806.03822},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
def _squad_metric(predictions, references):
squad_metric = datasets.load_metric("squad_v2")
return squad_metric.compute(predictions=predictions, references=references)
def _squad_agg(key, items):
predictions, references = zip(*items)
return _squad_metric(predictions=predictions, references=references).get(key, 0)
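# Hedged sketch of the item shapes _squad_agg consumes: each item is a
# (prediction, reference) pair exactly as emitted by process_results below.
_example_item = (
    {"id": "0", "prediction_text": "Paris", "no_answer_probability": 0.0},
    {"id": "0", "answers": {"text": ["Paris"], "answer_start": [0]}},
)
# _squad_agg("exact", [_example_item]) would score 100.0 here, since the
# squad_v2 metric reports exact match on a 0-100 scale (computing it requires
# downloading the metric script).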
class SQuAD2(Task):
VERSION = 1
DATASET_PATH = "squad_v2"
DATASET_NAME = None
# HF changed squad on us so we have to make sure we aren't running the old one
assert version.parse(datasets.__version__) >= version.parse(
"1.11.0"
), "datasets v1.11.0 or later required for SQuAD"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
return self.dataset["train"]
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return (
"Title: "
+ doc["title"]
+ "\n\n"
+ "Background: "
+ doc["context"]
+ "\n\n"
+ "Question: "
+ doc["question"]
+ "\n\n"
+ "Answer:"
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["context"]
def doc_to_target(self, doc):
answer_list = doc["answers"]["text"]
if len(answer_list) > 0:
answer = answer_list[0]
else:
answer = "unanswerable"
return " " + answer
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
continuation = rf.greedy_until(ctx, ["\n"])
is_unanswerable = rf.loglikelihood(ctx, " " + "unanswerable")
return continuation, is_unanswerable
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
continuation, (logprob_unanswerable, _) = results
no_answer_probability = exp(logprob_unanswerable)
predictions = {
"id": doc["id"],
"prediction_text": continuation,
"no_answer_probability": no_answer_probability,
}
references = {
"id": doc["id"],
"answers": doc["answers"],
}
return {
"exact": (
predictions,
references,
), # Exact match (the normalized answer exactly match the gold answer)
"f1": (
predictions,
references,
), # The F-score of predicted tokens versus the gold answer
"HasAns_exact": (
predictions,
references,
), # Exact match (the normalized answer exactly match the gold answer)
"HasAns_f1": (
predictions,
references,
), # The F-score of predicted tokens versus the gold answer
"NoAns_exact": (
predictions,
references,
), # Exact match (the normalized answer exactly match the gold answer)
"NoAns_f1": (
predictions,
references,
), # The F-score of predicted tokens versus the gold answer
"best_exact": (
predictions,
references,
), # Best exact match (with varying threshold)
"best_f1": (predictions, references), # Best F1 (with varying threshold)
}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {
"exact": partial(
_squad_agg, "exact"
), # Exact match (the normalized answer exactly match the gold answer)
"f1": partial(
_squad_agg, "f1"
), # The F-score of predicted tokens versus the gold answer
"HasAns_exact": partial(
_squad_agg, "HasAns_exact"
), # Exact match (the normalized answer exactly match the gold answer)
"HasAns_f1": partial(
_squad_agg, "HasAns_f1"
), # The F-score of predicted tokens versus the gold answer
"NoAns_exact": partial(
_squad_agg, "NoAns_exact"
), # Exact match (the normalized answer exactly match the gold answer)
"NoAns_f1": partial(
_squad_agg, "NoAns_f1"
), # The F-score of predicted tokens versus the gold answer
"best_exact": partial(
_squad_agg, "best_exact"
), # Best exact match (with varying threshold)
"best_f1": partial(
_squad_agg, "best_f1"
), # Best F1 (with varying threshold)
}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {
"exact": True, # Exact match (the normalized answer exactly match the gold answer)
"f1": True, # The F-score of predicted tokens versus the gold answer
"HasAns_exact": True, # Exact match (the normalized answer exactly match the gold answer)
"HasAns_f1": True, # The F-score of predicted tokens versus the gold answer
"NoAns_exact": True, # Exact match (the normalized answer exactly match the gold answer)
"NoAns_f1": True, # The F-score of predicted tokens versus the gold answer
"best_exact": True, # Best exact match (with varying threshold)
"best_f1": True, # Best F1 (with varying threshold)
}