"vscode:/vscode.git/clone" did not exist on "13842e413cfa95893185e4330fab61f6f70d19e8"
Commit 3f13d15f authored by Jonathan Tow

Make citations module-level constants

parent a1aceacd
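The same refactor is applied to every task module in the diff below: the BibTeX entry that previously lived inside the module docstring is moved into a module-level `_CITATION` string constant defined after the imports, so the docstring keeps only the task description and homepage. A minimal sketch of the resulting module shape, assuming a hypothetical `ExampleTask` and BibTeX key (`HFTask` is the base class several of the touched modules import from `.common`):

"""
Example task description.
Homepage: https://example.org/task
"""
from .common import HFTask

_CITATION = """
@inproceedings{example2020,
    title={An Example Task},
    author={Doe, Jane},
    year={2020}
}
"""


class ExampleTask(HFTask):
    VERSION = 0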
@@ -18,7 +18,14 @@ answers. This is a problem because the task's metrics require an exhaustive eval
of a question's options. See section 4 of the paper for details.
Homepage: https://leaderboard.allenai.org/mctaco/submissions/public
"""
import numpy as np
from lm_eval.base import rf
from collections import defaultdict
from . common import HFTask
_CITATION = """
@inproceedings{ZKNR19,
author = {Ben Zhou, Daniel Khashabi, Qiang Ning and Dan Roth},
title = {“Going on a vacation” takes longer than “Going for a walk”: A Study of Temporal Commonsense Understanding },
@@ -27,11 +34,6 @@ Homepage: https://leaderboard.allenai.org/mctaco/submissions/public
}
"""
import numpy as np
from lm_eval.base import rf
from collections import defaultdict
from . common import HFTask
class MCTACO(HFTask):
VERSION = 0
......
@@ -6,14 +6,6 @@ MuTual is a retrieval-based dataset for multi-turn dialogue reasoning, which is
modified from Chinese high school English listening comprehension test data.
Homepage: https://github.com/Nealcly/MuTual
@inproceedings{mutual,
title = "MuTual: A Dataset for Multi-Turn Dialogue Reasoning",
author = "Cui, Leyang and Wu, Yu and Liu, Shujie and Zhang, Yue and Zhou, Ming" ,
booktitle = "Proceedings of the 58th Conference of the Association for Computational Linguistics",
year = "2020",
publisher = "Association for Computational Linguistics",
}
""" """
import json import json
import zipfile import zipfile
...@@ -25,6 +17,17 @@ from lm_eval.metrics import mean ...@@ -25,6 +17,17 @@ from lm_eval.metrics import mean
from best_download import download_file from best_download import download_file
_CITATION = """
@inproceedings{mutual,
title = "MuTual: A Dataset for Multi-Turn Dialogue Reasoning",
author = "Cui, Leyang and Wu, Yu and Liu, Shujie and Zhang, Yue and Zhou, Ming" ,
booktitle = "Proceedings of the 58th Conference of the Association for Computational Linguistics",
year = "2020",
publisher = "Association for Computational Linguistics",
}
"""
class MuTualBase(Task):
VERSION = 1
BASE_PATH = Path("data/mutual")
......
@@ -14,19 +14,22 @@ downloads even if you dont use it. we should try and only download the val set a
not even bother with the train set.
Homepage: https://ai.google.com/research/NaturalQuestions
@article{47761,
title = {Natural Questions: a Benchmark for Question Answering Research},
author = {Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year = {2019},
journal = {Transactions of the Association of Computational Linguistics}
}
""" """
import random import random
from . common import HFTask from . common import HFTask
from itertools import islice from itertools import islice
_CITATION = """
@article{47761,
title={Natural Questions: a Benchmark for Question Answering Research},
author={Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year={2019},
journal={Transactions of the Association of Computational Linguistics}
}
"""
class NaturalQs(HFTask):
VERSION = 0
DATASET_PATH = "natural_questions"
......
@@ -13,16 +13,19 @@ in the book. The questions, by design, are answered incorrectly by both a retrie
based algorithm and a word co-occurrence algorithm.
Homepage: https://allenai.org/data/open-book-qa
"""
from lm_eval.base import MultipleChoiceTask
from .common import HFTask
_CITATION = """
@inproceedings{OpenBookQA2018,
title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
booktitle={EMNLP},
year={2018}
}
"""
from lm_eval.base import MultipleChoiceTask
from .common import HFTask
class OpenBookQA(HFTask, MultipleChoiceTask):
......
@@ -9,13 +9,6 @@ including books, github repositories, webpages, chat logs, and medical, physics,
math, computer science, and philosophy papers.
Homepage: https://pile.eleuther.ai/
@article{pile,
title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
journal={arXiv preprint arXiv:2101.00027},
year={2020}
}
""" """
import os import os
...@@ -28,6 +21,16 @@ from ..utils import general_detokenize ...@@ -28,6 +21,16 @@ from ..utils import general_detokenize
from best_download import download_file from best_download import download_file
_CITATION = """
@article{pile,
title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
journal={arXiv preprint arXiv:2101.00027},
year={2020}
}
"""
class PilePerplexityTask(PerplexityTask, abc.ABC):
VERSION = 1
......
@@ -8,22 +8,25 @@ the physical knowledge of existing models. To what extent are current approaches
actually learning about the world?
Homepage: https://yonatanbisk.com/piqa/
"""
import numpy as np
from lm_eval.base import MultipleChoiceTask, rf
from ..metrics import mean
from . common import HFTask
_CITATION = """
@inproceedings{Bisk2020,
author = {Yonatan Bisk and Rowan Zellers and
Ronan Le Bras and Jianfeng Gao
and Yejin Choi},
title = {PIQA: Reasoning about Physical Commonsense in
Natural Language},
booktitle = {Thirty-Fourth AAAI Conference on
Artificial Intelligence},
year = {2020},
}
"""
import numpy as np
from lm_eval.base import MultipleChoiceTask, rf
from ..metrics import mean
from . common import HFTask
class PiQA(HFTask, MultipleChoiceTask):
......
@@ -13,7 +13,12 @@ this dataset in the intended way: in a zero-shot setting to probe models which
have been trained on data not specifically collected to succeed on PROST."
Homepage: https://github.com/nala-cub/prost
"""
from lm_eval.base import MultipleChoiceTask
from . common import HFTask
_CITATON = """
@inproceedings{aroca-ouellette-etal-2021-prost,
title = "{PROST}: {P}hysical Reasoning about Objects through Space and Time",
author = "Aroca-Ouellette, St{\'e}phane and
@@ -30,9 +35,6 @@ Homepage: https://github.com/nala-cub/prost
}
"""
from lm_eval.base import MultipleChoiceTask
from . common import HFTask
class PROST(HFTask, MultipleChoiceTask):
VERSION = 0
......
@@ -14,14 +14,6 @@ the conclusion of the abstract and, presumably, answers the research question,
and (4) a yes/no/maybe answer which summarizes the conclusion.
Homepage: https://pubmedqa.github.io/
@inproceedings{jin2019pubmedqa,
title={PubMedQA: A Dataset for Biomedical Research Question Answering},
author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua},
booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
pages={2567--2577},
year={2019}
}
""" """
import numpy as np import numpy as np
from .common import HFTask from .common import HFTask
...@@ -29,6 +21,17 @@ from lm_eval.base import rf ...@@ -29,6 +21,17 @@ from lm_eval.base import rf
from ..metrics import mean from ..metrics import mean
_CITATION = """
@inproceedings{jin2019pubmedqa,
title={PubMedQA: A Dataset for Biomedical Research Question Answering},
author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua},
booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
pages={2567--2577},
year={2019}
}
"""
class Pubmed_QA(HFTask):
VERSION = 0
DATASET_PATH = "pubmed_qa"
......
@@ -12,13 +12,6 @@ Machine Reading, Machine Reading of Biomedical Texts about Alzheimer's disease,
and Entrance Exam.
Homepage: http://nlp.uned.es/clef-qa/repository/qa4mre.php
@inproceedings{Peas2013QA4MRE2O,
title={QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation},
author={Anselmo Pe{\~n}as and Eduard H. Hovy and Pamela Forner and {\'A}lvaro Rodrigo and Richard F. E. Sutcliffe and Roser Morante},
booktitle={CLEF},
year={2013}
}
""" """
import os import os
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
...@@ -26,6 +19,16 @@ from best_download import download_file ...@@ -26,6 +19,16 @@ from best_download import download_file
from lm_eval.base import MultipleChoiceTask from lm_eval.base import MultipleChoiceTask
_CITATION = """
@inproceedings{Peas2013QA4MRE2O,
title={QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation},
author={Anselmo Pe{\~n}as and Eduard H. Hovy and Pamela Forner and {\'A}lvaro Rodrigo and Richard F. E. Sutcliffe and Roser Morante},
booktitle={CLEF},
year={2013}
}
"""
class QA4MRE(MultipleChoiceTask):
VERSION = 0
YEAR = None
......
@@ -9,35 +9,38 @@ text. The questions are then answered by a separate set of NLP practitioners who
provide supporting evidence to answers.
Homepage: https://allenai.org/data/qasper
"""
from collections import Counter
from math import exp
import random
import re
import string
from lm_eval.base import rf
from lm_eval.metrics import f1_score, mean
from .common import HFTask
_CITATION = """
@article{DBLP:journals/corr/abs-2105-03011,
author = {Pradeep Dasigi and
Kyle Lo and
Iz Beltagy and
Arman Cohan and
Noah A. Smith and
Matt Gardner},
title = {A Dataset of Information-Seeking Questions and Answers Anchored in
Research Papers},
journal = {CoRR},
volume = {abs/2105.03011},
year = {2021},
url = {https://arxiv.org/abs/2105.03011},
eprinttype = {arXiv},
eprint = {2105.03011},
timestamp = {Fri, 14 May 2021 12:13:30 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
from collections import Counter
from math import exp
import random
import re
import string
from lm_eval.base import rf
from lm_eval.metrics import f1_score, mean
from .common import HFTask
def normalize_answer(s):
......
@@ -9,21 +9,23 @@ questions to learn as much as possible about a hidden Wikipedia text, and (2)
a teacher who answers the questions by providing short excerpts (spans) from the text.
Homepage: https://quac.ai/
@article{choi2018quac,
title={Quac: Question answering in context},
author={Choi, Eunsol and He, He and Iyyer, Mohit and Yatskar, Mark and Yih, Wen-tau and Choi, Yejin and Liang, Percy and Zettlemoyer, Luke},
journal={arXiv preprint arXiv:1808.07036},
year={2018}
}
""" """
import json import json
import os import os
from lm_eval.base import Task from lm_eval.base import Task
from ..utils import sh from ..utils import sh
_CITATION = """
@article{choi2018quac,
title={Quac: Question answering in context},
author={Choi, Eunsol and He, He and Iyyer, Mohit and Yatskar, Mark and Yih, Wen-tau and Choi, Yejin and Liang, Percy and Zettlemoyer, Luke},
journal={arXiv preprint arXiv:1808.07036},
year={2018}
}
"""
class QuAC(Task):
VERSION = 0
......
@@ -8,7 +8,16 @@ in China, which are designed for middle school and high school students. The dat
can be served as the training and test sets for machine comprehension.
Homepage: https://www.cs.cmu.edu/~glai1/data/race/
"""
import collections
import datasets
import numpy as np
from lm_eval.base import rf
from ..metrics import mean
from . common import HFTask
_CITATION = """
@article{lai2017large,
title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
@@ -16,12 +25,6 @@ Homepage: https://www.cs.cmu.edu/~glai1/data/race/
year={2017}
}
"""
import collections
import datasets
import numpy as np
from lm_eval.base import rf
from ..metrics import mean
from . common import HFTask
class each:
......
@@ -6,7 +6,12 @@ SAT (Scholastic Aptitude Test) Analogy Questions is a dataset comprising 374
multiple-choice analogy questions; 5 choices per question.
Homepage: https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art)
"""
import os
from lm_eval.base import MultipleChoiceTask
_CITATION = """
@article{article,
author = {Turney, Peter},
year = {2006},
@@ -18,8 +23,6 @@ Homepage: https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art)
doi = {10.1162/coli.2006.32.3.379}
}
"""
import os
from lm_eval.base import MultipleChoiceTask
class SATAnalogies(MultipleChoiceTask):
......
@@ -8,13 +8,6 @@ with 4 answer options each. For the majority of the questions, an additional par
with supporting evidence for the correct answer is provided.
Homepage: https://allenai.org/data/sciq
@inproceedings{Welbl2017CrowdsourcingMC,
title={Crowdsourcing Multiple Choice Science Questions},
author={Johannes Welbl and Nelson F. Liu and Matt Gardner},
booktitle={NUT@EMNLP},
year={2017}
}
""" """
import os import os
import json import json
...@@ -23,6 +16,16 @@ from lm_eval.base import MultipleChoiceTask ...@@ -23,6 +16,16 @@ from lm_eval.base import MultipleChoiceTask
from best_download import download_file from best_download import download_file
_CITATION = """
@inproceedings{Welbl2017CrowdsourcingMC,
title={Crowdsourcing Multiple Choice Science Questions},
author={Johannes Welbl and Nelson F. Liu and Matt Gardner},
booktitle={NUT@EMNLP},
year={2017}
}
"""
class SciQ(MultipleChoiceTask):
VERSION = 0
# Multiple languages and multiple years
......
@@ -12,15 +12,6 @@ To do well on SQuAD2.0, systems must not only answer questions when possible, bu
also determine when no answer is supported by the paragraph and abstain from answering.
Homepage: https://rajpurkar.github.io/SQuAD-explorer/
@misc{rajpurkar2018know,
title={Know What You Don't Know: Unanswerable Questions for SQuAD},
author={Pranav Rajpurkar and Robin Jia and Percy Liang},
year={2018},
eprint={1806.03822},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
""" """
import datasets import datasets
from math import exp from math import exp
...@@ -31,6 +22,18 @@ from functools import partial ...@@ -31,6 +22,18 @@ from functools import partial
from packaging import version from packaging import version
_CITATION = """
@misc{rajpurkar2018know,
title={Know What You Don't Know: Unanswerable Questions for SQuAD},
author={Pranav Rajpurkar and Robin Jia and Percy Liang},
year={2018},
eprint={1806.03822},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
def _squad_metric(predictions, references):
squad_metric = datasets.load_metric("squad_v2")
return squad_metric.compute(predictions=predictions, references=references)
......
@@ -7,7 +7,12 @@ understanding, story generation, and script learning. This test requires a syste
to choose the correct ending to a four-sentence story.
Homepage: https://cs.rochester.edu/nlp/rocstories/
"""
import csv
from lm_eval.base import Task
_CITATION = """
@inproceedings{sharma-etal-2018-tackling,
title = "Tackling the Story Ending Biases in The Story Cloze Test",
author = "Sharma, Rishi and
@@ -25,8 +30,6 @@ Homepage: https://cs.rochester.edu/nlp/rocstories/
abstract = "The Story Cloze Test (SCT) is a recent framework for evaluating story comprehension and script learning. There have been a variety of models tackling the SCT so far. Although the original goal behind the SCT was to require systems to perform deep language understanding and commonsense reasoning for successful narrative understanding, some recent models could perform significantly better than the initial baselines by leveraging human-authorship biases discovered in the SCT dataset. In order to shed some light on this issue, we have performed various data analysis and analyzed a variety of top performing models presented for this task. Given the statistics we have aggregated, we have designed a new crowdsourcing scheme that creates a new SCT dataset, which overcomes some of the biases. We benchmark a few models on the new dataset and show that the top-performing model on the original SCT dataset fails to keep up its performance. Our findings further signify the importance of benchmarking NLP systems on various evolving test sets.",
}
"""
import csv
from lm_eval.base import Task
class StoryCloze(Task):
......
@@ -8,18 +8,6 @@ understanding tasks.
Homepage: https://super.gluebenchmark.com/
TODO: WSC requires free-form generation.
@inproceedings{NEURIPS2019_4496bf24,
author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
pages = {},
publisher = {Curran Associates, Inc.},
title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
volume = {32},
year = {2019}
}
""" """
import numpy as np import numpy as np
import sklearn import sklearn
...@@ -30,6 +18,21 @@ from ..metrics import mean, acc_all, metric_max_over_ground_truths ...@@ -30,6 +18,21 @@ from ..metrics import mean, acc_all, metric_max_over_ground_truths
from ..utils import general_detokenize from ..utils import general_detokenize
_CITATION = """
@inproceedings{NEURIPS2019_4496bf24,
author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
pages = {},
publisher = {Curran Associates, Inc.},
title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
volume = {32},
year = {2019}
}
"""
class BoolQ(HFTask):
VERSION = 1
DATASET_PATH = "super_glue"
......
@@ -8,18 +8,6 @@ here to the SacreBLEU repo from which we've obtained the datasets:
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
Homepage: https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
""" """
import pycountry import pycountry
from pprint import pprint from pprint import pprint
...@@ -29,6 +17,21 @@ from lm_eval.base import Task, rf ...@@ -29,6 +17,21 @@ from lm_eval.base import Task, rf
from typing import List from typing import List
_CITATION = """
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""
sacrebleu_datasets = sacrebleu.DATASETS
......
@@ -8,16 +8,6 @@ and independently gathered evidence documents, six per question on average, that
high quality distant supervision for answering the questions.
Homepage: https://nlp.cs.washington.edu/triviaqa/
@InProceedings{JoshiTriviaQA2017,
author = {Joshi, Mandar and Choi, Eunsol and Weld, Daniel S. and Zettlemoyer, Luke},
title = {TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension},
booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics},
month = {July},
year = {2017},
address = {Vancouver, Canada},
publisher = {Association for Computational Linguistics},
}
""" """
import os import os
import json import json
...@@ -28,6 +18,19 @@ from ..utils import sh ...@@ -28,6 +18,19 @@ from ..utils import sh
from best_download import download_file from best_download import download_file
_CITATION = """
@InProceedings{JoshiTriviaQA2017,
author = {Joshi, Mandar and Choi, Eunsol and Weld, Daniel S. and Zettlemoyer, Luke},
title = {TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension},
booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics},
month = {July},
year = {2017},
address = {Vancouver, Canada},
publisher = {Association for Computational Linguistics},
}
"""
class TriviaQA(Task):
VERSION = 0
def download(self):
......
@@ -18,15 +18,6 @@ https://github.com/sylinrl/TruthfulQA#Fine-tuning-GPT-3-for-evaluation. Maybe
we could try this?
Homepage: https://github.com/sylinrl/TruthfulQA
@misc{lin2021truthfulqa,
title={TruthfulQA: Measuring How Models Mimic Human Falsehoods},
author={Stephanie Lin and Jacob Hilton and Owain Evans},
year={2021},
eprint={2109.07958},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
""" """
import csv import csv
import json import json
...@@ -40,6 +31,18 @@ from ..metrics import mean ...@@ -40,6 +31,18 @@ from ..metrics import mean
from datasets import load_metric from datasets import load_metric
_CITATION = """
@misc{lin2021truthfulqa,
title={TruthfulQA: Measuring How Models Mimic Human Falsehoods},
author={Stephanie Lin and Jacob Hilton and Owain Evans},
year={2021},
eprint={2109.07958},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
# The default QA preset prompt for all models.
QA_PROMPT = (
"Q: What is human life expectancy in the United States?\n"
......
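One practical upside of module-level citation constants over citations embedded in docstrings is that other code can read them without parsing free text. A hypothetical helper (illustrative only, not part of this commit or of the harness) that collects `_CITATION` strings from task modules, assuming dotted module paths like "lm_eval.tasks.openbookqa" based on the repo layout:

import importlib


def collect_citations(module_names):
    # Gather the _CITATION constant from each task module that defines one.
    # `module_names` are dotted import paths; modules without _CITATION are skipped.
    citations = {}
    for name in module_names:
        module = importlib.import_module(name)
        citation = getattr(module, "_CITATION", None)
        if citation:
            citations[name] = citation.strip()
    return citations


# Example usage (module paths assumed, not confirmed by the diff):
# collect_citations(["lm_eval.tasks.openbookqa", "lm_eval.tasks.sciq"])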