"""
Know What You Don’t Know: Unanswerable Questions for SQuAD
https://arxiv.org/pdf/1806.03822.pdf

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset,
consisting of questions posed by crowdworkers on a set of Wikipedia articles,
where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable
questions written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but
also determine when no answer is supported by the paragraph and abstain from answering.

Homepage: https://rajpurkar.github.io/SQuAD-explorer/
"""
from functools import partial
from math import exp

import datasets
from packaging import version

from lm_eval.base import rf, Task


_CITATION = """
@misc{rajpurkar2018know,
    title={Know What You Don't Know: Unanswerable Questions for SQuAD}, 
    author={Pranav Rajpurkar and Robin Jia and Percy Liang},
    year={2018},
    eprint={1806.03822},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""


def _squad_metric(predictions, references):
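    """Compute the full squad_v2 metric bundle (exact, f1, the HasAns_*/NoAns_*
    splits, and the threshold-scanned best_*) for a batch of predictions."""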
    squad_metric = datasets.load_metric("squad_v2")
    return squad_metric.compute(predictions=predictions, references=references)


def _squad_agg(key, items):
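    """Aggregate per-document (prediction, reference) pairs and return the
    single squad_v2 submetric named by `key`, e.g. 'exact' or 'f1'."""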
    predictions, references = zip(*items)

    return _squad_metric(predictions=predictions, references=references)[key]


class SQuAD2(Task):
    VERSION = 1
    DATASET_PATH = "squad_v2"
    DATASET_NAME = None

    # HF changed squad on us so we have to make sure we aren't running the old one
    assert version.parse(datasets.__version__) >= version.parse(
        "1.11.0"
    ), "datasets v1.11.0 or later required for SQuAD"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        return self.dataset["train"]

    def validation_docs(self):
        return self.dataset["validation"]

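    # doc_to_text renders each doc along these lines (the fields shown are an
    # illustrative example from SQuAD):
    #
    #   Title: Normans
    #
    #   Background: The Normans were the people who in the 10th and 11th
    #   centuries gave their name to Normandy, a region in France. ...
    #
    #   Question: In what country is Normandy located?
    #
    #   Answer: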
    def doc_to_text(self, doc):
        return (
            'Title: ' + doc['title'] + '\n\n'
            + 'Background: ' + doc['context'] + '\n\n'
            + 'Question: ' + doc['question'] + '\n\n'
            + 'Answer:'
        )

    def doc_to_target(self, doc):
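        # Gold answers are a list of text spans; unanswerable questions come
        # with an empty list, which this task maps to the literal target
        # "unanswerable". The leading space lets the target read as a natural
        # continuation of the "Answer:" prompt.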
        answer_list = doc['answers']['text']
        if len(answer_list) > 0:
            answer = answer_list[0]
        else:
            answer = 'unanswerable'
        return " " + answer

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
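        # Two requests per document: a greedy generation for the answer text
        # (stopping at a newline), plus the log-likelihood of the literal
        # continuation " unanswerable", which process_results converts into a
        # no-answer probability.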
        continuation = rf.greedy_until(ctx, ['\n'])
        is_unanswerable = rf.loglikelihood(ctx, " unanswerable")
        return continuation, is_unanswerable
    
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluate them,
        returning a dict where keys are the names of submetrics and values are
        the values of the metric for that one document.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
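        # `results` is (generated_text, (loglikelihood, is_greedy)); exp() maps
        # the " unanswerable" log-likelihood onto the no_answer_probability that
        # squad_v2 uses when scanning thresholds for best_exact/best_f1.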
        continuation, (logprob_unanswerable, _) = results

        no_answer_probability = exp(logprob_unanswerable)

        predictions = {
            'id': doc['id'],
            'prediction_text': continuation,
            'no_answer_probability': no_answer_probability,
        }

        references = {
            'id': doc['id'],
            'answers': doc['answers'],
        }

        return {
            'exact': (predictions, references),  # Exact match (the normalized answer exactly matches the gold answer)
            'f1': (predictions, references),  # The F-score of predicted tokens versus the gold answer
            'HasAns_exact': (predictions, references),  # Exact match over answerable questions only
            'HasAns_f1': (predictions, references),  # F-score over answerable questions only
            'NoAns_exact': (predictions, references),  # Exact match over unanswerable questions only
            'NoAns_f1': (predictions, references),  # F-score over unanswerable questions only
            'best_exact': (predictions, references),  # Best exact match (with varying no-answer threshold)
            'best_f1': (predictions, references),  # Best F1 (with varying no-answer threshold)
        }

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are 
            functions that aggregate a list of metrics
        """
        return {
            'exact': partial(_squad_agg, 'exact'),  # Exact match (the normalized answer exactly matches the gold answer)
            'f1': partial(_squad_agg, 'f1'),  # The F-score of predicted tokens versus the gold answer
            'HasAns_exact': partial(_squad_agg, 'HasAns_exact'),  # Exact match over answerable questions only
            'HasAns_f1': partial(_squad_agg, 'HasAns_f1'),  # F-score over answerable questions only
            'NoAns_exact': partial(_squad_agg, 'NoAns_exact'),  # Exact match over unanswerable questions only
            'NoAns_f1': partial(_squad_agg, 'NoAns_f1'),  # F-score over unanswerable questions only
            'best_exact': partial(_squad_agg, 'best_exact'),  # Best exact match (with varying no-answer threshold)
            'best_f1': partial(_squad_agg, 'best_f1'),  # Best F1 (with varying no-answer threshold)
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are 
            whether a higher value of the submetric is better
        """
        return {
            'exact': True,  # Exact match (the normalized answer exactly matches the gold answer)
            'f1': True,  # The F-score of predicted tokens versus the gold answer
            'HasAns_exact': True,  # Exact match over answerable questions only
            'HasAns_f1': True,  # F-score over answerable questions only
            'NoAns_exact': True,  # Exact match over unanswerable questions only
            'NoAns_f1': True,  # F-score over unanswerable questions only
            'best_exact': True,  # Best exact match (with varying no-answer threshold)
            'best_f1': True,  # Best F1 (with varying no-answer threshold)
        }
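

# A minimal usage sketch (hypothetical: assumes the surrounding harness
# registers this task as "squad2" and exposes `lm_eval.evaluator.simple_evaluate`;
# neither is guaranteed by this file):
#
#   from lm_eval import evaluator
#   results = evaluator.simple_evaluate(model="gpt2", tasks=["squad2"])
#   print(results["results"]["squad2"]["f1"])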