squad.py
"""
Know What You Don’t Know: Unanswerable Questions for SQuAD
https://arxiv.org/pdf/1806.03822.pdf

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset,
consisting of questions posed by crowdworkers on a set of Wikipedia articles,
where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable
questions written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but
also determine when no answer is supported by the paragraph and abstain from answering.

Homepage: https://rajpurkar.github.io/SQuAD-explorer/

@misc{rajpurkar2018know,
      title={Know What You Don't Know: Unanswerable Questions for SQuAD}, 
      author={Pranav Rajpurkar and Robin Jia and Percy Liang},
      year={2018},
      eprint={1806.03822},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""
import datasets
from math import exp
from lm_eval.base import rf
from lm_eval.metrics import f1_score, mean
from .common import HFTask
from functools import partial
from packaging import version


def _squad_metric(predictions, references):
    squad_metric = datasets.load_metric("squad_v2")
    return squad_metric.compute(predictions=predictions, references=references)


def _squad_agg(key, items):
    predictions, references = zip(*items)

    return _squad_metric(predictions=predictions, references=references)[key]
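
# Illustrative only: each item aggregated here is the (predictions, references)
# pair emitted by SQuAD2.process_results for one document, e.g. (hypothetical
# values):
#   ({'id': '<doc id>', 'prediction_text': 'France', 'no_answer_probability': 0.02},
#    {'id': '<doc id>', 'answers': {'text': ['France'], 'answer_start': [159]}})
# zip(*items) splits a list of such pairs into the parallel prediction and
# reference lists that datasets.load_metric("squad_v2") expects.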


class SQuAD2(HFTask):
    VERSION = 1
    DATASET_PATH = "squad_v2"
    DATASET_NAME = None

    # HF changed squad on us so we have to make sure we aren't running the old one
    assert version.parse(datasets.__version__) >= version.parse("1.11.0"), \
        "datasets v1.11.0 or later required for SQuAD"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        return self.data["train"]

    def validation_docs(self):
        return self.data["validation"]

    def doc_to_text(self, doc):
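        # Illustrative rendered prompt (field values abbreviated from the
        # dataset; exact strings vary per document):
        #   Title: Normans
        #
        #   Background: The Normans were the people who in the 10th and 11th
        #   centuries gave their name to Normandy, a region in France. ...
        #
        #   Question: In what country is Normandy located?
        #
        #   Answer: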
        return (
            f"Title: {doc['title']}\n\n"
            f"Background: {doc['context']}\n\n"
            f"Question: {doc['question']}\n\n"
            "Answer:"
        )

    def doc_to_target(self, doc):
        answer_list = doc['answers']['text']
        if len(answer_list) > 0:
            answer = answer_list[0]
        else:
            answer = 'unanswerable'
        return " " + answer

    def construct_requests(self, doc, ctx):
        """ Uses RequestFactory to construct Requests and returns an iterable of 
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural 
            language description, as well as the few shot examples, and the question
            part of the document for `doc`. 
        """
        continuation = rf.greedy_until(ctx, ['\n'])
        is_unanswerable = rf.loglikelihood(ctx, " unanswerable")
        return continuation, is_unanswerable
    
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a 
        dict where keys are the names of submetrics and values are the values of 
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        continuation, (logprob_unanswerable, _) = results

        no_answer_probability = exp(logprob_unanswerable)
        
        predictions = {
            'id': doc['id'],
            'prediction_text': continuation,
            'no_answer_probability': no_answer_probability,
        }

        references = {
            'id': doc['id'],
            'answers': doc['answers'],
        }

        return {
            'exact': (predictions, references),  # Exact match (the normalized answer exactly matches the gold answer)
            'f1': (predictions, references),  # The F-score of predicted tokens versus the gold answer
            'HasAns_exact': (predictions, references),  # Exact match over answerable questions only
            'HasAns_f1': (predictions, references),  # F-score over answerable questions only
            'NoAns_exact': (predictions, references),  # Exact match over unanswerable questions only
            'NoAns_f1': (predictions, references),  # F-score over unanswerable questions only
            'best_exact': (predictions, references),  # Best exact match (with varying threshold)
            'best_f1': (predictions, references),  # Best F1 (with varying threshold)
        }
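
    # NB: every key maps to the same (predictions, references) payload because
    # the "squad_v2" metric computes all submetrics in one compute() call;
    # _squad_agg then selects a single key per metric during aggregation.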

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are 
            functions that aggregate a list of metrics
        """
        return {
            'exact': partial(_squad_agg, 'exact'),  # Exact match (the normalized answer exactly matches the gold answer)
            'f1': partial(_squad_agg, 'f1'),  # The F-score of predicted tokens versus the gold answer
            'HasAns_exact': partial(_squad_agg, 'HasAns_exact'),  # Exact match over answerable questions only
            'HasAns_f1': partial(_squad_agg, 'HasAns_f1'),  # F-score over answerable questions only
            'NoAns_exact': partial(_squad_agg, 'NoAns_exact'),  # Exact match over unanswerable questions only
            'NoAns_f1': partial(_squad_agg, 'NoAns_f1'),  # F-score over unanswerable questions only
            'best_exact': partial(_squad_agg, 'best_exact'),  # Best exact match (with varying threshold)
            'best_f1': partial(_squad_agg, 'best_f1'),  # Best F1 (with varying threshold)
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are 
            whether a higher value of the submetric is better
        """
        return {
            'exact': True,  # Exact match (the normalized answer exactly matches the gold answer)
            'f1': True,  # The F-score of predicted tokens versus the gold answer
            'HasAns_exact': True,  # Exact match over answerable questions only
            'HasAns_f1': True,  # F-score over answerable questions only
            'NoAns_exact': True,  # Exact match over unanswerable questions only
            'NoAns_f1': True,  # F-score over unanswerable questions only
            'best_exact': True,  # Best exact match (with varying threshold)
            'best_f1': True,  # Best F1 (with varying threshold)
        }
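

# Minimal usage sketch (illustrative; assumes HFTask's constructor loads the
# dataset into self.data, as the accessors above imply):
#
#   task = SQuAD2()
#   doc = next(iter(task.validation_docs()))
#   prompt = task.doc_to_text(doc)    # "Title: ...\n\nBackground: ...\n\nQuestion: ...\n\nAnswer:"
#   target = task.doc_to_target(doc)  # " <first gold answer>" or " unanswerable"
#
# The evaluation harness resolves construct_requests(doc, prompt) against the
# LM, scores each document with process_results, and combines the per-document
# pairs using the callables returned by aggregation().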