""" 
A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers
https://arxiv.org/abs/2105.03011

@article{DBLP:journals/corr/abs-2105-03011,
  author    = {Pradeep Dasigi and
               Kyle Lo and
               Iz Beltagy and
               Arman Cohan and
               Noah A. Smith and
               Matt Gardner},
  title     = {A Dataset of Information-Seeking Questions and Answers Anchored in
               Research Papers},
  journal   = {CoRR},
  volume    = {abs/2105.03011},
  year      = {2021},
  url       = {https://arxiv.org/abs/2105.03011},
  eprinttype = {arXiv},
  eprint    = {2105.03011},
  timestamp = {Fri, 14 May 2021 12:13:30 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
from collections import Counter
from math import exp
import re
import string
from lm_eval.base import rf
from lm_eval.metrics import f1_score, mean
from .common import HFTask


def normalize_answer(s):
    """
    Taken from the official evaluation script for v1.1 of the SQuAD dataset.
    Lower text and remove punctuation, articles and extra whitespace.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
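
# Illustrative example (not from the original file): normalize_answer("The U.S.!")
# lowercases the text, strips punctuation and the article, and collapses
# whitespace, returning "us".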


def categorise_answer(answer_blob):
    if answer_blob["unanswerable"]:
        answer = "unanswerable"
        answer_type = "unanswerable"
        return answer, answer_type
    elif answer_blob["yes_no"]:
        answer = "yes"
        answer_type = "bool"
        return answer, answer_type
    elif answer_blob["free_form_answer"]:
        answer = answer_blob["free_form_answer"]
        answer_type = "free form answer"
        return answer, answer_type
    elif answer_blob["extractive_spans"]:
        answer = answer_blob["extractive_spans"]
        answer_type = "extractive spans"
        return answer, answer_type
    elif answer_blob["yes_no"] is False:
        answer = "no"
        answer_type = "bool"
        return answer, answer_type
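
# Illustrative mapping (hypothetical blob, using only the keys read above): an
# annotation with unanswerable=False, yes_no=None, free_form_answer="" and
# extractive_spans=["span one", "span two"] falls through to the extractive
# branch and is returned as (["span one", "span two"], "extractive spans").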


def token_f1_score(prediction, ground_truth):
    """
    Taken from the official evaluation script for v1.1 of the SQuAD dataset.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
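
# Worked example (not from the original file): prediction "the cat sat" scored
# against ground truth "cat sat down" normalizes to ["cat", "sat"] and
# ["cat", "sat", "down"]; two tokens overlap, so precision = 1.0, recall = 2/3
# and F1 = 0.8.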


def paragraph_f1_score(prediction, ground_truth):
    num_same = len(set(ground_truth).intersection(set(prediction)))
    if num_same == 0:
        return 0.0
    precision = num_same / len(prediction)
    recall = num_same / len(ground_truth)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
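
# Worked example (not from the original file): prediction ["p1", "p2"] against
# ground truth ["p2", "p3"] shares one element, so precision = recall = 0.5 and
# F1 = 0.5.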


class QASPER(HFTask):
    VERSION = 0
    DATASET_PATH = "qasper"
    DATASET_NAME = None

    def doc_to_text(self, doc):
        # this method is invoked by tests only
        return (
            "TITLE: "
            + doc["title"]
            + "\n"
            + "ABSTRACT: "
            + doc["abstract"]
            + "\n\n"
            + "Q: "
            + doc["question"]
            + "\n\n"
            + "A: "
        )
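
        # The rendered prompt has the form
        # "TITLE: ...\nABSTRACT: ...\n\nQ: ...\n\nA: ".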

    def doc_to_target(self, doc):
        # this method is invoked by tests only
        answer = doc["answer"]
        if isinstance(answer, list):  # extractive-span answers are lists of strings
            answer = ", ".join(answer)
        return " " + answer

    def training_docs(self):
        for doc in self.data["train"]:
            yield from self.process_doc(doc)

    def validation_docs(self):
        for doc in self.data["validation"]:
            yield from self.process_doc(doc)

    def process_doc(self, doc):
        """Given a `doc`, flatten it out so that each JSON blob
        contains exactly one question and one answer. Logic taken from
        the reference implementation available at
        https://github.com/allenai/qasper-led-baseline/blob/main/scripts/evaluator.py
        """
        obs_list = []
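        # doc["qas"]["question"] is a list of question strings and
        # doc["qas"]["answers"] is a parallel list whose entries each hold an
        # "answer" list of annotation blobs; the loop below flattens those pairs.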
        for question, answer_list in zip(doc["qas"]["question"], doc["qas"]["answers"]):
            for answer_blob in answer_list["answer"]:
                answer, answer_type = categorise_answer(answer_blob)
                obs_list.append(
                    {
                        "title": doc["title"],
                        "abstract": doc["abstract"],
                        "question": question,
                        "answer": answer,
                        "answer_type": answer_type,
                    }
                )
        return obs_list

    def process_results(self, doc, results):
        # The final result is always the loglikelihood of " unanswerable"
        # (see construct_requests); anything before it depends on the answer type.
        *res, ll_unanswerable = results
        res_dict = {}

        # Handle unanswerability first: predict "unanswerable" when the
        # probability of the " unanswerable" continuation exceeds 0.5.
        unanswerable_gold = doc["answer_type"] == "unanswerable"
        unanswerable_pred = exp(ll_unanswerable) > 0.5
        res_dict["f1_un"] = (unanswerable_gold, unanswerable_pred)

        # Handle yes/no questions
        if doc["answer_type"] == "bool":
            ll_yes, ll_no = res
            gold = 1 if doc["answer"] == "yes" else 0
            pred = ll_yes > ll_no
            res_dict["f1_yn"] = (gold, pred)

        # Handle completions
        if doc["answer_type"] == "free form answer":
            res_dict["f1_ab"] = token_f1_score(res[0], doc["answer"])

        # Handle extraction
        if doc["answer_type"] == "extractive spans":
            res_dict["f1_ex"] = paragraph_f1_score(res[0], doc["answer"])

        return res_dict

    def aggregation(self):
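        # f1_un and f1_yn collect (gold, pred) pairs scored with classification F1;
        # f1_ab and f1_ex are per-example F1 values that are simply averaged.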
        return {"f1_un": f1_score, "f1_yn": f1_score, "f1_ab": mean, "f1_ex": mean}

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        # Always request the loglikelihood of " unanswerable" so that
        # process_results can read it from the final result slot.
        unanswerable, _ = rf.loglikelihood(ctx, " unanswerable")
        if doc["answer_type"] in ("free form answer", "extractive spans"):
            return rf.greedy_until(ctx, ["\n"]), unanswerable
        elif doc["answer_type"] == "bool":
            ll_yes, _ = rf.loglikelihood(ctx, " yes")
            ll_no, _ = rf.loglikelihood(ctx, " no")
            return ll_yes, ll_no, unanswerable
        else:
            return unanswerable

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        return {"f1_un": True, "f1_yn": True, "f1_ab": True, "f1_ex": True}