"""
CoQA: A Conversational Question Answering Challenge
https://arxiv.org/pdf/1808.07042.pdf

CoQA is a large-scale dataset for building Conversational Question Answering 
systems. The goal of the CoQA challenge is to measure the ability of machines to 
understand a text passage and answer a series of interconnected questions that 
appear in a conversation.

Homepage: https://stanfordnlp.github.io/coqa/
"""
import inspect
import transformers.data.metrics.squad_metrics as squad_metrics
import lm_eval.datasets.coqa.coqa
from lm_eval.base import Task, rf, mean
from itertools import zip_longest


_CITATION = """
@misc{reddy2018coqa,
    title={CoQA: A Conversational Question Answering Challenge},
    author={Siva Reddy and Danqi Chen and Christopher D. Manning},
    year={2018},
    eprint={1808.07042},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""


class CoQA(Task):
    VERSION = 1
    DATASET_PATH = inspect.getfile(lm_eval.datasets.coqa.coqa)
    DATASET_NAME = None

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        return self.dataset["train"]

    def validation_docs(self):
        return self.dataset["validation"]

    def test_docs(self):
        pass

    def doc_to_text(self, doc):
        # Given a passage p, the conversation history {q1, a1, ..., qi-1, ai-1}
        # and a question qi, the task is to predict the answer ai.
        doc_text = doc["story"] + '\n\n'
        for (q, a) in zip_longest(doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]):   # omit target answer ai
            question = f"Q: {q}\n\n"
            answer = f"A: {a}\n\n" if a is not None else "A:"
            doc_text += question + answer
        return doc_text
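
    # For illustration, a two-turn doc renders roughly as follows (the story
    # and Q/A text here are hypothetical, not drawn from the dataset):
    #
    #   <story>
    #
    #   Q: <question 1>
    #
    #   A: <answer 1>
    #
    #   Q: <question 2>
    #
    #   A: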
        
    @classmethod
    def get_answers(cls, doc, turn_id):
        # Returns the gold answer for this turn plus any valid alternatives
        # (some CoQA questions have multiple valid answers).
        answers = []
        answer_forturn = doc["answers"]["input_text"][turn_id - 1]
        answers.append(answer_forturn)
        
        additional_answers = doc.get("additional_answers")
        if additional_answers:
            for key in additional_answers:
                additional_answer_for_turn = additional_answers[key]["input_text"][turn_id - 1]
                if additional_answer_for_turn.lower() not in map(str.lower, answers):
                    answers.append(additional_answer_for_turn)
        return answers
    
    @classmethod
    def get_answer_choice(cls, raw_text):
        # Function maps answers to CoQA answer categories
        # ~ 1/5 of the CoQA answers are Yes/No 
        # ~ 2/3 of the CoQA answers are span-based
        # (answers overlap with the passage ignoring punctuation and case mismatch)
        if raw_text == "unknown":
            return '0'
        if squad_metrics.normalize_answer(raw_text) == "yes":
            return '1'
        if squad_metrics.normalize_answer(raw_text) == "no":
            return '2'
        return '3'  # Not a yes/no question
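
    # For example (squad_metrics.normalize_answer lower-cases and strips
    # punctuation and articles): "unknown" -> '0', "Yes." -> '1', "No" -> '2',
    # and a span such as "the white house" -> '3'.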

    @staticmethod
    def compute_scores(gold_list, pred):
        # compute_exact tests for exact match on the normalized answer;
        # compute_f1 tests for token overlap between prediction and gold.
        f1_sum = 0.0
        em_sum = 0.0
        if len(gold_list) > 1:
            for i in range(len(gold_list)):
                gold_answers = gold_list[0:i] + gold_list[i + 1:]
                # Leave one gold out, compare the prediction against the
                # remaining golds, and take the maximum score.
                em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
                f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
        else:
            em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
            f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)

        return {'em': em_sum / max(1, len(gold_list)), 'f1': f1_sum / max(1, len(gold_list))}
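
    # A quick sketch with hypothetical inputs: given two golds, each is held
    # out in turn and the prediction is scored against the other, e.g.
    #   CoQA.compute_scores(["white", "it is white"], "white")
    # averages an exact hit against "white" with a partial F1 overlap
    # against "it is white".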

    def doc_to_target(self, doc, turnid=None):
        # Default to prediction of last turn.
        if turnid is None:
            turnid = len(doc["questions"]["input_text"])
        raw_text = doc['answers']["input_text"][turnid - 1]
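        # Prepend a space so the target concatenates cleanly after the
        # trailing "A:" produced by doc_to_text.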
        return " " + raw_text

    def construct_requests(self, doc, ctx):
        """ Uses RequestFactory to construct Requests and returns an iterable of 
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural 
            language description, as well as the few shot examples, and the question
            part of the document for `doc`. 
        """
        cont_request = rf.greedy_until(ctx, ['\nQ:'])
        return cont_request

    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a 
        dict where keys are the names of submetrics and values are the values of 
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        turn_id = len(doc["questions"]["input_text"])
        gold_list = self.get_answers(doc, turn_id)
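        # Keep only the first line of the generation as this turn's answer.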
        pred = results[0].strip().split('\n')[0]

        scores = self.compute_scores(gold_list, pred)

        return {
            "f1": scores['f1'],
            "em": scores['em'],
        }

    def higher_is_better(self):
        return {
            "f1": True,
            "em": True,
        }

    def aggregation(self):
        return {
            "f1": mean,
            "em": mean,
        }
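

# A minimal usage sketch, not part of the task API: builds a hand-rolled doc
# in the same field layout the accessors above expect. The story, question,
# and answers below are hypothetical, not drawn from the dataset.
if __name__ == "__main__":
    _doc = {
        "story": "The house on the hill is white.",
        "questions": {"input_text": ["What color is the house?"]},
        "answers": {"input_text": ["white"]},
    }
    # Bypass Task.__init__ so this sketch does not trigger a dataset download.
    _task = CoQA.__new__(CoQA)
    print(_task.doc_to_text(_doc))    # prompt ending in an unanswered "A:"
    print(_task.doc_to_target(_doc))  # " white"
    print(CoQA.compute_scores(["white", "it is white"], "white"))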