"""
To-do:
    - WSC requires free-form generation
    - ReCoRD
"""
import numpy as np
import sklearn.metrics
import transformers.data.metrics.squad_metrics as squad_metrics
from .common import HFTask, yesno
from lm_eval.base import rf
from ..metrics import mean, acc_all, metric_max_over_ground_truths
from ..utils import general_detokenize


class BoolQ(HFTask):
    DATASET_PATH = "super_glue"
    DATASET_NAME = "boolq"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def fewshot_description(self):
        # TODO: figure out actual description
        return "Read the following passages and answer each question with a yes or a no."

    def doc_to_text(self, doc):
        return f"{doc['passage']}\nQuestion: {doc['question']}\nAnswer:"
    
    def doc_to_target(self, doc):
        return " " + yesno(doc['label']) 

    def construct_requests(self, doc, ctx):
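        # Score " yes" and " no" as continuations of the context; only the
        # loglikelihoods are kept (the second value of each loglikelihood result is unused here).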

        ll_yes, _ = rf.loglikelihood(ctx, ' yes')
        ll_no, _ = rf.loglikelihood(ctx, ' no')

        return ll_yes, ll_no

    def process_results(self, doc, results):
        ll_yes, ll_no = results
        gold = doc["label"]

        acc = 1. if (ll_yes > ll_no) == gold else 0.

        return {
            "acc": acc
        }
    
    def higher_is_better(self):
        return {
            "acc": True
        }
    
    def aggregation(self):
        return {
            "acc": mean
        }


class CommitmentBank(HFTask):
    DATASET_PATH = "super_glue"
    DATASET_NAME = "cb"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def fewshot_description(self):
        # TODO: figure out actual description
        return "Given a premise and a hypothesis, classify whether the author of the premise is committed" \
            " to the truth of the hypothesis. The three possible labels are true, false or neither."

    def doc_to_text(self, doc):
        return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format(
            doc["premise"],
            doc["hypothesis"],
        )

    def doc_to_target(self, doc):
        # True = entailment
        # False = contradiction
        # Neither = neutral
        return " {}".format({0: "True", 1: "Neither", 2: "False"}[doc["label"]])

    def construct_requests(self, doc, ctx):
        ll_true, _ = rf.loglikelihood(ctx, ' True')
        ll_neither, _ = rf.loglikelihood(ctx, ' Neither')
        ll_false, _ = rf.loglikelihood(ctx, ' False')

        return ll_true, ll_neither, ll_false

    def process_results(self, doc, results):
        gold = doc["label"]
        pred = np.argmax(results)
        acc = 1. if pred == gold else 0.

        return {
            "acc": acc,
            "f1": (pred, gold)
        }
    
    def higher_is_better(self):
        return {
            "acc": True,
            "f1": True
        }

    @classmethod
    def cb_multi_f1(cls, items):
        preds, golds = zip(*items)
        preds = np.array(preds)
        golds = np.array(golds)
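        # One-vs-rest F1 for each of the three CB labels, averaged (unweighted macro F1).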
        f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0)
        f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1)
        f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2)
        avg_f1 = mean([f11, f12, f13])
        return avg_f1
    
    def aggregation(self):
        return {
            "acc": mean,
            "f1": self.cb_multi_f1,
        }


class Copa(HFTask):
    DATASET_PATH = "super_glue"
    DATASET_NAME = "copa"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def fewshot_description(self):
        # TODO: figure out actual description
        return "Given a premise and one alternative with a causal relation to the premise and another without," \
            " choose the more plausible alternative"

    def doc_to_text(self, doc):
        # Drop the period
        connector = {
            "cause": "because",
            "effect": "therefore",
        }[doc["question"]]
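        # Illustrative example: premise "The man broke his toe." with question "cause"
        # becomes "The man broke his toe because".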
        return doc["premise"].strip()[:-1] + f" {connector}"

    def doc_to_target(self, doc):
        correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]
        # Connect the sentences
        return " " + self.convert_choice(correct_choice)

    def construct_requests(self, doc, ctx):
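        # Lower-case the first letter of each alternative (see convert_choice) so it reads
        # as a continuation of the premise.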
        choice1 = " " + self.convert_choice(doc["choice1"])
        choice2 = " " + self.convert_choice(doc["choice2"])
        
        ll_choice1, _ = rf.loglikelihood(ctx, choice1)
        ll_choice2, _ = rf.loglikelihood(ctx, choice2)

        return ll_choice1, ll_choice2

    def process_results(self, doc, results):
        gold = doc["label"]
        pred = np.argmax(results)
        acc = 1. if pred == gold else 0.

        return {
            "acc": acc
        }
    
    def higher_is_better(self):
        return {
            "acc": True
        }
    
    def aggregation(self):
        return {
            "acc": mean
        }

    @staticmethod
    def convert_choice(choice):
        return choice[0].lower() + choice[1:]


class MultiRC(HFTask):
    DATASET_PATH = "super_glue"
    DATASET_NAME = "multirc"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def fewshot_description(self):
        # TODO: figure out actual description
        return "READING COMPREHENSION ANSWER KEY"

    def doc_to_text(self, doc):
        return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:"

    def doc_to_target(self, doc):
        return " " + self.format_answer(answer=doc["answer"], label=doc["label"])

    @staticmethod
    def format_answer(answer, label):
        label_str = "yes" if label else "no"
        return f"{label_str}, {answer}"

    def construct_requests(self, doc, ctx):
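        # Compare the loglikelihoods of the two candidate continuations,
        # "yes, <answer>" and "no, <answer>", given the paragraph/question context.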
        true_choice = self.format_answer(answer=doc["answer"], label=True)
        false_choice = self.format_answer(answer=doc["answer"], label=False)
        
        ll_true_choice, _ = rf.loglikelihood(ctx, f' {true_choice}')
        ll_false_choice, _ = rf.loglikelihood(ctx, f' {false_choice}')

        return ll_true_choice, ll_false_choice

    def process_results(self, doc, results):
        pred = np.argmax(results)
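        # Accuracy is deferred to the acc_all aggregator, which (presumably) uses the full
        # doc to group the answer options that belong to the same question.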
        return {
            "acc": (pred, doc)
        }
    
    def higher_is_better(self):
        return {
            "acc": True
        }
    
    def aggregation(self):
        return {
            "acc": acc_all
        }


class ReCoRD(HFTask):
    DATASET_PATH = "super_glue"
    DATASET_NAME = "record"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def fewshot_description(self):
        # TODO: figure out actual description
        return ""

    def training_docs(self):
        # In ReCoRD, each doc manifests multiple "examples" in the context of few shot example packing.
        # Each doc consists of multiple answer candidates, each of which is scored yes/no.
        if self._training_docs is None:
            self._training_docs = []
            for doc in self.data["train"]:
                self._training_docs.append(self._process_doc(doc))
        return self._training_docs

    def validation_docs(self):
        # See: training_docs
        for doc in self.data["validation"]:
            yield self._process_doc(doc)

    @classmethod
    def _process_doc(cls, doc):
        return {
            "passage": doc["passage"],
            "query": doc["query"],
            "entities": sorted(list(set(doc["entities"]))),
            "answers": sorted(list(set(doc["answers"]))),
        }

    def doc_to_text(self, doc):
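        # ReCoRD passages consist of an initial text followed by bullet-point highlights
        # separated by "\n@highlight\n"; render the highlights as an indented list.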
        initial_text, *highlights = doc["passage"].strip().split("\n@highlight\n")
        text = initial_text + "\n\n"
        for highlight in highlights:
            text += f"  - {highlight}.\n"
        return text

    @classmethod
    def format_answer(cls, query, entity):
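        # ReCoRD queries contain an "@placeholder" token; substitute the candidate entity for it.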
        return f'  - {query}'.replace("@placeholder", entity)

    def doc_to_target(self, doc):
        # We only output the first correct entity in a doc
        return self.format_answer(query=doc["query"], entity=doc["answers"][0])

    def construct_requests(self, doc, ctx):
        requests = [
            rf.loglikelihood(ctx, self.format_answer(query=doc["query"], entity=entity))
            for entity in doc["entities"]
        ]
        return requests

    def process_results(self, doc, results):
        # ReCoRD's evaluation is actually deceptively simple:
        # - Pick the maximum likelihood prediction entity
        # - Evaluate the accuracy and token F1 PER EXAMPLE
        # - Average over all examples
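        # `results` holds one loglikelihood result per candidate entity, in the same order
        # as doc["entities"]; result[0] is the loglikelihood itself.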
        max_idx = np.argmax(np.array([result[0] for result in results]))

        prediction = doc["entities"][max_idx]
        gold_label_set = doc["answers"]
        f1 = metric_max_over_ground_truths(squad_metrics.compute_f1, prediction, gold_label_set)
        em = metric_max_over_ground_truths(squad_metrics.compute_exact, prediction, gold_label_set)

        return {
            "f1": f1,
            "em": em,
        }

    def higher_is_better(self):
        return {
            "f1": True,
            "em": True,
        }

    def aggregation(self):
        return {
            "f1": mean,
            "em": mean,
        }


class WordsInContext(HFTask):
    DATASET_PATH = "super_glue"
    DATASET_NAME = "wic"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def fewshot_description(self):
        # TODO: figure out actual description
        return ""

    def doc_to_text(self, doc):
        return "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the" \
               " two sentences above?\nAnswer:".format(
                    doc["sentence1"],
                    doc["sentence2"],
                    doc["sentence1"][doc["start1"]:doc["end1"]],
                )

    def doc_to_target(self, doc):
        return " {}".format({0: "no", 1: "yes"}[doc["label"]])

    def construct_requests(self, doc, ctx):
        ll_yes, _ = rf.loglikelihood(ctx, ' yes')
        ll_no, _ = rf.loglikelihood(ctx, ' no')

        return ll_yes, ll_no

    def process_results(self, doc, results):
        ll_yes, ll_no = results
        gold = doc["label"]

        acc = 1. if (ll_yes > ll_no) == gold else 0.

        return {
            "acc": acc
        }

    def higher_is_better(self):
        return {
            "acc": True
        }

    def aggregation(self):
        return {
            "acc": mean
        }


class SGWinogradSchemaChallenge(HFTask):
    # Note: This implementation differs from Fig G.32 because this is the SuperGLUE,
    #       binary version of the task.
    DATASET_PATH = "super_glue"
    DATASET_NAME = "wsc"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        if self.has_training_docs():
            if self._training_docs is None:
                # GPT-3 Paper's format only uses positive examples for fewshot "training"
                self._training_docs = [
                    doc for doc in
                    self.data["train"]
                    if doc["label"]
                ]
            return self._training_docs

    def fewshot_description(self):
        return "Final Exam with Answer Key\n" \
           "Instructions: Please carefully read the following passages. " \
           "For each passage, you must identify which noun the pronoun marked in *bold*" \
           " refers to.\n====="

    def doc_to_text(self, doc):
        raw_passage = doc["text"]
        # NOTE: HuggingFace span indices are word-based not character-based.
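        # Rebuild the passage with the pronoun (span2) wrapped in asterisks: `pre` is
        # everything before the pronoun, rejoined on single spaces; `post` is the rest of
        # the original string after the pronoun.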
        pre = " ".join(raw_passage.split()[:doc["span2_index"]])
        post = raw_passage[len(pre) + len(doc["span2_text"]) + 1:]
        passage = general_detokenize(pre + " *{}*".format(doc['span2_text']) + post)
        noun = doc["span1_text"]
        pronoun = doc["span2_text"]
        text = (
            f"Passage: {passage}\n"
            + f"Question: In the passage above, does the pronoun \"*{pronoun}*\" refer to \"*{noun}*\"?\n"
            + "Answer:"
        )
        return text

    def doc_to_target(self, doc):
        return " " + yesno(doc['label'])

    def construct_requests(self, doc, ctx):

        ll_yes, _ = rf.loglikelihood(ctx, ' yes')
        ll_no, _ = rf.loglikelihood(ctx, ' no')

        return ll_yes, ll_no

    def process_results(self, doc, results):
        ll_yes, ll_no = results
        gold = doc["label"]

        acc = 1. if (ll_yes > ll_no) == gold else 0.

        return {
            "acc": acc
        }

    def higher_is_better(self):
        return {
            "acc": True
        }

    def aggregation(self):
        return {
            "acc": mean
        }