"""
SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems
https://w4ngatang.github.io/static/papers/superglue.pdf

SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language
understanding tasks.

Homepage: https://super.gluebenchmark.com/

TODO: WSC requires free-form generation.
"""
import numpy as np
import sklearn
import transformers.data.metrics.squad_metrics as squad_metrics
from lm_eval.base import rf, PromptSourceTask
from lm_eval.metrics import mean, acc_all, metric_max_over_ground_truths, yesno
from lm_eval.utils import general_detokenize


_CITATION = """
@inproceedings{NEURIPS2019_4496bf24,
    author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
    booktitle = {Advances in Neural Information Processing Systems},
    editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
    pages = {},
    publisher = {Curran Associates, Inc.},
    title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
    url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
    volume = {32},
    year = {2019}
}
"""


class BoolQ(PromptSourceTask):
36
    VERSION = 1
Leo Gao's avatar
Leo Gao committed
37
38
    DATASET_PATH = "super_glue"
    DATASET_NAME = "boolq"
Jason Phang's avatar
Jason Phang committed
39
40
41
42
43
44
45
46

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
47
        return False
Jason Phang's avatar
Jason Phang committed
48

Jonathan Tow's avatar
Jonathan Tow committed
49
50
51
52
53
54
55
56
    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

57
58
59
60
61
62
63
64
65
    def higher_is_better(self):
        return {
            "acc": True
        }
    
    def aggregation(self):
        return {
            "acc": mean
        }


class CommitmentBank(PromptSourceTask):
thomasw21's avatar
thomasw21 committed
69
    VERSION = 1
Leo Gao's avatar
Leo Gao committed
70
71
    DATASET_PATH = "super_glue"
    DATASET_NAME = "cb"
Jason Phang's avatar
Jason Phang committed
72
73
74
75
76
77
78
79

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
80
        return False
Jason Phang's avatar
Jason Phang committed
81

Jonathan Tow's avatar
Jonathan Tow committed
82
83
84
85
86
87
88
89
    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

thefazzer's avatar
thefazzer committed
90
91
    def process_results(self, doc, results):
        gold = doc["label"]
thefazzer's avatar
thefazzer committed
92
93
        pred = np.argmax(results)
        acc = 1. if pred == gold else 0.
Jason Phang's avatar
Jason Phang committed
94

thefazzer's avatar
thefazzer committed
95
        return {
thefazzer's avatar
thefazzer committed
96
97
            "acc": acc,
            "f1": (pred, gold)
thefazzer's avatar
thefazzer committed
98
99
100
101
        }
    
    def higher_is_better(self):
        return {
102
103
            "acc": True,
            "f1": True
thefazzer's avatar
thefazzer committed
104
        }
Jason Phang's avatar
Jason Phang committed
105
106
107
108
109
110
111
112
113
114
115

    @classmethod
    def cb_multi_fi(cls, items):
        preds, golds = zip(*items)
        preds = np.array(preds)
        golds = np.array(golds)
        f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0)
        f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1)
        f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2)
        avg_f1 = mean([f11, f12, f13])
        return avg_f1
thefazzer's avatar
thefazzer committed
116
117
118
    
    def aggregation(self):
        return {
thefazzer's avatar
thefazzer committed
119
            "acc": mean,
Jason Phang's avatar
Jason Phang committed
120
            "f1": self.cb_multi_fi,
thefazzer's avatar
thefazzer committed
121
        }


class Copa(PromptSourceTask):
Leo Gao's avatar
Leo Gao committed
125
    VERSION = 0
Leo Gao's avatar
Leo Gao committed
126
127
    DATASET_PATH = "super_glue"
    DATASET_NAME = "copa"
Jason Phang's avatar
Jason Phang committed
128
129
130
131
132
133
134
135

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
136
        return False
Jason Phang's avatar
Jason Phang committed
137

Jonathan Tow's avatar
Jonathan Tow committed
138
139
140
141
142
143
144
145
    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

thefazzer's avatar
thefazzer committed
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
    def process_results(self, doc, results):
        gold = doc["label"]
        pred = np.argmax(results)
        acc = 1. if pred == gold else 0.

        return {
            "acc": acc
        }
    
    def higher_is_better(self):
        return {
            "acc": True
        }
    
    def aggregation(self):
        return {
            "acc": mean
        }
Jason Phang's avatar
Jason Phang committed
164
165
166
167
168
169

    @staticmethod
    def convert_choice(choice):
        return choice[0].lower() + choice[1:]


class MultiRC(PromptSourceTask):
171
    VERSION = 1
Leo Gao's avatar
Leo Gao committed
172
173
    DATASET_PATH = "super_glue"
    DATASET_NAME = "multirc"
Jason Phang's avatar
multirc  
Jason Phang committed
174
175
176
177
178
179
180
181

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
182
        return False
Jason Phang's avatar
multirc  
Jason Phang committed
183

Jonathan Tow's avatar
Jonathan Tow committed
184
185
186
187
188
189
190
191
    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

thefazzer's avatar
thefazzer committed
192
    def process_results(self, doc, results):
thomasw21's avatar
thomasw21 committed
193
194
        ll_true_choice, ll_false_choice = results
        pred = ll_true_choice > ll_false_choice
Jason Phang's avatar
multirc  
Jason Phang committed
195
        return {
thefazzer's avatar
thefazzer committed
196
197
198
199
200
201
202
203
204
205
206
            "acc": (pred, doc)
        }
    
    def higher_is_better(self):
        return {
            "acc": True
        }
    
    def aggregation(self):
        return {
            "acc": acc_all
Jason Phang's avatar
multirc  
Jason Phang committed
207
208
        }


class ReCoRD(PromptSourceTask):
Leo Gao's avatar
Leo Gao committed
211
    VERSION = 0
Jason Phang's avatar
Jason Phang committed
212
213
214
215
216
217
218
219
220
221
    DATASET_PATH = "super_glue"
    DATASET_NAME = "record"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
Leo Gao's avatar
Leo Gao committed
222
        return False
Jason Phang's avatar
Jason Phang committed
223
224
225
226

    def training_docs(self):
        # In ReCoRD, each doc manifests multiple "examples" in the context of few shot example packing.
        # Each doc consists of multiple answer candidates, each of which is scored yes/no.
227
228
        if self._training_docs is None:
            self._training_docs = []
Jonathan Tow's avatar
Jonathan Tow committed
229
            for doc in self.dataset["train"]:
Jason Phang's avatar
Jason Phang committed
230
                self._training_docs.append(self._process_doc(doc))
231
232
233
        return self._training_docs

    def validation_docs(self):
Jason Phang's avatar
Jason Phang committed
234
        # See: training_docs
Jonathan Tow's avatar
Jonathan Tow committed
235
        for doc in self.dataset["validation"]:
Jason Phang's avatar
Jason Phang committed
236
237
238
239
240
241
242
243
244
245
            yield self._process_doc(doc)

    @classmethod
    def _process_doc(cls, doc):
        return {
            "passage": doc["passage"],
            "query": doc["query"],
            "entities": sorted(list(set(doc["entities"]))),
            "answers": sorted(list(set(doc["answers"]))),
        }
Jason Phang's avatar
Jason Phang committed
246
247
248
249
250
251

    def process_results(self, doc, results):
        # ReCoRD's evaluation is actually deceptively simple:
        # - Pick the maximum likelihood prediction entity
        # - Evaluate the accuracy and token F1 PER EXAMPLE
        # - Average over all examples
jon-tow's avatar
jon-tow committed
252
253

        # TODO (jon-tow): Look at result
Jason Phang's avatar
Jason Phang committed
254
        max_idx = np.argmax(np.array([result[0] for result in results]))
Leo Gao's avatar
Leo Gao committed
255

Jason Phang's avatar
Jason Phang committed
256
        prediction = doc["entities"][max_idx]
Jason Phang's avatar
Jason Phang committed
257
        gold_label_set = doc["answers"]
Jason Phang's avatar
Jason Phang committed
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
        f1 = metric_max_over_ground_truths(squad_metrics.compute_f1, prediction, gold_label_set)
        em = metric_max_over_ground_truths(squad_metrics.compute_exact, prediction, gold_label_set)

        return {
            "f1": f1,
            "em": em,
        }

    def higher_is_better(self):
        return {
            "f1": True,
            "em": True,
        }

    def aggregation(self):
        return {
            "f1": mean,
            "em": mean,
        }


class WordsInContext(PromptSourceTask):
Leo Gao's avatar
Leo Gao committed
280
    VERSION = 0
Leo Gao's avatar
Leo Gao committed
281
282
    DATASET_PATH = "super_glue"
    DATASET_NAME = "wic"
Jason Phang's avatar
Jason Phang committed
283
284
285
286
287
288
289
290

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
291
        return False
Jason Phang's avatar
Jason Phang committed
292

Jonathan Tow's avatar
Jonathan Tow committed
293
294
295
296
297
298
299
300
    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

Jason Phang's avatar
Jason Phang committed
301
302
303
304
305
306
307
308
309
    def higher_is_better(self):
        return {
            "acc": True
        }

    def aggregation(self):
        return {
            "acc": mean
        }


class SGWinogradSchemaChallenge(PromptSourceTask):
Leo Gao's avatar
Leo Gao committed
313
    VERSION = 0
Jason Phang's avatar
wsc  
Jason Phang committed
314
315
    # Note: This implementation differs from Fig G.32 because this is the SuperGLUE,
    #       binary version of the task.
Leo Gao's avatar
Leo Gao committed
316
317
    DATASET_PATH = "super_glue"
    DATASET_NAME = "wsc"
Jason Phang's avatar
Jason Phang committed
318
319
320
321
322
323
324
325

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
326
        return False
Jason Phang's avatar
Jason Phang committed
327
328
329
330

    def training_docs(self):
        if self.has_training_docs():
            if self._training_docs is None:
Jason Phang's avatar
Jason Phang committed
331
                # GPT-3 Paper's format only uses positive examples for fewshot "training"
Jason Phang's avatar
Jason Phang committed
332
333
                self._training_docs = [
                    doc for doc in
Jonathan Tow's avatar
Jonathan Tow committed
334
                    self.dataset["train"]
Jason Phang's avatar
Jason Phang committed
335
336
337
338
                    if doc["label"]
                ]
            return self._training_docs

Jonathan Tow's avatar
Jonathan Tow committed
339
340
341
    def validation_docs(self):
        return self.dataset["validation"]

Leo Gao's avatar
Leo Gao committed
342
    def higher_is_better(self):
Jason Phang's avatar
Jason Phang committed
343
344
345
346
347
348
349
350
        return {
            "acc": True
        }

    def aggregation(self):
        return {
            "acc": mean
        }