"""
KOBEST
https://arxiv.org/abs/2204.04541

A well-formulated benchmark plays a critical role in spurring advancements 
in the natural language processing (NLP) field, as it allows objective and
precise evaluation of diverse models. As modern language models (LMs) have 
become more elaborate and sophisticated, more difficult benchmarks that require
linguistic knowledge and reasoning have been proposed. However, most of these
benchmarks only support English, and great effort is necessary to construct
benchmarks for other low resource languages. To this end, we propose a new
benchmark named Korean balanced evaluation of significant tasks (KoBEST),
which consists of five Korean-language downstream tasks. Professional Korean
linguists designed the tasks that require advanced Korean linguistic knowledge.
Moreover, our data is purely annotated by humans and thoroughly reviewed to
guarantee high data quality. We also provide baseline models and human performance
results. Our dataset is available on the Huggingface.

Homepage: https://huggingface.co/datasets/skt/kobest_v1
"""

import numpy as np
from lm_eval.base import MultipleChoiceTask, rf, Task
from lm_eval.metrics import macro_f1_score, mean


class BoolQ(Task):
    """KoBEST BoolQ: Korean boolean (yes/no) question answering.

    Each doc has a ``paragraph``, a ``question``, and a binary ``label``
    (0 = no, 1 = yes). The model is scored by comparing the log-likelihood
    of the continuations " 아니오" (no) and " 예" (yes) after the prompt.
    """

    VERSION = 0
    DATASET_PATH = "skt/kobest_v1"
    DATASET_NAME = "boolq"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Materialize and cache the training split on first access.
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

    def test_docs(self):
        return self.dataset["test"]

    def doc_to_text(self, doc):
        # "질문" = question, "답변" = answer.
        return "{} 질문: {} 답변: ".format(doc["paragraph"], doc["question"])

    def doc_to_target(self, doc):
        # Label 0 -> " 아니오" (no), 1 -> " 예" (yes).
        return " {}".format({0: "아니오", 1: "예"}[doc["label"]])

    def construct_requests(self, doc, ctx):
        # Order (no, yes) matches the 0/1 label encoding, so the argmax
        # index in process_results is directly comparable to the gold label.
        ll_no, _ = rf.loglikelihood(ctx, " 아니오")
        ll_yes, _ = rf.loglikelihood(ctx, " 예")
        return ll_no, ll_yes

    def process_results(self, doc, results):
        pred = np.argmax(results)
        gold = doc["label"]
        return {
            "acc": pred == gold,
            # macro_f1 aggregates (gold, pred) pairs across all docs.
            "macro_f1": (gold, pred),
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score,
        }


class COPA(Task):
    """KoBEST COPA: Korean choice of plausible alternatives.

    Each doc has a ``premise``, a ``question`` type ("원인" = cause or
    "결과" = effect), two alternatives, and a binary ``label`` selecting
    the correct one. The prompt is the premise plus a connector, and the
    model scores each alternative as a continuation.
    """

    VERSION = 0
    DATASET_PATH = "skt/kobest_v1"
    DATASET_NAME = "copa"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Materialize and cache the training split on first access.
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

    def test_docs(self):
        return self.dataset["test"]

    def doc_to_text(self, doc):
        """Format: "{Premise} {Connector}".

        Connector is "왜냐하면" ("because") when the question asks for the
        cause ("원인"), else "그래서" ("so") for the effect ("결과").
        """
        connector = {
            "원인": "왜냐하면",
            "결과": "그래서",
        }[doc["question"].strip()]

        return doc["premise"] + f" {connector}"

    def doc_to_target(self, doc):
        # Label 0 selects alternative_1, label 1 selects alternative_2.
        correct_choice = doc["alternative_1"] if doc["label"] == 0 else doc["alternative_2"]

        return " " + correct_choice

    def construct_requests(self, doc, ctx):
        # Score both alternatives; index order matches the 0/1 label.
        ll_choice1, _ = rf.loglikelihood(ctx, " " + doc["alternative_1"])
        ll_choice2, _ = rf.loglikelihood(ctx, " " + doc["alternative_2"])

        return ll_choice1, ll_choice2

    def process_results(self, doc, results):
        pred = np.argmax(results)
        gold = doc["label"]
        return {
            "acc": pred == gold,
            # macro_f1 aggregates (gold, pred) pairs across all docs.
            "macro_f1": (gold, pred),
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score,
        }

class WiC(Task):
    """KoBEST WiC: Korean word-in-context disambiguation.

    Each doc has two contexts (``context_1``, ``context_2``), a target
    ``word``, and a binary ``label`` (0 = different sense, 1 = same sense).
    The model scores " 아니오" (no) vs " 예" (yes) as the answer.
    """

    VERSION = 0
    DATASET_PATH = "skt/kobest_v1"
    DATASET_NAME = "wic"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Materialize and cache the training split on first access.
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

    def test_docs(self):
        return self.dataset["test"]

    def doc_to_text(self, doc):
        # "Sentence1: ... Sentence2: ... Is {word} used with the same
        # meaning in both sentences?"
        return "문장1: {} 문장2: {} 두 문장에서 {}가 같은 뜻으로 쓰였나?".format(doc["context_1"], doc["context_2"], doc["word"])

    def doc_to_target(self, doc):
        # Label 0 -> " 아니오" (no), 1 -> " 예" (yes).
        return " {}".format({0: "아니오", 1: "예"}[doc["label"]])

    def construct_requests(self, doc, ctx):
        # Order (no, yes) matches the 0/1 label encoding.
        ll_no, _ = rf.loglikelihood(ctx, " 아니오")
        ll_yes, _ = rf.loglikelihood(ctx, " 예")

        return ll_no, ll_yes

    def process_results(self, doc, results):
        pred = np.argmax(results)
        gold = doc["label"]
        return {
            "acc": pred == gold,
            # macro_f1 aggregates (gold, pred) pairs across all docs.
            "macro_f1": (gold, pred),
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score,
        }


class HellaSwag(MultipleChoiceTask):
    """KoBEST HellaSwag: Korean commonsense sentence completion.

    Each doc provides a ``context`` and four candidate endings; the
    ``label`` is the index of the correct ending. Request construction is
    inherited from MultipleChoiceTask; results are additionally scored
    with macro F1 alongside accuracy.
    """

    VERSION = 0
    DATASET_PATH = "skt/kobest_v1"
    DATASET_NAME = "hellaswag"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Materialize, preprocess, and cache the training split once.
        if self._training_docs is None:
            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])

    def _process_doc(self, doc):
        # Convert a raw dataset row into the query/choices/gold format
        # expected by MultipleChoiceTask. "문장" = sentence.
        out_doc = {
            "query": "문장: {}".format(doc["context"]),
            "choices": [doc["ending_1"], doc["ending_2"], doc["ending_3"], doc["ending_4"]],
            "gold": int(doc["label"]),
        }
        return out_doc

    def doc_to_text(self, doc):
        return doc["query"]

    def process_results(self, doc, results):
        pred = np.argmax(results)
        gold = doc["gold"]
        return {
            "acc": pred == gold,
            # macro_f1 aggregates (gold, pred) pairs across all docs.
            "macro_f1": (gold, pred),
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score,
        }


class SentiNeg(Task):
    """KoBEST SentiNeg: Korean sentiment classification with negation.

    Each doc has a ``sentence`` and a binary ``label``
    (0 = negative, 1 = positive). The model scores " 부정" (negative)
    vs " 긍정" (positive) as the answer.
    """

    VERSION = 0
    DATASET_PATH = "skt/kobest_v1"
    DATASET_NAME = "sentineg"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Materialize and cache the training split on first access.
        if self._training_docs is None:
            self._training_docs = list(self.dataset["train"])
        return self._training_docs

    def validation_docs(self):
        return self.dataset["validation"]

    def test_docs(self):
        return self.dataset["test"]

    def doc_to_text(self, doc):
        # "문장" = sentence, "긍부정" = positive/negative (sentiment).
        return "문장: {} 긍부정:".format(doc["sentence"])

    def doc_to_target(self, doc):
        # Label 0 -> " 부정" (negative), 1 -> " 긍정" (positive).
        return " {}".format({0: "부정", 1: "긍정"}[doc["label"]])

    def construct_requests(self, doc, ctx):
        # Order (negative, positive) matches the 0/1 label encoding.
        ll_negative, _ = rf.loglikelihood(ctx, " 부정")
        ll_positive, _ = rf.loglikelihood(ctx, " 긍정")

        return ll_negative, ll_positive

    def process_results(self, doc, results):
        pred = np.argmax(results)
        gold = doc["label"]
        return {
            "acc": pred == gold,
            # macro_f1 aggregates (gold, pred) pairs across all docs.
            "macro_f1": (gold, pred),
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score,
        }