# headqa.py
from .common import HFTask
from lm_eval.base import MultipleChoiceTask


class HeadQABase(HFTask, MultipleChoiceTask):
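    """Shared base class for the HEAD-QA multiple-choice tasks.

    HEAD-QA is a multiple-choice QA dataset built from Spanish healthcare
    specialization exams; the Hub dataset `head_qa` provides `es` and `en`
    configs, selected via DATASET_NAME in the subclasses below.
    """
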
    VERSION = 0
    DATASET_PATH = "head_qa"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _convert_standard(self, doc):
        out_doc = {
            "id": doc["qid"],
            "query": "Question: " + doc["qtext"] + "\nAnswer:",
            "choices": [answer["atext"] for answer in doc["answers"]],
            "gold": int(doc["ra"]) - 1,
        }
        return out_doc

    def fewshot_description(self):
        # TODO: figure out description
        return ""

    def doc_to_text(self, doc):
        return doc["query"]


class HeadQAEn(HeadQABase):
    DATASET_NAME = "en"


class HeadQAEs(HeadQABase):
    DATASET_NAME = "es"


# for backwards compatibility
class HeadQAEsDeprecated(HeadQABase):
    DATASET_NAME = "es"

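    # Printed once at import time: statements in a class body run when the class is defined.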
    print("WARNING: headqa is deprecated. Please use headqa_es or headqa_en instead. See https://github.com/EleutherAI/lm-evaluation-harness/pull/240 for more info.")