from .common import HFTask
from lm_eval.base import MultipleChoiceTask


class HeadQA(HFTask, MultipleChoiceTask):
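    """HEAD-QA: multiple-choice questions from Spanish healthcare
    specialization exams (Vilares & Gómez-Rodríguez, ACL 2019), loaded
    from the Hugging Face `head_qa` dataset.
    """
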
    DATASET_PATH = "head_qa"
    DATASET_NAME = None

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _convert_standard(self, doc):
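        """Map one raw example onto the schema MultipleChoiceTask expects.

        The field names read here ("qid", "qtext", "answers"/"atext", "ra")
        follow the `head_qa` schema. "ra" is the 1-indexed id of the right
        answer, so it is shifted down by one to become a 0-indexed position
        into "choices".
        """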
        out_doc = {
            "id": doc["qid"],
            "query": "Question: " + doc["qtext"] + "\nAnswer:",
            "choices": [answer["atext"] for answer in doc["answers"]],
            "gold": int(doc["ra"]) - 1,
        }
        return out_doc

    def _load_docs(self, docs):
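        # Generator: convert examples lazily rather than materializing
        # an entire split in memory at once.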
        for doc in docs:
            yield self._convert_standard(doc)

    def training_docs(self):
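        # HFTask.training_docs() yields the raw split; wrap it in the
        # converting generator (validation_docs/test_docs do the same).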
        docs = super().training_docs()
        return self._load_docs(docs)

    def validation_docs(self):
        docs = super().validation_docs()
        return self._load_docs(docs)

    def test_docs(self):
        docs = super().test_docs()
        return self._load_docs(docs)

    def fewshot_description(self):
        # TODO: figure out description
        return ""

    def doc_to_text(self, doc):
        return doc["query"]