mathqa.py
from .common import HFTask
from lm_eval.base import MultipleChoiceTask
import re


class MathQA(HFTask, MultipleChoiceTask):
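    """Multiple-choice math word problems (the `math_qa` dataset on the
    HuggingFace hub)."""
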
    DATASET_PATH = "math_qa"
    DATASET_NAME = None

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _convert_standard(self, doc):
        # The gold answer is a letter 'a'-'e'; map it to a 0-based index
        # into the choices list.
        answer_idx = ['a', 'b', 'c', 'd', 'e'].index(doc['correct'])
        # 'options' is a single string such as "a ) 3 , b ) 4 , ... , e ) 7";
        # extract each option's text, dropping the "x ) " prefix and the
        # trailing " , " separator.
        choices = [
            c[4:].rstrip(" ,")
            for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc['options'])
        ]

        out_doc = {
            "query": "Question: " + doc['Problem'] + "\nAnswer:",
            "choices": choices,
            "gold": answer_idx,
        }
        return out_doc

    def _load_docs(self, docs):
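        # Lazily convert raw HF records into the standardized doc format.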
        for record in docs:
            yield self._convert_standard(record)

    def training_docs(self):
        docs = super().training_docs()
        return self._load_docs(docs)

    def validation_docs(self):
        docs = super().validation_docs()
        return self._load_docs(docs)

    def test_docs(self):
        docs = super().test_docs()
        return self._load_docs(docs)

    def fewshot_description(self):
        # TODO: figure out description
        return ""

    def doc_to_text(self, doc):
        return doc["query"]
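

if __name__ == "__main__":
    # A minimal, self-contained sanity check; not part of the task API.
    # The record below is a hypothetical example in the math_qa schema,
    # used only to show what _convert_standard produces. self is unused in
    # _convert_standard, so we pass None instead of instantiating the task
    # (which would trigger a dataset download).
    sample = {
        "Problem": "What is 2 + 2?",
        "options": "a ) 3 , b ) 4 , c ) 5 , d ) 6 , e ) 7",
        "correct": "b",
    }
    print(MathQA._convert_standard(None, sample))
    # -> {'query': 'Question: What is 2 + 2?\nAnswer:',
    #     'choices': ['3', '4', '5', '6', '7'], 'gold': 1}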