from .common import HFNLPTask
from ..utils_stream import X, each, apply, join, filt, one
import collections
import nlp


class RACE(HFNLPTask):
    NLP_PATH = "race"
    NLP_NAME = "high"

    cache = {}

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _collate_data(self, set):
        if set in self.cache: return self.cache[set]
        # One issue with HF's version of this dataset: it creates a separate
        # document for each question, whereas the GPT-3 paper constructs one
        # document per passage, containing all of that passage's questions.

        r = collections.defaultdict(list)
        for item in nlp.load_dataset(path=self.NLP_PATH, name=self.NLP_NAME)[set]:
            r[item['article']].append(item)
        
        res = list(r.values() >> each(lambda x: {
            'article': x[0]['article'],
            'problems': x >> each(lambda y: {
                'question': y['question'],
                'answer': y['answer'],
                'options': y['options'],
            })
        }))

        self.cache[set] = res
        return res
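
    # Illustrative shape of a collated document (placeholder values), assuming
    # the answer field is a letter 'A'-'D' as consumed by doc_to_text below:
    #
    #   {'article': '<passage text>',
    #    'problems': [{'question': '<q1>', 'answer': 'A',
    #                  'options': ['<opt A>', '<opt B>', '<opt C>', '<opt D>']},
    #                 ...]}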

    def training_docs(self):
        return self._collate_data("train")

    def validation_docs(self):
        return self._collate_data("validation")

    def test_docs(self):
        return self._collate_data("test")

    def fewshot_description(self):
        # TODO: figure out description
        return ""

    def doc_to_text(self, doc, include_target=True):
        r = "Article:\n" + doc['article'] + '\n\n'

        # Render each question for this passage as "Q: ... A: <option text>",
        # resolving the answer letter to its option text; the answer to the
        # final question is omitted unless include_target is True.
        r += (doc['problems'] >> apply(enumerate) >> each(
            lambda x: 'Q: ' + x[1]['question'] + '\n\nA:'
            + ((' ' + x[1]['options'][['A', 'B', 'C', 'D'].index(x[1]['answer'])])
               if x[0] != len(doc['problems']) - 1 or include_target else ''))
            >> join('\n\n'))

        return r
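
    # Illustrative prompt (placeholder values) for a doc with two problems and
    # include_target=False: earlier questions keep their gold answers and only
    # the final answer is left for the model to complete.
    #
    #   Article:
    #   <passage text>
    #
    #   Q: <first question>
    #
    #   A: <text of the correct option>
    #
    #   Q: <second question>
    #
    #   A: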

    def evaluate(self, docs, lm, provide_description, num_fewshot):
        # TODO: implement
        raise NotImplementedError()