"results/paper_data/ScaleLoad/gt-ib-sw-Load-0m-1.json" did not exist on "ed2318f143950eb6a1309c6f9d95df62356247ef"
openbookqa.py 2.05 KB
Newer Older
Leo Gao's avatar
Leo Gao committed
1
2
# REMINDER: this code needs to be rewritten for the new framework. Remove this comment when the code is fully converted.

Charles Foster's avatar
Charles Foster committed
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
from tqdm import auto as tqdm_lib
from . common import HFTask, simple_accuracy_metric, yesno

class OpenBookQA(HFTask):
    """OpenBookQA multiple-choice QA task, backed by the HuggingFace
    `openbookqa` dataset ("main" config).

    Each example provides a question stem, four answer choices, and a
    letter answer key ('A'-'D'); `doc_to_text` renders the prompt and,
    optionally, the gold completion.
    """
    DATASET_PATH = "openbookqa"
    DATASET_NAME = "main"

    # Maps the dataset's letter answerKey to an index into
    # doc['choices']['text']. Replaces a hand-rolled if/elif ladder.
    _ANSWER_KEY_TO_INDEX = {'A': 0, 'B': 1, 'C': 2, 'D': 3}

    def has_training_docs(self):
        """OpenBookQA ships a training split."""
        return True

    def has_validation_docs(self):
        """OpenBookQA ships a validation split."""
        return True

    def has_test_docs(self):
        """OpenBookQA ships a test split."""
        return True

    def training_docs(self):
        """Return (and cache) the training documents as a list."""
        if self.has_training_docs():
            # Materialize once; self.data["train"] may be a lazy dataset.
            if self._training_docs is None:
                self._training_docs = list(self.data["train"])
            return self._training_docs

    def validation_docs(self):
        """Return the validation documents (falls through to None otherwise)."""
        if self.has_validation_docs():
            return self.data["validation"]

    def test_docs(self):
        """Return the test documents (falls through to None otherwise)."""
        if self.has_test_docs():
            return self.data["test"]

    def fewshot_description(self):
        """One-line description of the prompt/completion format."""
        return "Text of the question prompt\nText of the answer completion"

    def doc_to_text(self, doc, include_target=True):
        """Render a document as prompt text.

        Args:
            doc: a dataset example with 'question_stem', 'choices', and
                'answerKey' fields.
            include_target: when True, append the gold answer choice text
                (terminated with a period) after the question stem.

        Returns:
            The formatted prompt string.

        Raises:
            ValueError: if the example's answerKey is not one of 'A'-'D'.
        """
        text = doc['question_stem'] + '\n'
        if include_target:
            try:
                index = self._ANSWER_KEY_TO_INDEX[doc['answerKey']]
            except KeyError:
                # Preserve the original error type and message for callers.
                raise ValueError(
                    "OpenBookQA from HF datasets contained an invalid answer key"
                ) from None
            text += doc['choices']['text'][index] + '.'
        return text

56
57
58
59
60
    # TODO: Implement evaluation code

    # ***IMPORTANT***: this evaluation function needs to be written for the new framework. 
    # For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py. 
    # Remove this comment when the evaluation code is implemented.