sat.py
# REMINDER: this code needs to be rewritten for the new framework. Remove this comment when the code is fully converted.

import os

import numpy as np
from tqdm import auto as tqdm_lib

from lm_eval.base import Dataset
from .common import simple_accuracy_metric


class SATAnalogies(Dataset):
    def __init__(self):
        super().__init__()

    def download(self):
        # We should be using a checksum here.
        # The canonical sha256 hash is below:
        # 9dece377d8d57253ef8c78370ff15de0bb1d9e90a82c815a67ba1e621e921bfc
        if not os.path.exists('data/sat/SAT-package-V3.txt'):
            raise NotImplementedError('The SAT Analogies dataset is not provided. Follow the instructions at https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art) to obtain it, then place SAT-package-V3.txt in data/sat/.')
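        # A minimal sketch of the checksum verification the comment above asks
        # for, using only the stdlib; the digest is the one quoted above:
        #
        #   import hashlib
        #   with open('data/sat/SAT-package-V3.txt', 'rb') as f:
        #       digest = hashlib.sha256(f.read()).hexdigest()
        #   if digest != '9dece377d8d57253ef8c78370ff15de0bb1d9e90a82c815a67ba1e621e921bfc':
        #       raise RuntimeError('SAT-package-V3.txt failed checksum verification')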

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def training_docs(self):
        return []

    def validation_docs(self):
        return []

    def test_docs(self):
        data = []
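        # Each question in SAT-package-V3 is a blank-line-separated block whose
        # last eight lines are (as inferred from the indexing below): a source
        # line, the query word pair, five candidate pairs, and a one-letter
        # answer key (a-e). Lines starting with '#' are file comments.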

        with open("data/sat/SAT-package-V3.txt", "r") as f:
            record = []
            for line in f.read().splitlines():
                if not line:
                    # A blank line ends the current record.
                    if record:
                        data.append(record)
                        record = []
                elif line.startswith('#'):
                    # Skip comment lines.
                    continue
                else:
                    record.append(line)
            # Keep the final record if the file doesn't end with a blank line,
            # but don't append an empty one if it does.
            if record:
                data.append(record)

        docs = []

        for record in data:
            source = record[-8]
            query = record[-7]
            choices = record[-6:-1]
            answer_key = record[-1]

            doc = {
                'source': source,
                'query': query,
                'choices': choices,
                'answer_key': answer_key,
            }
            docs.append(doc)

        return docs

    def fewshot_description(self):
        # This format is ONLY used for deduplication purposes. For the actual
        # task evaluation we'll need a different strategy suited to this task.
        return "first thing is to second thing as\nthird thing is to fourth thing\nfifth thing is to sixth thing\nseventh thing is to eighth thing\nninth thing is to tenth thing\neleventh thing is to twelfth thing\nanswer which is either a b c d or e"

    def doc_to_text(self, doc, include_target=True):
        # SAT Analogies currently only writes out full examples; partial
        # evaluation still needs to be implemented.
        def format_qn(x):
            return x[0] + ' is to ' + x[1]
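
        # The rendered text looks like this (hypothetical example words):
        #   ostrich is to bird as
        #   lion is to cat
        #   ... (four more candidate pairs, one per line) ...
        #   a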

        query = doc['query']
        choices = doc['choices']
        answer = doc['answer_key']

        query_words = query.split(' ')[:2]
        text = format_qn(query_words) + ' as' + '\n'

        for choice in choices:
            choice_words = choice.split(' ')[:2]
            text += format_qn(choice_words) + '\n'

        if include_target:
            text += answer

        return text

    def evaluate(self, docs, lm):
        golds = [doc["answer_key"] for doc in docs]
        preds = []
        for doc in tqdm_lib.tqdm(docs):
            ctx = self.fewshot_context(
                doc=doc,
                num_fewshot=1,
                provide_description=None,
                # TODO: decide whether Dataset.evaluate() should receive
                # num_fewshot / provide_description instead of hard-coding them.
            )
            loglikelihoods = []
            for choice in doc["choices"]:
                # Score each candidate pair as a continuation of the context.
                loglikelihoods.append(lm.loglikelihood(ctx, " " + choice))
            # Map the best-scoring choice index (0-4) to its letter (a-e) so
            # predictions are comparable with the answer-key golds.
            preds.append(chr(ord('a') + int(np.argmax(loglikelihoods))))

        return simple_accuracy_metric(preds=preds, golds=golds)
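
# A minimal usage sketch (assumes an `lm` object exposing the same
# loglikelihood(context, continuation) interface the code above calls):
#
#   task = SATAnalogies()
#   task.download()
#   print(task.evaluate(task.test_docs(), lm))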