base.py 9.22 KB
Newer Older
Leo Gao's avatar
Leo Gao committed
1
2
import abc
import random
thefazzer's avatar
thefazzer committed
3
import numpy as np
Jonathan Tow's avatar
Jonathan Tow committed
4
import sklearn
Leo Gao's avatar
Leo Gao committed
5
import math
Jason Phang's avatar
gpt3  
Jason Phang committed
6

Jason Phang's avatar
Jason Phang committed
7

Leo Gao's avatar
Leo Gao committed
8
9
class LM(abc.ABC):
    """Abstract interface to a language model.

    Concrete subclasses must implement `loglikelihood` and `greedy_until`;
    downstream tasks interact with models only through these methods.
    """

    @abc.abstractmethod
    def loglikelihood(self, requests):
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list
            A list of pairs (context, continuation)
            context: str
                Context string
            continuation: str
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.
        :return: list
            A list of pairs (logprob, isgreedy)
            logprob: float
                The log probability of `continuation`
            isgreedy: bool
                Whether `continuation` would be generated by greedy sampling from `context`
        """
        pass

    @abc.abstractmethod
    def greedy_until(self, requests):
        """Generate greedily until a stopping sequence.

        :param requests: list
            A list of pairs (context, until)
            context: str
                Context string
            until: str
                The string sequence to generate until. This string sequence may
                span across multiple tokens, or may be part of one token.
        :return: list
            A list of strings continuation
            continuation: str
                The generated continuation.
        """
        pass

    @classmethod
    def create_from_arg_string(cls, arg_string):
        """Constructor method, in case models need additional arguments
        e.g. OpenAI API engine, paths for loading, other params

        :param arg_string: str
            Left up to individual model class to handle
        """
        return cls()

Leo Gao's avatar
Leo Gao committed
61

62
class Task(abc.ABC):
    """Abstract base class for an evaluation task.

    A task owns its dataset (train/validation/test document iterables),
    knows how to render a document into LM input text and a gold target,
    and converts LM results into submetric values.
    """

    def __init__(self):
        # Fetch the dataset eagerly on construction (no-op by default).
        self.download()
        # Lazily-materialized cache of training docs for fewshot sampling.
        self._training_docs = None

    def download(self):
        """Downloads the task dataset if necessary"""
        pass

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        return []

    def fewshot_examples(self, k):
        """Sample `k` documents from the training set for fewshot prompting.

        Training docs are materialized into a list once and cached, since
        training_docs() may return a one-shot iterable.
        """
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())
        return random.sample(self._training_docs, k)

    @abc.abstractmethod
    def doc_to_text(self, doc):
        """Render `doc` as the LM input text (context/question portion)."""
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        """Render `doc`'s gold answer as the target continuation string."""
        pass

    @abc.abstractmethod
    def construct_requests(self, doc, ctx):
        """ Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def fewshot_description(self):
        """Natural-language task description prepended to fewshot prompts.

        Empty by default; subclasses override to provide one.
        """
        return ""

    def fewshot_context(self, doc, num_fewshot, provide_description):
        """Build the full prompt: [description] + fewshot examples + `doc`'s question.

        :param doc: the document to evaluate
        :param num_fewshot: int, number of labeled examples to prepend
        :param provide_description: bool, whether to include fewshot_description()
        :return: str
        """
        raw_description = self.fewshot_description()
        # Description is only included when requested AND non-empty.
        description = (raw_description + "\n===\n\n") if provide_description and raw_description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            labeled_examples = "\n\n".join(
                [self.doc_to_text(doc) + self.doc_to_target(doc) for doc in self.fewshot_examples(k=num_fewshot)]
            ) + "\n\n"

        example = self.doc_to_text(doc).strip()
        return description + labeled_examples + example


def mean(arr):
    """Arithmetic mean of the numbers in `arr`."""
    total = 0
    count = 0
    for value in arr:
        total += value
        count += 1
    return total / count

Jason Phang's avatar
Jason Phang committed
186

Jonathan Tow's avatar
Jonathan Tow committed
187
188
189
def median(arr):
    """Return the median element of `arr`.

    :param arr: non-empty sequence of comparable values
    :return: the middle element (for even-length input, the upper of the
        two middle elements — matching the original index choice — rather
        than their mean)
    """
    # Bug fix: the previous implementation indexed into the UNSORTED list,
    # returning an arbitrary element unless the caller had pre-sorted it.
    return sorted(arr)[len(arr) // 2]

Jason Phang's avatar
Jason Phang committed
190

Jonathan Tow's avatar
Jonathan Tow committed
191
192
193
194
195
196
def matthews_corrcoef(items):
    """Matthews correlation coefficient over (gold, prediction) pairs.

    :param items: iterable of (gold, pred) pairs
    :return: MCC as computed by sklearn.metrics.matthews_corrcoef
    """
    golds, preds = zip(*items)
    return sklearn.metrics.matthews_corrcoef(golds, preds)

Jason Phang's avatar
Jason Phang committed
197

thefazzer's avatar
thefazzer committed
198
199
200
201
def f1_score(items):
    """Binary F1 score over (gold, prediction) pairs.

    :param items: iterable of (gold, pred) pairs
    :return: F1 as computed by sklearn.metrics.f1_score
    """
    golds, preds = zip(*items)
    fscore = sklearn.metrics.f1_score(golds, preds)
    # np.max of a scalar is the scalar itself; kept for parity with the original.
    return np.max(fscore)
thefazzer's avatar
thefazzer committed
205

Jason Phang's avatar
Jason Phang committed
206

thefazzer's avatar
thefazzer committed
207
208
209
210
211
def acc_all(items):
    """Question-level accuracy: a question counts as correct only if ALL of
    its answers are labeled correctly.

    :param items: iterable of (pred, doc) pairs, where each doc carries
        doc["idx"]["question"] (the question id) and doc["label"]
        (1 meaning the gold answer is "true")
    :return: float in [0, 1]
    """
    # Only count as correct if all answers are labeled correctly for each question
    question_scoring_dict = {}
    # Unzip once — the original materialized list(zip(*items)) twice.
    preds, docs = zip(*items)

    for doc, pred in zip(docs, preds):
        question_id = doc["idx"]["question"]
        if question_id not in question_scoring_dict:
            question_scoring_dict[question_id] = []

        gold_label = doc["label"] == 1
        question_scoring_dict[question_id].append(gold_label == pred)

    acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
    return acc

Jason Phang's avatar
Jason Phang committed
224
225
226
227
228
229
230
231
232
233

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Compute max metric between prediction and each ground truth.

    :param metric_fn: callable(prediction, ground_truth) -> comparable score
    :param prediction: the model's prediction
    :param ground_truths: non-empty iterable of reference answers
    :return: the best (maximum) score over all ground truths
    """
    return max(metric_fn(prediction, gt) for gt in ground_truths)


Leo Gao's avatar
Leo Gao committed
234
235
236
def perplexity(items):
    """Perplexity: exp of the negative average log-likelihood.

    :param items: sequence of per-token log-likelihoods
    :return: float perplexity
    """
    avg_loglikelihood = mean(items)
    return math.exp(-avg_loglikelihood)

237
# Number of values each request type returns per request; consumed by
# Request.__iter__ / __getitem__ to fan a request out into indexed
# sub-requests (e.g. loglikelihood yields (logprob, isgreedy) -> 2).
req_ret_lens = {
    'loglikelihood': 2,
}

Leo Gao's avatar
Leo Gao committed
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
import os
import json
import hashlib
from sqlitedict import SqliteDict

def hash_args(args):
    """Deterministically hash a sequence of str/int arguments.

    Each argument is rendered as text and NUL-terminated before hashing,
    so e.g. ("ab", "c") and ("a", "bc") produce different digests.

    :param args: iterable of str or int
    :return: str, hex-encoded SHA-256 digest
    :raises TypeError: if any argument is not a str or int
    """
    dat = b""
    for arg in args:
        # Explicit raise instead of `assert`: asserts are stripped under -O,
        # which would silently hash unsupported types.
        if not isinstance(arg, (str, int)):
            raise TypeError("hash_args only accepts str or int, got {!r}".format(type(arg)))
        dat += str(arg).encode()
        dat += b"\0"
    return hashlib.sha256(dat).hexdigest()


class CachingLM:
    """Transparent caching wrapper around an LM.

    Intercepts every attribute access (e.g. `loglikelihood`, `greedy_until`)
    via __getattr__ and returns a function that serves cached results from a
    SqliteDict where possible, delegating only the cache misses to the
    wrapped LM and preserving the original request order in the output.
    """

    def __init__(self, lm, cache_db):
        """
        :param lm: the underlying language model; uncached requests are
            forwarded to it
        :param cache_db: str, path to the sqlite cache file (parent
            directories are created if missing)
        """
        self.lm = lm
        self.cache_db = cache_db
        os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        # autocommit=True: every cached result is persisted immediately.
        self.dbdict = SqliteDict(cache_db, autocommit=True)

    def __getattr__(self, attr):
        # `attr` is the LM method name being proxied (e.g. "loglikelihood").
        def fn(requests):
            res = []
            remaining_reqs = []
            
            # figure out which ones are cached and which ones are new
            for req in requests:
                # Cache key combines the method name with the hashed request
                # args, so different methods never collide on the same args.
                hsh = attr + '_' + hash_args(req)
                if hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    # None is a placeholder; filled in below once the LM runs.
                    res.append(None)
                    remaining_reqs.append(req)
            
            # actually run the LM
            rem_res = getattr(self.lm, attr)(remaining_reqs)

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                # Advance to the next placeholder (None) slot in res; results
                # come back in the same order the misses were collected.
                while res[resptr] is not None: resptr += 1

                res[resptr] = r

                # caching
                hsh = attr + '_' + hash_args(req)
                self.dbdict[hsh] = r
                

            return res
        return fn

Jason Phang's avatar
Jason Phang committed
298

299
300
301
302
class Request:
    """A single LM request of a known type, optionally narrowed (via `index`)
    to one of the several values that request type returns."""

    def __init__(self, type, args, index=None):
        # Only request types registered in req_ret_lens are supported.
        if type not in req_ret_lens:
            raise NotImplementedError('The request type {} is not implemented!'.format(type))

        self.type = type
        self.args = args
        self.index = index

    def __iter__(self):
        # Fan out into one sub-request per return value of this request type.
        num_returns = req_ret_lens[self.type]
        for idx in range(num_returns):
            yield Request(self.type, self.args, idx)

    def __getitem__(self, i):
        # Narrow this request to its i-th return value.
        return Request(self.type, self.args, i)
Leo Gao's avatar
Leo Gao committed
315

Jason Phang's avatar
Jason Phang committed
316

Leo Gao's avatar
Leo Gao committed
317
318
class RequestFactory:
    """Builds Request objects via attribute access:
    `rf.loglikelihood(a, b)` is equivalent to `Request('loglikelihood', (a, b))`.
    """

    def __getattr__(self, attr):
        # The accessed attribute name becomes the request type; the call's
        # positional arguments become the request args.
        def make_request(*args):
            return Request(attr, args)
        return make_request


# Module-level singleton used by tasks to construct requests.
rf = RequestFactory()