import os
import transformers
from lm_eval.base import BaseLM
from lm_eval import utils
from tqdm import tqdm
import time


def get_result(response, ctxlen):
    """Process results from OpenAI API response.

    :param response: dict
        OpenAI API Response
    :param ctxlen: int
        Length of context (so we can slice them away and only keep the predictions)
    :return:
        continuation_logprobs: np.array
            Log probabilities of continuation tokens
        is_greedy: bool
            whether argmax matches given continuation exactly
    """
    is_greedy = True
    logprobs = response["logprobs"]["token_logprobs"]
    continuation_logprobs = sum(logprobs[ctxlen:])

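    # The continuation counts as greedy only if every continuation token is the
    # argmax of the model's predicted distribution at its position.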
    for i in range(ctxlen, len(response["logprobs"]["tokens"])):
        token = response["logprobs"]["tokens"][i]
        top_tokens = response["logprobs"]["top_logprobs"][i]
        top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
        if top_token != token:
            is_greedy = False
            break
    
    return continuation_logprobs, is_greedy


def oa_completion(**kwargs):
    """ Query OpenAI API for completion.
Leo Gao's avatar
Leo Gao committed
40

41
42
43
    Retry with back-off until they respond
    """
    import openai
    backoff_time = 3
    while True:
        try:
            return openai.Completion.create(**kwargs)
        except openai.error.OpenAIError:
            import traceback
            traceback.print_exc()
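            # retry with exponential back-off: sleep 3s, then 4.5s, 6.75s, ...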
            time.sleep(backoff_time)
            backoff_time *= 1.5


class GPT3LM(BaseLM):
    REQ_CHUNK_SIZE = 20

    def __init__(self, engine, truncate=False):
        """

        :param engine: str
            OpenAI API engine (e.g. davinci)
        :param truncate: bool
            Truncate input if too long (if False and input is too long, throw error)
        """
        super().__init__()

        import openai
        self.engine = engine
        self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained('gpt2')

        self.vocab_size = self.tokenizer.vocab_size

        # to make the annoying "Using pad_token, but it is not set yet." error go away
        self.tokenizer.pad_token = "<|endoftext|>"
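        # sanity check: GPT-2 BPE should keep the two newlines in "\n\n" as two
        # separate tokens (id 198) rather than merging them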
        assert self.tokenizer.encode('hello\n\nhello') == [31373, 198, 198, 31373]
        self.truncate = truncate
        self.end_of_text_token_id = self.tokenizer.convert_tokens_to_ids(["<|endoftext|>"])[0]

        # Read from environment variable OPENAI_API_SECRET_KEY
        openai.api_key = os.environ["OPENAI_API_SECRET_KEY"]

    @property
    def eot_token_id(self):
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token
        return 2048

    @property
    def max_gen_toks(self):
        return 256

    @property
    def batch_size(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    @property
    def device(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def tok_encode(self, string: str):
        return self.tokenizer.encode(string, add_special_tokens=False)
    
    def tok_decode(self, tokens):
        return self.tokenizer.decode(tokens)

    def _loglikelihood_tokens(self, requests, disable_tqdm=False):
        res = []

        def _collate(x):
            # This doesn't efficiently handle last-token differences yet; those are
            # awkward because the ~100 top logprobs we get to see aren't guaranteed
            # to contain all the continuations we care about, so we'd need some kind
            # of fallback for when they don't.
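            # sort key: longest (context + continuation) first, token ids as tiebreaker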
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)
        
        reord = utils.Reorderer(requests, _collate)

        for chunk in tqdm(list(utils.chunks(reord.get_reordered(), self.REQ_CHUNK_SIZE)), disable=disable_tqdm):
            inps = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                # max_length+1 because the API takes up to 2049 tokens, including the first context token
                inp = (context_enc + continuation_enc)[-(self.max_length+1):]
                # TODO: the logic is much simpler if we just look at the length of continuation tokens
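                # if the slice above dropped tokens from the left, the surviving
                # context is shorter by that amount, so shrink ctxlen to match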
                ctxlen = len(context_enc) - max(0, len(context_enc) + len(continuation_enc) - (self.max_length+1))

                inps.append(inp)
                ctxlens.append(ctxlen)

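            # echo=True with max_tokens=0 asks the API to score the prompt tokens
            # without generating anything; logprobs=10 also returns the top-10
            # candidates per position, which get_result uses for the greedy check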
            response = oa_completion(
                engine=self.engine,
                prompt=inps,
                echo=True,
                max_tokens=0, temperature=0.,
                logprobs=10,
            )

            for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(response.choices, ctxlens, chunk):
                answer = get_result(resp, ctxlen)

                res.append(answer)

                # partial caching
                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)

        return reord.get_original(res)

    def greedy_until(self, requests):
        if not requests:
            return []
        res = []

        def _collate(x):
            toks = self.tok_encode(x[0])
            return len(toks), x[0]
        
        reord = utils.Reorderer(requests, _collate)

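        # group consecutive requests that share the same stop sequence, so each
        # API call can pass a single `stop` argument; chunks are capped at `size`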
        def sameuntil_chunks(xs, size):
            ret = []
            lastuntil = xs[0][1]
            for x in xs:
                if len(ret) >= size or x[1] != lastuntil:
                    yield ret, lastuntil
                    ret = []
                    lastuntil = x[1]
                ret.append(x)
            
            if ret:
                yield ret, lastuntil

        # todo: more intelligent batching for heterogeneous `until`
        for chunk, until in tqdm(list(sameuntil_chunks(reord.get_reordered(), self.REQ_CHUNK_SIZE))):
            inps = []
            for context, _ in chunk:
                context_enc = self.tok_encode(context)
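                # keep only as much context as leaves room for max_gen_toks generated tokens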
                inp = context_enc[-(self.max_length - self.max_gen_toks):]
                inps.append(inp)

            response = oa_completion(
                engine=self.engine,
                prompt=inps,
                max_tokens=self.max_gen_toks, 
                temperature=0.,
                logprobs=10,
                stop=until,
            )

            for resp, (context, until_) in zip(response.choices, chunk):
                s = resp['text']

                for term in until_:
                    s = s.split(term)[0]

                # partial caching
                self.cache_hook.add_partial("greedy_until", (context, until_), s)
                
                res.append(s)
        
        return reord.get_original(res)

    def _model_call(self, inps):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        # Isn't used because we override greedy_until
        raise NotImplementedError()
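

# Minimal usage sketch (illustrative; assumes OPENAI_API_SECRET_KEY is set and
# uses BaseLM's public loglikelihood/greedy_until interface):
#
#   lm = GPT3LM(engine="davinci")
#   lm.loglikelihood([("The capital of France is", " Paris")])
#   lm.greedy_until([("Q: What is 2 + 2?\nA:", ["\n"])])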