import copy
import os
from collections import defaultdict
from importlib.util import find_spec
from typing import List, Literal, Optional, Tuple

from tqdm import tqdm

import lm_eval.models.utils
from lm_eval import utils
from lm_eval.api.model import LM, TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import retry_on_specific_exceptions
from lm_eval.utils import eval_logger


def get_result(response, ctxlen: int) -> Tuple[float, bool]:
    """Process results from OpenAI API response.

    :param response: dict
        OpenAI API Response
    :param ctxlen: int
        Length of context (so we can slice them away and only keep the predictions)
    :return:
        continuation_logprobs: np.array
            Log probabilities of continuation tokens
        is_greedy: bool
            whether argmax matches given continuation exactly
    """
    is_greedy = True
    logprobs = response.logprobs.token_logprobs
    continuation_logprobs = sum(logprobs[ctxlen:])

    for i in range(ctxlen, len(response.logprobs.tokens)):
        # compare each returned continuation token (a string) against the
        # argmax of its top_logprobs dict
        token = response.logprobs.tokens[i]
        top_tokens = response.logprobs.top_logprobs[i]
        top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
        if top_token != token:
            is_greedy = False
            break

    return continuation_logprobs, is_greedy
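
# Worked example (illustrative): with ctxlen=1 and
#   token_logprobs = [None, -0.5, -0.3]  # the first prompt token has no logprob
# the continuation log-likelihood is sum(logprobs[1:]) = -0.8, and is_greedy
# stays True only if, at every continuation position, the returned token is
# also the argmax of its top_logprobs entry.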


def oa_completion(client, chat: bool = False, **kwargs):
    """Query OpenAI API for completion.

    Retry with back-off until they respond
    """
    if not find_spec("openai") or not find_spec("tiktoken"):
        raise Exception(
            "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. "
            "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`"
        )
    else:
        import openai

    def _exception_callback(e: Exception, sleep_time: float) -> None:
        import traceback

        traceback.print_exc()

    @retry_on_specific_exceptions(
        on_exceptions=[openai.OpenAIError],
        max_retries=None,  # retry forever, consider changing
        on_exception_callback=_exception_callback,
    )
    def completion():
        if chat:
            return client.chat.completions.create(**kwargs)
        else:
            return client.completions.create(**kwargs)

    return completion()
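
# Caller-side sketch (illustrative, not executed here): given an
# `openai.OpenAI()` client, a legacy completions request looks like
#
#   oa_completion(client=client, model="gpt-3.5-turbo-instruct",
#                 prompt=["The capital of France is"], max_tokens=8,
#                 temperature=0.0)
#
# while a chat request passes `chat=True` with `messages=[...]` instead of
# `prompt=...`, mirroring the two calls made by the classes below.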


@register_model("openai-completions", "local-completions")
class OpenaiCompletionsLM(TemplateLM):
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        model: str,
        base_url: Optional[str] = None,
        tokenizer: Optional[str] = None,
        tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken",
        truncate: bool = False,
        chat: bool = False,
        max_gen_toks: int = 256,
        batch_size: int = 1,
        seed: int = 1234,
        max_length: Optional[int] = None,
    ) -> None:
        """

        :param model: str
            OpenAI API model name (e.g. gpt-3.5-turbo-instruct)
        :param truncate: bool
            Truncate input if too long (if False and input is too long, throw error)
        """
        super().__init__()
        self.seed = seed
        try:
            import openai  # noqa: E401
            import tiktoken
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
109
    please install these via `pip install lm-eval[openai]` or `pip install -e .\"[openai]\"`",
            )
        self.model = model
        self.base_url = base_url
        self.tokenizer_backend = tokenizer_backend
        self.truncate = truncate
        self.chat = chat
        self._batch_size = int(batch_size)
        self._max_gen_toks = max_gen_toks
        self._max_length = max_length

        # if we have a local model, use HF tokenizer over tiktoken
        if self.tokenizer_backend == "huggingface":
            import transformers  # noqa: E401

            self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                tokenizer if tokenizer else self.model
            )
            self.vocab_size = self.tokenizer.vocab_size
            self.end_of_text_token_id = self.tokenizer.eos_token_id
        elif self.tokenizer_backend == "tiktoken":
            if self.base_url:
                eval_logger.warning(
                    f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. "
                    "Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken."
                )

            self.tokenizer = tiktoken.encoding_for_model(self.model)
            self.vocab_size = self.tokenizer.n_vocab
            self.end_of_text_token_id = self.tokenizer.eot_token
        else:
            raise ValueError(
                f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}"
            )

        # Read from environment variable OPENAI_API_KEY
        # Set to EMPTY for local
        openai.api_key = os.environ["OPENAI_API_KEY"]
        if self.base_url:
            self.client = openai.OpenAI(base_url=self.base_url)
        else:
            self.client = openai.OpenAI()

    @property
    def eot_token_id(self):
        return self.end_of_text_token_id

    @property
    def max_length(self) -> int:
        if self._max_length:
            return self._max_length
        else:
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        return self._max_gen_toks

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @property
    def device(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def tok_encode(self, string: str, **kwargs) -> List[int]:
        return self.tokenizer.encode(string)

    def tok_decode(self, tokens: List[int]) -> str:
        return self.tokenizer.decode(tokens)
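
    # Round-trip sketch (illustrative): with the tiktoken backend,
    # `tok_decode(tok_encode(s)) == s` for well-formed text, since BPE
    # encoding is lossless on valid UTF-8 input.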

    def _loglikelihood_tokens(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        res = []

        def _collate(x):
            # this doesn't efficiently handle last-token differences yet, but those are kinda annoying because
            # it's not guaranteed that the 100 or so logprobs we get to see actually contain all the continuations
            # we care about, and so we need some kind of backup for when it isn't
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        re_ord = utils.Reorderer(requests, _collate)

        for chunk in tqdm(
            list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            inps = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                # max_length+1 because the API takes up to 2049 tokens, including the first context token
                inp = (context_enc + continuation_enc)[-(self.max_length + 1) :]
                # TODO: the logic is much simpler if we just look at the length of continuation tokens
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length + 1)
                )
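                # Worked example (illustrative): with max_length=2048,
                # len(context_enc)=2000 and len(continuation_enc)=100, `inp`
                # keeps the last 2049 tokens and
                # ctxlen = 2000 - max(0, 2100 - 2049) = 1949, so the 51
                # dropped context tokens are not counted as continuation.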

                inps.append(inp)
                ctxlens.append(ctxlen)

            response = oa_completion(
                client=self.client,
                model=self.model,
                chat=self.chat,
                prompt=inps,
                echo=True,
                max_tokens=0,
                temperature=0.0,
                logprobs=10,
                seed=self.seed,
            )

            for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(
                response.choices, ctxlens, chunk
            ):
                answer = get_result(resp, ctxlen)

                res.append(answer)

                # partial caching
                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
        return re_ord.get_original(res)

    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        if not requests:
            return []
        res = []
        requests = [req.args for req in requests]

        def _collate(x):
            toks = self.tok_encode(x[0])
            return len(toks), x[0]

        re_ord = utils.Reorderer(requests, _collate)

        def sameuntil_chunks(xs, size):
            ret = []
            lastuntil = xs[0][1]
            for x in xs:
                if len(ret) >= size or x[1] != lastuntil:
                    yield ret, lastuntil
                    ret = []
                    lastuntil = x[1]
                ret.append(x)

            if ret:
                yield ret, lastuntil
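
        # Example (illustrative): with size=2 and `until` values
        # [A, A, A, B] for requests [r0, r1, r2, r3], this yields
        # ([r0, r1], A), then ([r2], A), then ([r3], B): consecutive requests
        # are batched only while they share the same stop sequence.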

        # todo: more intelligent batching for heterogeneous `until`
        for chunk, request_args in tqdm(
            list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            inps = []
            self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks)
            for context, _ in chunk:
                context_enc = self.tok_encode(context)
                inp = context_enc[-(self.max_length - self.max_gen_toks) :]
                inps.append(inp)

            until = request_args.get("until", ["<|endoftext|>"])
            request_args["temperature"] = request_args.get("temperature", 0)

            response = oa_completion(
                client=self.client,
                model=self.model,
                prompt=inps,
                max_tokens=self.max_gen_toks,
                stop=until,
                seed=self.seed,
                **{
                    k: v
                    for k, v in request_args.items()
                    if k not in {"do_sample", "max_gen_toks", "until"}
                },
            )
            for resp, (context, args_) in zip(response.choices, chunk):
                s = getattr(resp, "text")

                until_ = until

                for term in until_:
                    if len(term) > 0:
                        s = s.split(term)[0]

                # partial caching
                self.cache_hook.add_partial(
                    "generate_until", (context, {"until": until_}), s
                )

                res.append(s)
        return re_ord.get_original(res)

    def _model_call(self, inps):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        # Isn't used because we override generate_until
        raise NotImplementedError()

    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> List[float]:
        loglikelihoods = []

        for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )
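            # Each disjoint window is an (input_tokens, pred_tokens) pair whose
            # predicted tokens cover the string exactly once, so summing the
            # per-window log-likelihoods below gives the log-likelihood of the
            # full string.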

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]

            string_nll = self._loglikelihood_tokens(
                rolling_token_windows,
                disable_tqdm=True,
            )

            # discard is_greedy
            string_nll = [x[0] for x in string_nll]

            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods
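
# Usage sketch (illustrative, assuming lm-eval's CLI): this class is selected
# via the names registered above, e.g.
#
#   lm_eval --model openai-completions \
#       --model_args model=gpt-3.5-turbo-instruct \
#       --tasks lambada_openai
#
# `local-completions` points the same class at a self-hosted OpenAI-compatible
# server by adding `base_url=...` (and typically
# `tokenizer_backend=huggingface`) to `--model_args`.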


@register_model("openai-chat-completions", "local-chat-completions")
class OpenaiChatCompletionsLM(LM):
    def __init__(
        self,
        model: str = "gpt-3.5-turbo",  # GPT model or Local model using HuggingFace model paths
        base_url: Optional[str] = None,
        truncate: bool = False,
        **kwargs,
    ) -> None:
        """

        :param model: str
            OpenAI API model name (e.g. gpt-3.5-turbo). This class implements
            an OpenAI-style chat completion API, for accessing both OpenAI and
            locally-hosted models behind an OpenAI-compatible endpoint,
            generating with the **kwargs passed on init.
        :param truncate: bool
            Truncate input if too long (if False and input is too long, throw error)
        """
        super().__init__()
        try:
            import openai  # noqa: E401
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'openai' LM type, but the `openai` package is not installed. "
                "Please install it via `pip install lm-eval[openai]` or `pip install -e .[openai]`",
            )
        self.model = model
        self.base_url = base_url
        self.truncate = truncate

        # Read from environment variable OPENAI_API_KEY
        # Set to EMPTY for local
        if self.base_url:
            self.client = openai.OpenAI(base_url=self.base_url)
        else:
            self.client = openai.OpenAI()  # openai.AsyncOpenAI()

    @property
    def max_length(self) -> int:
        # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token
        return 2048

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        # Not used: chat requests are sent one conversation at a time.
        raise NotImplementedError()

    @property
    def device(self):
        # Not used: this model runs behind a remote API.
        raise NotImplementedError()

    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        res = defaultdict(list)
        re_ords = {}

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
        for key, reqs in grouper.get_grouped().items():
            # within each set of reqs for given kwargs, we reorder by token length, descending.
            re_ords[key] = utils.Reorderer(
                [req.args for req in reqs], lambda x: (-len(x[0]), x[0])
            )

        pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
        for key, re_ord in re_ords.items():
            # n needs to be 1 because the messages in a chat completion
            # request do not form a batch; they are treated as a single
            # conversation.
            chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1)
            for chunk in chunks:
                contexts, all_gen_kwargs = zip(*chunk)
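                # each context is sent as a single-turn user message; no
                # multi-turn chat history is constructed here.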
                inps = [{"role": "user", "content": context} for context in contexts]

                gen_kwargs = all_gen_kwargs[0]
                until = None
                if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict):
                    if "do_sample" in kwargs.keys():
                        kwargs.pop("do_sample")
                    if "until" in kwargs.keys():
                        until = kwargs.pop("until")
                        if isinstance(until, str):
                            until = [until]
                        elif not isinstance(until, list):
                            raise ValueError(
                                f"Expected repr(kwargs['until']) to be of type Union[str, list] but got {until}"
                            )
                        kwargs["stop"] = until
                    kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks)
                else:
                    raise ValueError(
                        f"Expected repr(kwargs) to be of type repr(dict) but got {kwargs}"
                    )

                response = oa_completion(
                    client=self.client,
                    chat=True,
                    messages=inps,
                    model=self.model,
                    **kwargs,
                )

                for resp, (context, args_) in zip(response.choices, chunk):
                    s = resp.message.content

                    if until is not None:
                        for term in until:
                            if len(term) > 0:
                                s = s.split(term)[0]

                    res[key].append(s)

                    self.cache_hook.add_partial(
                        "generate_until", (context, {"until": until}), s
                    )
                    pbar.update(1)
            # reorder this group of results back to original unsorted form
            res[key] = re_ord.get_original(res[key])

        pbar.close()

        return grouper.get_original(res)

    def loglikelihood(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")

    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")
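
# Usage sketch (illustrative, assuming lm-eval's CLI):
#
#   lm_eval --model openai-chat-completions \
#       --model_args model=gpt-3.5-turbo \
#       --tasks gsm8k
#
# `local-chat-completions` targets a self-hosted OpenAI-compatible chat server,
# e.g. `--model_args model=<served-model>,base_url=http://localhost:8000/v1`.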