import copy
import os
from collections import defaultdict
from importlib.util import find_spec
from typing import List, Literal, Optional, Tuple

from tqdm import tqdm

import lm_eval.models.utils
from lm_eval import utils
from lm_eval.api.model import LM, TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import retry_on_specific_exceptions
from lm_eval.utils import eval_logger


def get_result(response, ctxlen: int) -> Tuple[float, bool]:
    """Process results from OpenAI API response.

    :param response: dict
        OpenAI API Response
    :return:
        continuation_logprobs: np.array
            Log probabilities of continuation tokens
        is_greedy: bool
            whether argmax matches given continuation exactly
    """
    is_greedy = True
    logprobs = response.logprobs.token_logprobs
    continuation_logprobs = sum(logprobs[ctxlen:])

    for i in range(ctxlen, len(response.logprobs.tokens)):
        token = response.logprobs.tokens[i]
        top_tokens = response.logprobs.top_logprobs[i]
        top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
        if top_token != token:
            is_greedy = False
            break

    return continuation_logprobs, is_greedy
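
# Illustrative sketch (comment only; shapes assume the legacy Completions
# API with echo=True): `logprobs.tokens[i]` is the i-th token string,
# `logprobs.token_logprobs[i]` its logprob (None for the very first token),
# and `logprobs.top_logprobs[i]` a dict of top candidate tokens, e.g.
#     tokens         = ["The", " capital"]
#     token_logprobs = [None, -0.4]
#     top_logprobs   = [None, {" capital": -0.4, " city": -2.1}]
# get_result(choice, ctxlen=1) returns (-0.4, True): it sums the logprobs
# from position 1 onward and checks each scored token against the argmax of
# its top_logprobs entry. ctxlen is always >= 1 here, so the leading None
# never enters the sum.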


def oa_completion(client, chat: bool = False, **kwargs):
    """Query OpenAI API for completion.

    Retry with back-off until they respond
    """
    if not find_spec("openai") or not find_spec("tiktoken"):
        raise Exception(
            "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. "
            "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`"
        )
    else:
        import openai

    def _exception_callback(e: Exception, sleep_time: float) -> None:
        import traceback

        traceback.print_exc()

    @retry_on_specific_exceptions(
        on_exceptions=[openai.OpenAIError],
        max_retries=None,  # retry forever, consider changing
        on_exception_callback=_exception_callback,
    )
    def completion():
        if chat:
            return client.chat.completions.create(**kwargs)
        else:
            return client.completions.create(**kwargs)

    return completion()
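
# Example usage (hypothetical values; sketch only):
#     client = openai.OpenAI()
#     response = oa_completion(
#         client=client,
#         model="gpt-3.5-turbo-instruct",
#         prompt=["The capital of France is"],
#         max_tokens=1,
#         temperature=0.0,
#     )
# Note that max_retries=None means openai.OpenAIError is retried forever;
# callers that need an upper bound should impose their own timeout.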


@register_model("openai-completions", "local-completions")
class OpenaiCompletionsLM(TemplateLM):
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        model: str,
        base_url: Optional[str] = None,
        tokenizer: Optional[str] = None,
        tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken",
        truncate: bool = False,
        max_gen_toks: int = 256,
        batch_size: int = 1,
        seed: int = 1234,
        max_length: Optional[int] = None,
    ) -> None:
        """

        :param model: str
            OpenAI API model (e.g. gpt-3.5-turbo-instruct)
        :param truncate: bool
            Truncate input if too long (if False and input is too long, throw error)
        """
        super().__init__()
        self.seed = seed
        try:
            import openai  # noqa: E401
            import tiktoken
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
106
    please install these via `pip install lm-eval[openai]` or `pip install -e .\"[openai]\"`",
            )
        self.model = model
        self.base_url = base_url
        self.tokenizer_backend = tokenizer_backend
        self.truncate = truncate
        self._batch_size = int(batch_size)
        self._max_gen_toks = max_gen_toks
        self._max_length = max_length

        # if we have a local model, use HF tokenizer over tiktoken
        if self.tokenizer_backend == "huggingface":
            import transformers  # noqa: E401

            self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                tokenizer if tokenizer else self.model
            )
            self.vocab_size = self.tokenizer.vocab_size
            self.end_of_text_token_id = self.tokenizer.eos_token_id
        elif self.tokenizer_backend == "tiktoken":
            if self.base_url:
                eval_logger.warning(
                    f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. "
                    "Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken."
                )

            self.tokenizer = tiktoken.encoding_for_model(self.model)
            self.vocab_size = self.tokenizer.n_vocab
            self.end_of_text_token_id = self.tokenizer.eot_token
        else:
            raise ValueError(
                f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}"
            )
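
        # For reference (based on tiktoken's published model mappings):
        # tiktoken.encoding_for_model("gpt-3.5-turbo-instruct") resolves to
        # the "cl100k_base" encoding, and encoding_for_model raises a
        # KeyError for model names it does not recognize, which is why the
        # warning above recommends a HF tokenizer for custom base_url
        # deployments.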

        # Read from environment variable OPENAI_API_KEY
        # Set to EMPTY for local
        openai.api_key = os.environ["OPENAI_API_KEY"]
        if self.base_url:
            self.client = openai.OpenAI(base_url=self.base_url)
        else:
            self.client = openai.OpenAI()

    @property
    def eot_token_id(self):
        return self.end_of_text_token_id

    @property
    def max_length(self) -> int:
        if self._max_length:
            return self._max_length
        else:
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        return self._max_gen_toks

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @property
    def device(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def tok_encode(self, string: str, **kwargs) -> List[int]:
        return self.tokenizer.encode(string)

    def tok_decode(self, tokens: List[int]) -> str:
        return self.tokenizer.decode(tokens)

    def _loglikelihood_tokens(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        res = []

        def _collate(x):
            # this doesn't efficiently handle last-token differences yet, but those are kinda annoying because
            # it's not guaranteed that the 100 or so logprobs we get to see actually contain all the continuations
            # we care about, and so we need some kind of backup for when it isn't
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        re_ord = utils.Reorderer(requests, _collate)
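        # e.g. the _collate key sorts requests longest-first (by total token
        # count, ties broken by token ids), so each batch drawn below holds
        # similarly sized requests.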

        for chunk in tqdm(
            list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            inps = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                # max_length+1 because the API takes up to 2049 tokens, including the first context token
                inp = (context_enc + continuation_enc)[-(self.max_length + 1) :]
                # TODO: the logic is much simpler if we just look at the length of continuation tokens
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length + 1)
                )
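                # e.g. with max_length=2048, a 2040-token context and a
                # 20-token continuation: inp keeps the last 2049 tokens and
                # ctxlen = 2040 - (2060 - 2049) = 2029, so the 11 context
                # tokens truncated from the front are never scored.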

                inps.append(inp)
                ctxlens.append(ctxlen)

            response = oa_completion(
                client=self.client,
                model=self.model,
                prompt=inps,
                echo=True,  # max_tokens=0 generates nothing, so echo the prompt to get its logprobs
                max_tokens=0,
                temperature=0.0,
                logprobs=10,
                seed=self.seed,
            )

            for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(
                response.choices, ctxlens, chunk
            ):
                answer = get_result(resp, ctxlen)

                res.append(answer)

                # partial caching
                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
        return re_ord.get_original(res)

    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        if not requests:
            return []
        res = []
        requests = [req.args for req in requests]

        def _collate(x):
            toks = self.tok_encode(x[0])
            return len(toks), x[0]

        re_ord = utils.Reorderer(requests, _collate)

        def sameuntil_chunks(xs, size):
            ret = []
            lastuntil = xs[0][1]
            for x in xs:
                if len(ret) >= size or x[1] != lastuntil:
                    yield ret, lastuntil
                    ret = []
                    lastuntil = x[1]
                ret.append(x)

            if ret:
                yield ret, lastuntil
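
        # e.g. with size=2 and `until` values [A, A, A, B] in re_ord order,
        # sameuntil_chunks yields ([r1, r2], A), ([r3], A), ([r4], B): a
        # chunk never mixes stop sequences, so each API call below can use a
        # single consistent `stop` argument.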

        # todo: more intelligent batching for heterogeneous `until`
        for chunk, request_args in tqdm(
            list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            inps = []
            self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks)
            for context, _ in chunk:
                context_enc = self.tok_encode(context)
                inp = context_enc[-(self.max_length - self.max_gen_toks) :]
                inps.append(inp)

            until = request_args.get("until", ["<|endoftext|>"])
            request_args["temperature"] = request_args.get("temperature", 0)

            response = oa_completion(
                client=self.client,
                model=self.model,
                prompt=inps,
                max_tokens=self.max_gen_toks,
                stop=until,
                seed=self.seed,
                **{
                    k: v
                    for k, v in request_args.items()
                    if k not in {"do_sample", "max_gen_toks", "until"}
                },
            )
            for resp, (context, args_) in zip(response.choices, chunk):
                s = resp.text

                until_ = until

                for term in until_:
                    if len(term) > 0:
                        s = s.split(term)[0]

                # partial caching
                self.cache_hook.add_partial(
                    "generate_until", (context, {"until": until_}), s
                )

                res.append(s)
        return re_ord.get_original(res)

    def _model_call(self, inps):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        # Isn't used because we override generate_until
        raise NotImplementedError()

    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> List[float]:
        loglikelihoods = []

        for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )
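
            # The continuation halves of these disjoint windows cover every
            # token of `string` exactly once, so summing the per-window
            # scores below yields the rolling loglikelihood of the full
            # string.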

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]

            string_nll = self._loglikelihood_tokens(
                rolling_token_windows,
                disable_tqdm=True,
            )

            # discard is_greedy
            string_nll = [x[0] for x in string_nll]

            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods


@register_model("openai-chat-completions", "local-chat-completions")
class OpenaiChatCompletionsLM(LM):
    def __init__(
        self,
        model: str = "gpt-3.5-turbo",  # GPT model or Local model using HuggingFace model paths
        base_url: Optional[str] = None,
        truncate: bool = False,
        **kwargs,
    ) -> None:
        """Implements an OpenAI-style chat completion API for accessing both
        OpenAI-hosted and locally-hosted (OpenAI API-compatible) models.

        :param model: str
            OpenAI API model (e.g. gpt-3.5-turbo), using the **gen_kwargs
            passed on init
        :param truncate: bool
            Truncate input if too long (if False and input is too long, throw error)
        """
        super().__init__()
        try:
            import openai  # noqa: E401
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
    please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`",
            )
        self.model = model
        self.base_url = base_url
        self.truncate = truncate

        # Read from environment variable OPENAI_API_KEY
        # Set to EMPTY for local
        if self.base_url:
            self.client = openai.OpenAI(base_url=self.base_url)
        else:
            self.client = openai.OpenAI()  # openai.AsyncOpenAI()

    @property
    def max_length(self) -> int:
        # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token
        return 2048

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    @property
    def device(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        res = defaultdict(list)
        re_ords = {}

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
        for key, reqs in grouper.get_grouped().items():
            # within each set of reqs for given kwargs, we reorder by the
            # character length of the context, descending.
            re_ords[key] = utils.Reorderer(
                [req.args for req in reqs], lambda x: (-len(x[0]), x[0])
            )

        pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
        for key, re_ord in re_ords.items():
            # n must be 1 because the chat completions endpoint does not
            # batch prompts: each request is a single conversation.
            chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1)
            for chunk in chunks:
                contexts, all_gen_kwargs = zip(*chunk)
                inps = [{"role": "user", "content": context} for context in contexts]

                gen_kwargs = all_gen_kwargs[0]
                until = None
                if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict):
                    if "do_sample" in kwargs.keys():
                        kwargs.pop("do_sample")
                    if "until" in kwargs.keys():
                        until = kwargs.pop("until")
                        if isinstance(until, str):
                            until = [until]
                        elif not isinstance(until, list):
                            raise ValueError(
                                f"Expected `kwargs['until']` to be of type Union[str, list] but got {until}"
                            )
                        kwargs["stop"] = until
                    kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks)
                else:
                    raise ValueError(
                        f"Expected kwargs to be of type dict but got {kwargs}"
                    )
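
                # e.g. gen_kwargs = {"until": "\n\n", "do_sample": False} has
                # been normalized by this point into
                # kwargs = {"stop": ["\n\n"], "max_tokens": 256}
                # (assuming the default max_gen_toks of 256).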

                response = oa_completion(
                    client=self.client,
                    chat=True,
                    messages=inps,
                    model=self.model,
                    **kwargs,
                )

                for resp, (context, args_) in zip(response.choices, chunk):
                    s = resp.message.content

                    if until is not None:
                        for term in until:
                            if len(term) > 0:
                                s = s.split(term)[0]

                    res[key].append(s)

                    self.cache_hook.add_partial(
                        "generate_until", (context, {"until": until}), s
                    )
                    pbar.update(1)
            # reorder this group of results back to original unsorted form
            res[key] = re_ord.get_original(res[key])

        pbar.close()

        return grouper.get_original(res)

    def loglikelihood(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")

    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")