import abc
import hashlib
import json
import logging
import os
from typing import TYPE_CHECKING, Any, Iterable, Optional, Type, TypeVar, Union

from tqdm import tqdm

from lm_eval import utils
from lm_eval.api.instance import Instance
from lm_eval.api.types import (
    LoglikelihoodInput,
    LoglikelihoodOutput,
)

if TYPE_CHECKING:
    from sqlitedict import SqliteDict


eval_logger = logging.getLogger(__name__)

T = TypeVar("T", bound="LM")


class LM(abc.ABC):
    def __init__(self) -> None:
        """Defines the interface that should be implemented by all LM subclasses.
        LMs are assumed to take text (strings) as input and yield strings as output
        (inputs/outputs should be tokenization-agnostic.)

        """
        # set rank and world size to a single process, by default.
        self._rank = 0
        self._world_size = 1
        self.cache_hook: "CacheHook" = CacheHook(None)

    @abc.abstractmethod
    def loglikelihood(self, requests: list[Instance]) -> list[tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list[Instance]
            A list of Instance objects, with property `args` which returns a tuple (context, continuation).
            `context: str`
                Context string. Implementations of LM must be able to handle an
                empty context string.
            `continuation: str`
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.

        :return: list[tuple[float, bool]]
            A list of pairs (logprob, isgreedy)
            `logprob: float`
                The log probability of `continuation`.
            `isgreedy: bool`
                Whether `continuation` would be generated by greedy sampling from `context`.
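
        Example (illustrative; assumes a concrete LM subclass instance `lm` and
        that each request's `args` is a (context, continuation) pair):

            # requests[0].args == ("The capital of France is", " Paris")
            logprob, is_greedy = lm.loglikelihood(requests)[0]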
        """
        pass

    @abc.abstractmethod
    def loglikelihood_rolling(self, requests: list[Instance]) -> list[float]:
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
        the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
          which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
          multiple chunks, the last input will still have a full-sized context.
          Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: BOS/EOS
            Max context length: 4
            Resulting input/prediction pairs:

                INPUT:  BOS   0   1   2
                PRED:     0   1   2   3

                INPUT:    3   4   5   6
                PRED:     4   5   6   7

                INPUT:    5   6   7   8
                PRED:             8   9

          Observe that:
            1. Each token is predicted exactly once
            2. For the last pair, we provide the full context, but only score the last two tokens

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context,).
            context: str
                String for which we are computing overall loglikelihood
        :return: list[float]
            A list of floats, one per request
101
            logprob: float
102
103
                The log probability of `context` conditioned on the BOS/EOS token.
                Can also be overridden for custom cases by `prefix_token_id`.
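
        Example (illustrative):
            # requests[0].args == ("The quick brown fox jumps over the lazy dog",)
            logprobs = lm.loglikelihood_rolling(requests)  # one float per request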
        """
        pass

    # TODO: Add an optional max length
    @abc.abstractmethod
    def generate_until(self, requests: list[Instance]) -> list[str]:
        """Generate greedily until a stopping sequence

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context, gen_kwargs).
            context: str
                Context string
            gen_kwargs: dict
                A dictionary of keyword arguments to pass to the generation function e.g. top_k, until, etc.
        :return: list[str]
            A list of model generated continuations.
            continuation: str
                The generated continuation.
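
        Example (illustrative; `until` and `max_gen_toks` are common gen_kwargs keys,
        though the supported set depends on the concrete LM subclass):

            # requests[0].args == ("Question: 2+2=?  Answer:", {"until": ["."], "max_gen_toks": 32})
            continuations = lm.generate_until(requests)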
        """
        pass

    def apply_chat_template(
        self, chat_history: list[dict[str, str]], add_generation_prompt: bool = True
    ) -> str:
        """
        Defines how to transform few-shot examples provided as chat history into a format that can be used as input to the LM.

        :param chat_history: list[dict[str, str]]
            A list of dictionaries with keys 'role' and 'content'.
            Values are strings representing the role name and the content of the message, respectively.
        :param add_generation_prompt: bool
            Whether to append a generation prompt for the assistant role (e.g. <|assistant|>) to the end of the chat history. Set to False when prefilling an assistant message.
        :return: str
            A string representing the chat history in a format that can be used as input to the LM.
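
        Example (illustrative):
            prompt = lm.apply_chat_template(
                [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": "What is the capital of France?"},
                ]
            )
            # -> a single prompt string with the template's role markers applied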
        """
        raise NotImplementedError(
            "To use this model with chat templates, please implement the 'apply_chat_template' method for your model type."
        )

    @classmethod
    def create_from_arg_string(
        cls: Type[T], arg_string: str, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument string and additional config.

        Parameters:
        - arg_string: A string containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
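
        Example (illustrative; `MyLM` is a hypothetical subclass accepting these kwargs):
            lm = MyLM.create_from_arg_string("pretrained=gpt2,batch_size=8")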
        """
        additional_config = {} if additional_config is None else additional_config
        args = utils.simple_parse_args_string(arg_string)
        args2 = {k: v for k, v in additional_config.items() if v is not None}
        return cls(**args, **args2)

    @classmethod
    def create_from_arg_obj(
        cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument dictionary and additional config.

        Parameters:
        - arg_dict: A dict of keyword arguments for the LM constructor.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """

        additional_config = {} if additional_config is None else additional_config
        additional_config = {
            k: v for k, v in additional_config.items() if v is not None
        }

        return cls(**arg_dict, **additional_config)

    @property
    def rank(self):
        # Used in the case of parallelism. Fixed to 0 by default so that no
        # errors arise when using API models, which neither support nor expect
        # multi-device parallelism.
        return self._rank

    @property
    def world_size(self):
        # Used in the case of parallelism. Fixed to 1 by default so that no
        # errors arise when using API models, which neither support nor expect
        # multi-device parallelism.
        return self._world_size

    @property
    def tokenizer_name(self) -> str:
        """Must be defined for LM subclasses which implement Chat Templating.
        Should return the name of the tokenizer or chat template used.
        Used only to properly fingerprint caches when requests are being cached with `--cache_requests`, otherwise not used.
        """
        raise NotImplementedError(
            "To use this model with chat templates, please implement the 'tokenizer_name' property."
        )

    def chat_template(self, chat_template: Union[bool, str] = False) -> Optional[str]:
        """Returns the chat template structure for user/assistant messages if a template is provided.
        This method is intended to be overridden in a subclass to define a specific chat template format.
        For models that do not support chat templates, this method returns an empty string by default.
        """

        return ""

    def set_cache_hook(self, cache_hook: "CacheHook") -> None:
        self.cache_hook = cache_hook


### SQLite-based caching of LM responses
def hash_args(attr: str, args: Iterable[Any]) -> str:
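    """Hash a method name together with its request arguments, producing a
    deterministic key under which the corresponding response can be cached."""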
    dat = json.dumps([attr] + list(args))
    return hashlib.sha256(dat.encode("utf-8")).hexdigest()


class CacheHook:
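    """Hook handed to an LM (via `set_cache_hook`) so that newly computed
    responses can be written back into a CachingLM's SQLite store."""
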
    def __init__(self, cachinglm: Optional["CachingLM"]) -> None:
        if cachinglm is None:
            self.dbdict: Optional["SqliteDict"] = None
            return

        self.dbdict = cachinglm.dbdict

    def add_partial(self, attr: str, req: Iterable[Any], res: Any) -> None:
        if self.dbdict is None:
            return
        hsh = hash_args(attr, req)
        self.dbdict[hsh] = res


class CachingLM:
    def __init__(self, lm: LM, cache_db: str) -> None:
        """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.

        :param lm: LM
            Underlying LM
        :param cache_db: str
            Path to cache db
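
        Example (illustrative; `base_lm` is any LM instance):
            lm = CachingLM(base_lm, "lm_cache/model.db")
            # subsequent loglikelihood/generate_until calls are served from cache when possible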
        """
        from sqlitedict import SqliteDict

        self.lm: LM = lm
        self.cache_db: str = cache_db
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.dbdict = SqliteDict(cache_db, autocommit=True)

        # add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr: str) -> Any:
        lm_attr = getattr(self.lm, attr)
        if attr not in ["loglikelihood", "loglikelihood_rolling", "generate_until"]:
            eval_logger.debug(f"Passing through attribute '{attr}' to underlying LM")
            return lm_attr

        def _fn(requests: list["Instance"]) -> list["Instance"]:
            res = []
            remaining_reqs = []
            warned = False
            # figure out which ones are cached and which ones are new
            eval_logger.info(
                f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..."
            )
            for req in tqdm(requests, desc="Checking cached requests"):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # when we are doing non-greedy generation, don't use the cache
                    # (else every "randomly sampled" generation would be identical for repeats > 1).
                    if not warned:
                        eval_logger.warning(
                            f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests."
                        )
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    res.append(None)
                    remaining_reqs.append(req)
            eval_logger.info(
                f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}"
            )
            if remaining_reqs:
                # actually run the LM on the requests that do not have cached results
                rem_res = getattr(self.lm, attr)(remaining_reqs)
            else:
                rem_res = []

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                while res[resptr] is not None:
                    resptr += 1

                res[resptr] = r

                # caching
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
            self.dbdict.commit()

            return res

        return _fn

    def get_cache_hook(self) -> "CacheHook":
        return CacheHook(self)


class TemplateLM(LM):
    """
    A class acting as intermediary between the LM base class
    and boilerplate often included in other LM subclasses.
    """

    tokenizer = None

    @property
    @abc.abstractmethod
    def eot_token_id(self):
        pass

    @property
    def prefix_token_id(self):
        # token id used as a prefix for loglikelihood computation (typically BOS/EOS);
        # defaults to the EOT token
        return self.eot_token_id

    @abc.abstractmethod
    def tok_encode(self, string: str, **kwargs) -> list[int]:
        """
        Tokenize a string using the model's tokenizer and return a list of token IDs.
        """
        pass

    @abc.abstractmethod
    def _loglikelihood_tokens(
        self, requests: list["Instance"], **kwargs
    ) -> list[tuple[float, bool]]:
        pass

    def _encode_pair(
        self, context: str, continuation: str
    ) -> tuple[list[int], list[int]]:
        import transformers

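        # Move trailing whitespace from the end of the context onto the start of the
        # continuation, so the pair is tokenized at a stable word boundary
        # (e.g. "hello " + "world" becomes "hello" + " world").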
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]

        model_class = getattr(self, "AUTO_MODEL_CLASS", None)

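        # Seq2seq models encode context and continuation independently. For causal
        # models, continuation tokens are sliced from the encoding of the joined
        # string, so tokens that merge across the boundary are attributed to the
        # continuation consistently.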
        if model_class == transformers.AutoModelForSeq2SeqLM:
            context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
        else:
            whole_enc = self.tok_encode(context + continuation)
            context_enc = self.tok_encode(context)

            context_enc_len = len(context_enc)
            continuation_enc = whole_enc[context_enc_len:]

        return context_enc, continuation_enc

    def loglikelihood(
        self, requests: list["Instance"], disable_tqdm: bool = False
    ) -> list[tuple[float, bool]]:
        new_reqs = []
        for context, continuation in (
            (req.args.context, req.args.continuation) for req in requests
        ):
            if context == "":
                # BOS or EOS as context
                context_enc, continuation_enc = (
                    [self.prefix_token_id],
                    self.tok_encode(continuation),
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)

            new_reqs.append(((context, continuation), context_enc, continuation_enc))

        return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)

    @abc.abstractmethod
    def loglikelihood_rolling(
        self, requests: list[Instance], disable_tqdm: bool = False
    ) -> list[float]:
        pass

    @abc.abstractmethod
    def generate_until(
        self, requests: list[Instance], disable_tqdm: bool = False
    ) -> list[str]:
        pass

    def chat_template(self, chat_template: Union[bool, str] = False) -> Optional[str]:
        """
        Set and get the appropriate chat template for the model.
        This method sets the tokenizer's chat_template and returns the template string for reproducibility.

        The template selection logic is adapted from the Transformers library's `apply_chat_template`
        method in the Tokenizer class. The original implementation can be found at:
        https://github.com/huggingface/transformers/blob/fc35907f95459d7a6c5281dfadd680b6f7b620e3/src/transformers/tokenization_utils_base.py#L1687

        This method ensures that the right template is chosen based on the following:
        0. If the model has no 'tokenizer' attribute: assumes that there is only a single possible chat template, handled on the model provider side internally. Returns the empty string.
        1. If the model's tokenizer has multiple templates:
            a. Use the specified template if it exists in the dictionary.
            b. Use the default template from the list if no specific template is provided.
            c. Raise an error if no default template exists and no specific template is provided.
        2. If the model's tokenizer has a single template or no template:
            a. Use the tokenizer's chat template if available.
            b. Fall back to the default chat template if no tokenizer chat template exists.

        Args:
            chat_template (Union[bool, str]): Specifies the chat template to use.
                - If False or None, no template is applied.
                - If True, the default or only available template is used.
                - If a string, the template with the matching name is used.

        Returns:
            Optional[str]: The selected chat template, or None if no template is applied.
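
        Example (illustrative; "tool_use" is a hypothetical template name):
            template_str = lm.chat_template(True)        # default (or only) template
            template_str = lm.chat_template("tool_use")  # a specific named template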
        """
        if self.tokenizer is None:
            return ""

        if chat_template is False or chat_template is None:
            eval_logger.warning(
                "model.chat_template was called with the chat_template set to False or None. "
                "Therefore no chat template will be applied. Make sure this is an intended behavior."
            )
            return None

        # Convert boolean chat_template to None to ensure compatibility with the adapted logic
        if isinstance(chat_template, bool):
            chat_template = None
        using_default_template = False

        # First, handle the cases when the model has a dict of multiple templates
        try:
            template = (
                self.tokenizer.chat_template or self.tokenizer.default_chat_template
            )
        except AttributeError:
            return None

        if isinstance(template, dict):
            using_default_dict = self.tokenizer.chat_template is None

            if chat_template is not None:
                if chat_template in template:
                    selected_template = template[chat_template]
                    if using_default_dict:
                        using_default_template = True
                else:
                    raise ValueError(
                        f"The specified chat template '{chat_template}' is not available. "
                        f"Available template names are {sorted(template.keys())}."
                    )
            else:
                # If user didn't pass a chat template, use the default template from the dict
                if "default" in template:
                    selected_template = template["default"]
                    using_default_template = True
                else:
                    raise ValueError(
                        "This model has multiple chat templates with no default specified! Please either pass a chat "
                        "template or the name of the template you wish to use to the `chat_template` argument. Available "
                        f"template names are {sorted(template.keys())}."
                    )

        # Cases when the model has a single template or no template
        else:
            # priority: `chat_template` argument > `tokenizer.chat_template` > `tokenizer.default_chat_template`
            if isinstance(chat_template, str):
                eval_logger.warning(
                    "Chat template name provided, but the tokenizer's chat template is not a dictionary. "
                    "Using the tokenizer's chat template or the default template instead."
                )
            if self.tokenizer.chat_template is not None:
                selected_template = self.tokenizer.chat_template
            else:
                selected_template = self.tokenizer.default_chat_template
                using_default_template = True

        if using_default_template:
            eval_logger.warning(
                "No chat template is set for this tokenizer, falling back to a default class-level template. This is "
                "very error-prone, because models are often trained with templates different from the class default! "
                "Default chat templates are a legacy feature and will be removed in Transformers v4.43, at which "
                "point any code depending on them will stop working. We recommend setting a valid chat template before "
                "then to ensure that this model continues working without issues."
            )

        return selected_template