import abc
import hashlib
import json
import logging
import os
from typing import List, Optional, Tuple, Type, TypeVar

from sqlitedict import SqliteDict
from tqdm import tqdm

from lm_eval import utils


eval_logger = logging.getLogger("lm-eval")

T = TypeVar("T", bound="LM")


class LM(abc.ABC):
    def __init__(self) -> None:
        """Defines the interface that should be implemented by all LM subclasses.
        LMs are assumed to take text (strings) as input and yield strings as output
        (inputs/outputs should be tokenization-agnostic).

        """
        # set rank and world size to a single process, by default.
        self._rank = 0
        self._world_size = 1
        self.cache_hook = CacheHook(None)

    @abc.abstractmethod
    def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list[Instance]
            A list of Instance objects, with property `args` which returns a tuple (context, continuation).
            `context: str`
                Context string. Implementations of LM must be able to handle an
                empty context string.
            `continuation: str`
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.

        :return: list[tuple[float, bool]]
            A list of pairs (logprob, isgreedy)
            `logprob: float`
                The log probability of `continuation`.
            `isgreedy: bool`
                Whether `continuation` would be generated by greedy sampling from `context`.
        """
        pass

    @abc.abstractmethod
    def loglikelihood_rolling(self, requests) -> List[Tuple[float]]:
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
        the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
          which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
          multiple chunks, the last input will still have a full-sized context.
          Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: EOT
            Max context length: 4
            Resulting input/prediction pairs:

                INPUT:  EOT   0   1   2
                PRED:     0   1   2   3

                INPUT:    3   4   5   6
                PRED:     4   5   6   7

                INPUT:    5   6   7   8
                PRED:             8   9

          Observe that:
            1. Each token is predicted exactly once
            2. For the last pair, we provide the full context, but only score the last two tokens

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context,).
            context: str
                The string for which we are computing the overall loglikelihood.
        :return: list[tuple[float]]
            A list of tuples (logprob,)
            logprob: float
                The log probability of `context` conditioned on the EOT token.
        """
        pass

    # TODO: Add an optional max length
    @abc.abstractmethod
    def generate_until(self, requests) -> List[str]:
        """Generate greedily until a stopping sequence

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context, until).
            context: str
                Context string
            until: [str]
                The string sequences to generate until. These string sequences
                may each span across multiple tokens, or may be part of one token.
        :return: list[str]
            A list of generated continuation strings.
            continuation: str
                The generated continuation.
        """
        pass

    @classmethod
    def create_from_arg_string(
        cls: Type[T], arg_string: str, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument string and additional config.

        Parameters:
        - arg_string: A string containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
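
        Example (a sketch; the subclass and argument names are hypothetical):
            >>> lm = MyLM.create_from_arg_string("pretrained=gpt2,batch_size=8")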
        """
        additional_config = {} if additional_config is None else additional_config
        args = utils.simple_parse_args_string(arg_string)
        args2 = {k: v for k, v in additional_config.items() if v is not None}
        return cls(**args, **args2)

    @classmethod
    def create_from_arg_obj(
        cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument dictionary and additional config.

        Parameters:
        - arg_dict: A dict of keyword arguments used to construct the LM.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """

        additional_config = {} if additional_config is None else additional_config
        additional_config = {
            k: v for k, v in additional_config.items() if v is not None
        }

        return cls(**arg_dict, **additional_config)

    @property
    def rank(self):
        # used in the case of parallelism. Hardcoded to a single process by
        # default to ensure no errors arise when using API models, which
        # neither support nor expect multi-device parallelism.
        return self._rank

    @property
    def world_size(self):
        # used in the case of parallelism. Hardcoded to a single process by
        # default to ensure no errors arise when using API models, which
        # neither support nor expect multi-device parallelism.
        return self._world_size

    def set_cache_hook(self, cache_hook) -> None:
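        # Registered by CachingLM so that implementations can write partial
        # results into the cache via self.cache_hook.add_partial(...).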
        self.cache_hook = cache_hook


### SQLite-based caching of LM responses
def hash_args(attr, args):
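    # Serialize the method name plus the request args to JSON, then hash the
    # result to obtain a deterministic key for the SQLite-backed cache.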
    dat = json.dumps([attr] + list(args))
    return hashlib.sha256(dat.encode("utf-8")).hexdigest()


class CacheHook:
    def __init__(self, cachinglm) -> None:
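        # A cachinglm of None disables caching: add_partial becomes a no-op.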
        if cachinglm is None:
            self.dbdict = None
            return

        self.dbdict = cachinglm.dbdict

    def add_partial(self, attr, req, res) -> None:
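        # Store a single request/response pair, keyed by the calling method's
        # name and the request args.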
        if self.dbdict is None:
            return
        hsh = hash_args(attr, req)
        self.dbdict[hsh] = res


class CachingLM:
    def __init__(self, lm, cache_db) -> None:
        """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.

        :param lm: LM
            Underlying LM
        :param cache_db: str
            Path to cache db
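
        Example (a sketch; assumes `base_lm` is an existing LM instance):
            >>> lm = CachingLM(base_lm, "lm_cache/model.db")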
        """
        self.lm = lm
        self.cache_db = cache_db
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.dbdict = SqliteDict(cache_db, autocommit=True)

        # add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr):
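        # Delegate attribute access to the wrapped LM: non-callable attributes
        # pass through unchanged, while callables are wrapped with caching.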
        lm_attr = getattr(self.lm, attr)
        if not callable(lm_attr):
            return lm_attr

        def fn(requests):
            res = []
            remaining_reqs = []
            warned = False
            # figure out which ones are cached and which ones are new
            eval_logger.info(
                f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..."
            )
            for req in tqdm(requests, desc="Checking cached requests"):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # when we are doing non-greedy generation, don't use the cache
                    # (else every "randomly sampled" generation would be identical for repeats > 1).
                    if not warned:
                        eval_logger.warning(
                            f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests."
                        )
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    res.append(None)
                    remaining_reqs.append(req)
            eval_logger.info(
                f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}"
            )
            # actually run the LM on the requests that do not have cached results
            rem_res = getattr(self.lm, attr)(remaining_reqs)

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
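                # advance past slots already filled from the cache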
                while res[resptr] is not None:
                    resptr += 1

                res[resptr] = r

                # caching
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
            self.dbdict.commit()

            return res

        return fn

    def get_cache_hook(self):
        return CacheHook(self)


class TemplateLM(LM):
    """
    A class acting as intermediary between the LM base class
    and boilerplate often included in other LM subclasses.
    """

    @property
    @abc.abstractmethod
    def eot_token_id(self):
        pass

    @abc.abstractmethod
    def tok_encode(self, string: str, **kwargs):
        pass

    @abc.abstractmethod
    def _loglikelihood_tokens(self, requests, **kwargs):
        pass

    def _encode_pair(self, context, continuation):
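        # Move trailing whitespace from the context onto the continuation, so
        # that the split point does not fall on whitespace the tokenizer would
        # attach to the following token.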
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]

        whole_enc = self.tok_encode(context + continuation)
        context_enc = self.tok_encode(context)
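        # Tokenize the concatenation once and slice off the context's tokens,
        # so the continuation's token IDs match how they are tokenized in
        # context (e.g. across BPE merge boundaries).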
        context_enc_len = len(context_enc)
        continuation_enc = whole_enc[context_enc_len:]

        return context_enc, continuation_enc

    def loglikelihood(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
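        # Pre-tokenize each (context, continuation) pair, then defer to the
        # subclass's tokenized implementation.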
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                # end of text as context
                context_enc, continuation_enc = (
                    [self.eot_token_id],
                    self.tok_encode(continuation),
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)

            new_reqs.append(((context, continuation), context_enc, continuation_enc))

        return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)

    @abc.abstractmethod
    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float]]:
        pass

    @abc.abstractmethod
    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        pass