import os
import re
import sys
import yaml
import inspect
import pathlib
import functools
import subprocess
import collections
import importlib.util
import fnmatch

from typing import Iterator, List, Literal, Union

import gc
import torch
import transformers
import numpy as np

from jinja2 import BaseLoader, Environment, StrictUndefined
from itertools import islice

from lm_eval.logger import eval_logger


def escaped_split(text, sep_char, maxsplit=-1):
    """Split text into a list on occurrences of the given separation
    character `sep_char`. The separation character may be escaped by a
    backslash to avoid splitting at that location.

    The separation character must be a string of size 1.

    If `maxsplit` is given, at most `maxsplit` splits are done (thus,
    the list will have at most `maxsplit + 1` elements). If `maxsplit`
    is not specified or less than 0, then there is no limit on the
    number of splits (all possible splits are made).
    """
    assert (
        len(sep_char) == 1
    ), "separation string must be a single character for escaped splitting"

    if maxsplit == 0:
        return text
    maxsplit = max(0, maxsplit)

    return re.split(r"(?<!\\)" + sep_char, text, maxsplit)
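
# Illustrative usage (editor's sketch, not part of the original module): a comma
# preceded by a backslash is kept inside the field rather than treated as a separator.
#   >>> escaped_split(r"key=a\,b,flag=true", ",")
#   ['key=a\\,b', 'flag=true']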


def handle_arg_string(arg):
    if arg.lower() == "true":
        return True
    elif arg.lower() == "false":
        return False
    return arg


def simple_parse_args_string(args_string):
    """
    Parses something like
        args1=val1,arg2=val2
    Into a dictionary
    """
    args_string = args_string.strip()
    if not args_string:
        return {}
    arg_list = [arg for arg in args_string.split(",") if arg]
    args_dict = {
        k: handle_arg_string(v) for k, v in [arg.split("=") for arg in arg_list]
    }
    return args_dict
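
# Illustrative usage (editor's sketch; the argument names are hypothetical):
#   >>> simple_parse_args_string("device=cuda:0,use_fast=True,revision=main")
#   {'device': 'cuda:0', 'use_fast': True, 'revision': 'main'}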


def join_iters(iters):
    for iter in iters:
        yield from iter


def chunks(iter, n: int = 0, fn=None):
    arr = []
    for i, x in enumerate(iter):
        arr.append(x)
        if len(arr) == (fn(i, iter) if fn else n):
            yield arr
            arr = []

    if arr:
        yield arr
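
# Illustrative usage (editor's sketch): fixed-size chunking with a ragged tail.
#   >>> list(chunks(range(5), n=2))
#   [[0, 1], [2, 3], [4]]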


def group(arr, fn):
    res = collections.defaultdict(list)

    for ob in arr:
        res[fn(ob)].append(ob)

    return list(res.values())


class MultiChoice:
    def __init__(self, choices) -> None:
        self.choices = choices

    # Simple wildcard support (Linux filename patterns)
    def __contains__(self, values) -> bool:
        for value in values.split(","):
            if len(fnmatch.filter(self.choices, value)) == 0:
                eval_logger.info("Available tasks to choose from:")
                for choice in self.choices:
                    eval_logger.info(f"  - {choice}")
                raise ValueError("'{}' is not in task list".format(value))
        return True

    def __iter__(self) -> Iterator:
        for choice in self.choices:
            yield choice
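
# Illustrative usage (editor's sketch; the task names are placeholders):
#   >>> choices = MultiChoice(["lambada_openai", "lambada_standard", "hellaswag"])
#   >>> "lambada_*" in choices   # wildcard matches at least one choice
#   True
#   >>> "piqa" in choices        # no match: logs the available choices, raises ValueError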


# Returns a list containing all values of the source_list that
# match at least one of the patterns
def pattern_match(patterns, source_list):
    if isinstance(patterns, str):
        patterns = [patterns]

    task_names = set()
    for pattern in patterns:
        for matching in fnmatch.filter(source_list, pattern):
            task_names.add(matching)
    return sorted(list(task_names))
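
# Illustrative usage (editor's sketch):
#   >>> pattern_match(["lambada_*"], ["lambada_openai", "lambada_standard", "hellaswag"])
#   ['lambada_openai', 'lambada_standard']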


def softmax(x):
    """Compute softmax values for each sets of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()


def general_detokenize(string):
    string = string.replace(" n't", "n't")
    string = string.replace(" )", ")")
    string = string.replace("( ", "(")
    string = string.replace('" ', '"')
    string = string.replace(' "', '"')
    string = re.sub(r" (['.,])", r"\1", string)
    return string


def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):
    """
    - context_len allows for a rolling window context, allowing each prediction window to potentially
      condition on some context

    :param token_list: list
        List of tokens to be PREDICTED
    :param max_seq_len: int
        max_seq_len of model (or max_seq_len we want to use)
    :param context_len: int
        Amount of desired token context for prediction. Needs to be at least 1.
    :param prefix_token: token
        Dummy token like <eos> so the first token has something to condition on
    :return: generator
        Generator of tuples
            (input_tokens, pred_tokens)
        Note: Score only the last len(pred_tokens) logits of the LM
    """
    assert 1 <= context_len <= max_seq_len
    if not token_list:
        return
    # +1 offset, going from input->preds
    pred_len = max_seq_len - context_len + 1
    predicted = 0

    # Special handling for first window: predict all tokens
    first_seq_len = min(max_seq_len, len(token_list))
    yield ([prefix_token] + token_list[: first_seq_len - 1], token_list[:first_seq_len])
    predicted += first_seq_len

    while predicted < len(token_list):
        window_pred_len = min(len(token_list) - predicted, pred_len)
        window_end = predicted + window_pred_len

        yield (
            token_list[window_end - max_seq_len - 1 : window_end - 1],
            token_list[window_end - window_pred_len : window_end],
        )
        predicted += window_pred_len
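
# Worked example (editor's sketch): with max_seq_len=4 and context_len=2, the first
# window predicts every token that fits; the second window re-uses earlier tokens as
# context and predicts only the remaining two.
#   >>> list(get_rolling_token_windows([1, 2, 3, 4, 5, 6], prefix_token=0, max_seq_len=4, context_len=2))
#   [([0, 1, 2, 3], [1, 2, 3, 4]), ([2, 3, 4, 5], [5, 6])]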


def make_disjoint_window(pair):
    """Takes output from get_rolling_token_windows and makes the context not overlap with the continuation"""
    a, b = pair
    return a[: len(a) - (len(b) - 1)], b
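
# Illustrative usage (editor's sketch), continuing the rolling-window example above:
#   >>> make_disjoint_window(([2, 3, 4, 5], [5, 6]))
#   ([2, 3, 4], [5, 6])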


class Reorderer:
    def __init__(self, arr, fn) -> None:
        self.size = len(arr)
        arr = list(enumerate(arr))
        arr = group(arr, lambda x: fn(x[1]))
        # arr = [([y[0] for y in x], x[0][1]) for x in arr]
        # TODO: overhaul reorderer. It currently groups requests by content, but we don't want this
        arr = [([y[0]], x[0][1]) for x in arr for y in x]
        arr.sort(key=lambda x: fn(x[1]))

        self.arr = arr

    def get_reordered(self):
        return [x[1] for x in self.arr]

    def get_original(self, newarr):
        res = [None] * self.size
        cov = [False] * self.size

        for (inds, _), v in zip(self.arr, newarr):
            for ind in inds:
                res[ind] = v
                cov[ind] = True

        assert all(cov)

        return res
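
# Illustrative usage (editor's sketch): sort requests by a key, then scatter the
# per-request results back into the original order.
#   >>> reorderer = Reorderer(["bb", "a", "ccc"], len)
#   >>> reorderer.get_reordered()
#   ['a', 'bb', 'ccc']
#   >>> reorderer.get_original([1, 2, 3])   # results for the reordered requests
#   [2, 1, 3]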


class Grouper:
    """
    Takes an array `arr` and a function `fn`, and returns a dictionary keyed by
    `fn(ob)` for each `ob` in `arr`, where each value is the list of all objects
    in `arr` satisfying `key == fn(ob)`.
    """

    def __init__(self, arr, fn) -> None:
        # self.orig_arr = arr
        self.size = len(arr)
        arr = list(enumerate(arr))

        def group_return_dict(arr, fn):
            res = collections.defaultdict(list)

            for ob in arr:
                res[fn(ob)].append(ob)
            return res

        arr = group_return_dict(arr, lambda x: fn(x[1]))

        # self.arr has format Dict[Tuple[int, <entry from orig. arr>]]
        self.arr = arr
        self._grouped = None

    def get_grouped(self):
        # return the contents but not indices for our grouped dict.
        if self._grouped:
            return self._grouped
        grouped = {}
        for key in self.arr.keys():
            # drop the index from each element of self.arr
            grouped[key] = [y[1] for y in self.arr[key]]
        self._grouped = grouped
        return grouped

    def get_original(self, grouped_dict):
        # take in a grouped dictionary with e.g. results for each key listed
        # in the same order as the instances in `self.arr`, and
        # return the results in the same (single list) order as `self.orig_arr`.
        res = [None] * self.size
        cov = [False] * self.size
        # orig = [None] * self.size

        assert grouped_dict.keys() == self.arr.keys()

        for key in grouped_dict.keys():
            for (ind, _), v in zip(self.arr[key], grouped_dict[key]):
                res[ind] = v
                cov[ind] = True
                # orig[ind] = _

        assert all(cov)
        # assert orig == self.orig_arr

        return res
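
# Illustrative usage (editor's sketch): group by a key, then scatter per-group
# results back into the original (flat) order.
#   >>> grouper = Grouper(["a", "bb", "c", "dd"], len)
#   >>> grouper.get_grouped()
#   {1: ['a', 'c'], 2: ['bb', 'dd']}
#   >>> grouper.get_original({1: ["A", "C"], 2: ["BB", "DD"]})
#   ['A', 'BB', 'C', 'DD']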


def make_table(result_dict, column: str = "results"):
    """Generate table of results."""
    from pytablewriter import MarkdownTableWriter, LatexTableWriter

    if column == "results":
        column_name = "Tasks"
    elif column == "groups":
        column_name = "Groups"

    md_writer = MarkdownTableWriter()
    latex_writer = LatexTableWriter()
    md_writer.headers = [
        column_name,
        "Version",
        "Filter",
        "Metric",
        "Value",
        "",
        "Stderr",
    ]
    latex_writer.headers = [
        column_name,
        "Version",
        "Filter",
        "Metric",
        "Value",
        "",
        "Stderr",
    ]

    values = []

    for k, dic in result_dict[column].items():
        version = result_dict["versions"][k]
        for mf, v in dic.items():
            m, _, f = mf.partition(",")
            if m.endswith("_stderr"):
                continue

            if m + "_stderr" + "," + f in dic:
                se = dic[m + "_stderr" + "," + f]
                values.append([k, version, f, m, "%.4f" % v, "±", "%.4f" % se])
            else:
                values.append([k, version, f, m, "%.4f" % v, "", ""])
            k = ""
            version = ""
    md_writer.value_matrix = values
    latex_writer.value_matrix = values

    # todo: make latex table look good
    # print(latex_writer.dumps())

    return md_writer.dumps()


def positional_deprecated(fn):
    """
    A decorator to nudge users into passing only keyword args (`kwargs`) to the
    wrapped function, `fn`.
    """

    @functools.wraps(fn)
    def _wrapper(*args, **kwargs):
        if len(args) != (1 if inspect.ismethod(fn) else 0):
            print(
                f"WARNING: using {fn.__name__} with positional arguments is "
                "deprecated and will be disallowed in a future version of "
                "lm-evaluation-harness!"
            )
        return fn(*args, **kwargs)

    return _wrapper


@positional_deprecated
def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
    """
    Search upward in the directory tree to a maximum of three layers
    to find and return the package root (containing the 'tests' folder)
    """
    cur_path = start_path.resolve()
    max_layers = 3
    for _ in range(max_layers):
        if (cur_path / "tests" / "test_version_stable.py").exists():
            return cur_path
        else:
            cur_path = cur_path.parent.resolve()
    raise FileNotFoundError(
        f"Unable to find package root within {max_layers} upwards" + f"of {start_path}"
    )


@positional_deprecated
def run_task_tests(task_list: List[str]):
    """
    Find the package root and run the tests for the given tasks
    """
    import pytest

    package_root = find_test_root(start_path=pathlib.Path(__file__))
    task_string = " or ".join(task_list)
    args = [
        f"{package_root}/tests/test_version_stable.py",
        f"--rootdir={package_root}",
        "-k",
        f"{task_string}",
    ]
    sys.path.append(str(package_root))
    pytest_return_val = pytest.main(args)
    if pytest_return_val:
        raise ValueError(
            f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}"
        )


def get_git_commit_hash():
    """
    Gets the git commit hash of your current repo (if it exists).
    Source: https://github.com/EleutherAI/gpt-neox/blob/b608043be541602170bfcfb8ec9bf85e8a0799e0/megatron/neox_arguments/neox_args.py#L42
    """
    try:
        git_hash = subprocess.check_output(["git", "describe", "--always"]).strip()
        git_hash = git_hash.decode()
    except (subprocess.CalledProcessError, FileNotFoundError):
        # FileNotFoundError occurs when git not installed on system
        git_hash = None
    return git_hash


def import_function(loader, node):
    function_name = loader.construct_scalar(node)
    yaml_path = os.path.dirname(loader.name)

    *module_name, function_name = function_name.split(".")
    if isinstance(module_name, list):
        module_name = ".".join(module_name)
    module_path = os.path.normpath(os.path.join(yaml_path, "{}.py".format(module_name)))

    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    function = getattr(module, function_name)
    return function


# Add the import_function constructor to the YAML loader
yaml.add_constructor("!function", import_function)


def load_yaml_config(yaml_path=None, yaml_config=None, yaml_dir=None):

    if yaml_config is None:
        with open(yaml_path, "rb") as file:
            yaml_config = yaml.full_load(file)

    if yaml_dir is None:
        yaml_dir = os.path.dirname(yaml_path)

    assert yaml_dir is not None

    if "include" in yaml_config:
        include_path = yaml_config["include"]
        del yaml_config["include"]

        if isinstance(include_path, str):
            include_path = [include_path]

        # Load from the last one first
        include_path.reverse()
        final_yaml_config = {}
        for path in include_path:

            # Assumes that path is a full path.
            # If not found, assume the included yaml
            # is in the same dir as the original yaml
            if not os.path.isfile(path):
                path = os.path.join(yaml_dir, path)

            try:
                included_yaml_config = load_yaml_config(path)
                final_yaml_config.update(included_yaml_config)
            except Exception as ex:
                # Re-raise so the caller can see which included file failed to load
                raise ex

        final_yaml_config.update(yaml_config)
        return final_yaml_config
    return yaml_config
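
# Illustrative `include` usage (editor's sketch; file and key names are hypothetical):
#   # _base_template.yaml        # shared defaults
#   #   num_fewshot: 0
#   # my_task.yaml
#   #   include: _base_template.yaml
#   #   task: my_task
# Keys defined in the including file override keys pulled in via `include`; when a
# list of includes is given, entries earlier in the list take precedence because the
# list is loaded in reverse order.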


def regex_replace(string, pattern, repl, count: int = 0):
    """Implements the `re.sub` function as a custom Jinja filter."""
    return re.sub(pattern, repl, string, count=count)


env = Environment(loader=BaseLoader, undefined=StrictUndefined)
env.filters["regex_replace"] = regex_replace


def apply_template(template: str, doc: dict) -> str:
    rtemplate = env.from_string(template)
    return rtemplate.render(**doc)
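
# Illustrative usage (editor's sketch; the field names are placeholders):
#   >>> apply_template("{{question}} Answer: {{answer}}", {"question": "2+2=?", "answer": "4"})
#   '2+2=? Answer: 4'
# The custom `regex_replace` filter registered above is also available inside templates.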


def create_iterator(raw_iterator, rank, world_size, limit=None):
    """
    Method for creating a (potentially) sliced and limited
    iterator from a raw document iterator. Used for splitting data
    among ranks in a multi-GPU setting, or for pulling only a sample of documents.
    """
    return islice(raw_iterator, rank, limit, world_size)
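
# Illustrative usage (editor's sketch): rank 0 of 2 sees the even-indexed documents.
#   >>> list(create_iterator(iter(range(10)), rank=0, world_size=2))
#   [0, 2, 4, 6, 8]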


def pad_and_concat(
    max_length: int,
    tensors: List[torch.Tensor],
    padding_side: Literal["right", "left"] = "right",
):
    """
    Method for padding a list of tensors given the maximum tensor
    length in the batch. Used for batching inputs and continuations in
    seq2seq models.
    """
    assert (
        padding_side == "left" or padding_side == "right"
    ), f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'"

    for i, tensor in enumerate(tensors):
        if len(tensor.shape) == 2:
            tensor = tensor.squeeze(0)  # squeeze, in case passed [1, seq] size
        tensor_len = tensor.shape[0]
        if tensor_len < max_length:
            if padding_side == "right":
                # right-pad
                tensors[i] = torch.cat(
                    [
                        tensor,  # [seq]
                        torch.zeros(
                            max_length - tensor_len,
                            dtype=torch.long,
                            device=tensor.device,
                        ),  # [padding_length - seq]
                    ],
                    dim=0,
                ).unsqueeze(0)
            else:
                # left-pad
                tensors[i] = torch.cat(
                    [
                        torch.zeros(
                            max_length - tensor_len,
                            dtype=torch.long,
                            device=tensor.device,
                        ),  # [padding_length - seq]
                        tensor,  # [seq]
                    ],
                    dim=0,
                ).unsqueeze(0)
        else:
            tensors[i] = tensor.unsqueeze(0)

    return torch.cat(tensors, dim=0)
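
# Illustrative usage (editor's sketch): pad a ragged batch up to the longest length.
#   >>> pad_and_concat(3, [torch.tensor([1, 2, 3]), torch.tensor([4, 5])])
#   tensor([[1, 2, 3],
#           [4, 5, 0]])
# With padding_side="left", the shorter row would instead come out as [0, 4, 5].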


def clear_torch_cache() -> None:
    gc.collect()
    torch.cuda.empty_cache()


def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
    """Converts `dtype` from `str` to torch.dtype when possible. Does not use an instantiated HF AutoConfig"""
    if isinstance(dtype, str) and dtype != "auto":
        # Convert `str` args torch dtype: `float16` -> `torch.float16`
        _torch_dtype = getattr(torch, dtype)
    else:
        _torch_dtype = dtype
    return _torch_dtype
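
# Illustrative usage (editor's sketch):
#   >>> get_dtype("bfloat16")
#   torch.bfloat16
#   >>> get_dtype("auto")   # "auto" is passed through unchanged
#   'auto'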


# Multi-token stopping criteria
class MultiTokenEOSCriteria(transformers.StoppingCriteria):
    """Criteria to stop on the specified multi-token sequence."""

    def __init__(
        self,
        sequence: str,
        tokenizer: transformers.PreTrainedTokenizer,
        initial_decoder_input_length: int,
        batch_size: int,
    ) -> None:
        self.initial_decoder_input_length = initial_decoder_input_length
        self.done_tracker = [False] * batch_size
        self.sequence = sequence
        self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.sequence_id_len = len(self.sequence_ids)
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence
        lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :][
            :, -self.sequence_id_len :
        ]

        lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)

        for i, done in enumerate(self.done_tracker):
            if not done:
                self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]
        return False not in self.done_tracker


def stop_sequences_criteria(
    tokenizer: transformers.PreTrainedTokenizer,
    stop_sequences: List[str],
    initial_decoder_input_length: int,
    batch_size: int,
) -> transformers.StoppingCriteriaList:
    return transformers.StoppingCriteriaList(
        [
            *[
                MultiTokenEOSCriteria(
                    sequence, tokenizer, initial_decoder_input_length, batch_size
                )
                for sequence in stop_sequences
            ],
        ]
    )
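
# Illustrative usage with a Hugging Face model (editor's sketch; the model name and
# stop string are placeholders):
#   tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
#   model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")
#   context = tokenizer("Q: What is 2+2?", return_tensors="pt").input_ids
#   criteria = stop_sequences_criteria(tokenizer, ["\n\n"], context.shape[1], context.shape[0])
#   output = model.generate(context, stopping_criteria=criteria, max_new_tokens=32)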