import os
import re
import sys
import yaml
import inspect
import pathlib
import functools
import subprocess
import collections
import importlib.util
import fnmatch

from typing import Iterator, List, Literal, Union

import gc
import torch
import transformers

from jinja2 import BaseLoader, Environment, StrictUndefined
from itertools import islice

from lm_eval.logger import eval_logger


def escaped_split(text, sep_char, maxsplit=-1):
    """Split text into a list on occurrences of the given separation
    character `sep_char`. The separation character may be escaped by a
    backslash to avoid splitting at that location.

    The separation character must be a string of size 1.

    If `maxsplit` is given, at most `maxsplit` splits are done (thus,
    the list will have at most `maxsplit + 1` elements). If `maxsplit`
    is not specified or less than 0, then there is no limit on the
    number of splits (all possible splits are made).
    """
    assert (
        len(sep_char) == 1
    ), "separation string must be a single character for escaped splitting"

    if maxsplit == 0:
        # re.split treats maxsplit=0 as "no limit", so handle the no-split case here
        return [text]
    maxsplit = max(0, maxsplit)

    return re.split(r"(?<!\\)" + sep_char, text, maxsplit)


def handle_arg_string(arg):
    if arg.lower() == "true":
        return True
    elif arg.lower() == "false":
        return False
    return arg


def simple_parse_args_string(args_string):
    """
    Parses something like
        arg1=val1,arg2=val2
    into a dictionary
    """
    args_string = args_string.strip()
    if not args_string:
        return {}
    arg_list = [arg for arg in args_string.split(",") if arg]
    args_dict = {k: handle_arg_string(v) for k, v in [arg.split("=") for arg in arg_list]}
    return args_dict
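
# Usage sketch (illustrative values):
#   >>> simple_parse_args_string("device=cuda,use_fast=true")
#   {'device': 'cuda', 'use_fast': True}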


def join_iters(iters):
    for it in iters:
        yield from it


def chunks(iterable, n: int = 0, fn=None):
    arr = []
    for i, x in enumerate(iterable):
        arr.append(x)
        if len(arr) == (fn(i) if fn else n):
            yield arr
            arr = []

    if arr:
        yield arr
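
# Usage sketch (illustrative values): a trailing partial chunk is still yielded.
#   >>> list(chunks(range(5), n=2))
#   [[0, 1], [2, 3], [4]]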


def group(arr, fn):
    res = collections.defaultdict(list)

    for ob in arr:
        res[fn(ob)].append(ob)

    return list(res.values())


class MultiChoice:
    def __init__(self, choices) -> None:
        self.choices = choices

    # Simple wildcard support (linux filename patterns)
    def __contains__(self, values) -> bool:
        for value in values.split(","):
            if len(fnmatch.filter(self.choices, value)) == 0:
                eval_logger.info(f"Available tasks to choose:")
                for choice in self.choices:
                    eval_logger.info(f"  - {choice}")
                raise ValueError("'{}' is not in task list".format(value))
        return True

    def __iter__(self) -> Iterator:
        for choice in self.choices:
            yield choice


# Returns a list containing all values of the source_list that
# match at least one of the patterns
def pattern_match(patterns, source_list):
    if isinstance(patterns, str):
        patterns = [patterns]

    task_names = set()
    for pattern in patterns:
        for matching in fnmatch.filter(source_list, pattern):
            task_names.add(matching)
    return sorted(task_names)
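
# Usage sketch (illustrative task names):
#   >>> pattern_match(["lambada_*"], ["lambada_openai", "lambada_standard", "hellaswag"])
#   ['lambada_openai', 'lambada_standard']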


def general_detokenize(string):
    string = string.replace(" n't", "n't")
    string = string.replace(" )", ")")
    string = string.replace("( ", "(")
    string = string.replace('" ', '"')
    string = string.replace(' "', '"')
    string = re.sub(r" (['.,])", r"\1", string)
    return string
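
# Usage sketch (illustrative values):
#   >>> general_detokenize("I ca n't believe it 's over .")
#   "I can't believe it's over."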


def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):
    """
    - context_len allows for a rolling window context, allowing each prediction window to potentially
      condition on some context

    :param token_list: list
        List of tokens to be PREDICTED
    :param max_seq_len: int
        max_seq_len of model (or max_seq_len we want to use)
    :param context_len: int
        Amount of desired token context for prediction. Needs to be at least 1.
    :param prefix_token: token
        Dummy token like <eos> so the first token has something to condition on
    :return: generator
        Generator of tuples
            (input_tokens, pred_tokens)
        Note: Score only the last len(pred_tokens) logits of the LM
    """
    assert 1 <= context_len <= max_seq_len
    if not token_list:
        return
    # +1 offset, going from input->preds
    pred_len = max_seq_len - context_len + 1
    predicted = 0

    # Special handling for first window: predict all tokens
    first_seq_len = min(max_seq_len, len(token_list))
    yield ([prefix_token] + token_list[: first_seq_len - 1], token_list[:first_seq_len])
    predicted += first_seq_len

    while predicted < len(token_list):
        window_pred_len = min(len(token_list) - predicted, pred_len)
        window_end = predicted + window_pred_len

        yield (
            token_list[window_end - max_seq_len - 1 : window_end - 1],
            token_list[window_end - window_pred_len : window_end],
        )
        predicted += window_pred_len
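
# Usage sketch with toy token ids (prefix_token=0, max_seq_len=4, context_len=2):
#   >>> list(get_rolling_token_windows([1, 2, 3, 4, 5, 6], 0, 4, 2))
#   [([0, 1, 2, 3], [1, 2, 3, 4]), ([2, 3, 4, 5], [5, 6])]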


def make_disjoint_window(pair):
    """Takes output from get_rolling_token_windows and makes the context not overlap with the continuation"""
    a, b = pair
    return a[: len(a) - (len(b) - 1)], b
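
# Usage sketch, continuing the rolling-window example above: trim the context so
# it no longer overlaps the continuation.
#   >>> make_disjoint_window(([2, 3, 4, 5], [5, 6]))
#   ([2, 3, 4], [5, 6])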


class Reorderer:
    def __init__(self, arr, fn) -> None:
        self.size = len(arr)
        arr = list(enumerate(arr))
        arr = group(arr, lambda x: fn(x[1]))
        # arr = [([y[0] for y in x], x[0][1]) for x in arr]
        # TODO: overhaul reorderer. It currently groups requests by content, but we don't want this
        arr = [([y[0]], x[0][1]) for x in arr for y in x]
        arr.sort(key=lambda x: fn(x[1]))

        self.arr = arr

    def get_reordered(self):
        return [x[1] for x in self.arr]

    def get_original(self, newarr):
        res = [None] * self.size
        cov = [False] * self.size

        for (inds, _), v in zip(self.arr, newarr):
            for ind in inds:
                res[ind] = v
                cov[ind] = True

        assert all(cov)

        return res


class Grouper:
    """
    takes an array `arr` and function `fn` and returns a dictionary
    with keys fn(ob) for each ob in `arr` and with values `self.arr[key]` a list of all
    objects in `arr` satisfying `key == fn(ob)`.
    """

    def __init__(self, arr, fn) -> None:
        # self.orig_arr = arr
        self.size = len(arr)
        arr = list(enumerate(arr))

        def group_return_dict(arr, fn):
            res = collections.defaultdict(list)

            for ob in arr:
                res[fn(ob)].append(ob)
            return res

        arr = group_return_dict(arr, lambda x: fn(x[1]))

        # self.arr maps each key to a List[Tuple[int, <entry from orig. arr>]]
        self.arr = arr
        self._grouped = None

    def get_grouped(self):
        # return the contents but not indices for our grouped dict.
        if self._grouped:
            return self._grouped
        grouped = {}
        for key in self.arr.keys():
            # drop the index from each element of self.arr
            grouped[key] = [y[1] for y in self.arr[key]]
        self._grouped = grouped
        return grouped

    def get_original(self, grouped_dict):
        # take in a grouped dictionary with e.g. results for each key listed
        # in the same order as the instances in `self.arr`, and
        # return the results in the same (single list) order as `self.orig_arr`.
        res = [None] * self.size
        cov = [False] * self.size
        # orig = [None] * self.size

        assert grouped_dict.keys() == self.arr.keys()

        for key in grouped_dict.keys():
            for (ind, _), v in zip(self.arr[key], grouped_dict[key]):
                res[ind] = v
                cov[ind] = True
                # orig[ind] = _

        assert all(cov)
        # assert orig == self.orig_arr

        return res
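
# Usage sketch (illustrative values): group by parity, then restore original order.
#   >>> g = Grouper([1, 2, 3, 4], lambda x: x % 2)
#   >>> g.get_grouped()
#   {1: [1, 3], 0: [2, 4]}
#   >>> g.get_original({1: ["a", "b"], 0: ["c", "d"]})
#   ['a', 'c', 'b', 'd']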


def make_table(result_dict, column: str = "results"):
    """Generate table of results."""
    from pytablewriter import MarkdownTableWriter, LatexTableWriter

    if column == "results":
        column_name = "Tasks"
    elif column == "groups":
        column_name = "Groups"

    md_writer = MarkdownTableWriter()
    latex_writer = LatexTableWriter()
    md_writer.headers = [
        column_name,
        "Version",
        "Filter",
        "Metric",
        "Value",
        "",
        "Stderr",
    ]
    latex_writer.headers = [
        column_name,
        "Version",
        "Filter",
        "Metric",
        "Value",
        "",
        "Stderr",
    ]

    values = []

    for k, dic in result_dict[column].items():
        version = result_dict["versions"][k]
        for mf, v in dic.items():
            m, _, f = mf.partition(",")
            if m.endswith("_stderr"):
                continue

            if m + "_stderr" + "," + f in dic:
                se = dic[m + "_stderr" + "," + f]
                values.append([k, version, f, m, "%.4f" % v, "±", "%.4f" % se])
            else:
                values.append([k, version, f, m, "%.4f" % v, "", ""])
            k = ""
            version = ""
    md_writer.value_matrix = values
    latex_writer.value_matrix = values

    # todo: make latex table look good
    # print(latex_writer.dumps())

    return md_writer.dumps()


def positional_deprecated(fn):
    """
    A decorator to nudge users into passing only keyword args (`kwargs`) to the
    wrapped function, `fn`.
    """

    @functools.wraps(fn)
    def _wrapper(*args, **kwargs):
        if len(args) != (1 if inspect.ismethod(fn) else 0):
            print(
                f"WARNING: using {fn.__name__} with positional arguments is "
                "deprecated and will be disallowed in a future version of "
                "lm-evaluation-harness!"
            )
        return fn(*args, **kwargs)

    return _wrapper


@positional_deprecated
def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
    """
    Search upward in the directory tree to a maximum of three layers
    to find and return the package root (containing the 'tests' folder)
    """
    cur_path = start_path.resolve()
    max_layers = 3
    for _ in range(max_layers):
        if (cur_path / "tests" / "test_version_stable.py").exists():
            return cur_path
        else:
            cur_path = cur_path.parent.resolve()
    raise FileNotFoundError(
        f"Unable to find package root within {max_layers} layers upward of {start_path}"
    )


@positional_deprecated
def run_task_tests(task_list: List[str]):
    """
    Find the package root and run the tests for the given tasks
    """
    import pytest

    package_root = find_test_root(start_path=pathlib.Path(__file__))
    task_string = " or ".join(task_list)
    args = [
        f"{package_root}/tests/test_version_stable.py",
        f"--rootdir={package_root}",
        "-k",
        f"{task_string}",
    ]
    sys.path.append(str(package_root))
    pytest_return_val = pytest.main(args)
    if pytest_return_val:
        raise ValueError(
            f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}"
        )


def get_git_commit_hash():
    """
    Gets the git commit hash of your current repo (if it exists).
    Source: https://github.com/EleutherAI/gpt-neox/blob/b608043be541602170bfcfb8ec9bf85e8a0799e0/megatron/neox_arguments/neox_args.py#L42
    """
    try:
        git_hash = subprocess.check_output(["git", "describe", "--always"]).strip()
        git_hash = git_hash.decode()
    except (subprocess.CalledProcessError, FileNotFoundError):
        # FileNotFoundError occurs when git not installed on system
        git_hash = None
    return git_hash


def import_function(loader, node):
    function_name = loader.construct_scalar(node)
    yaml_path = os.path.dirname(loader.name)

    *module_name, function_name = function_name.split(".")
    module_name = ".".join(module_name)
    module_path = os.path.normpath(os.path.join(yaml_path, "{}.py".format(module_name)))

    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    function = getattr(module, function_name)
    return function


# Add the import_function constructor to the YAML loader
yaml.add_constructor("!function", import_function)


def load_yaml_config(yaml_path):
    with open(yaml_path, "rb") as file:
        yaml_config = yaml.full_load(file)
        yaml_dir = os.path.dirname(yaml_path)

        if "include" in yaml_config:
            include_path = yaml_config["include"]
            del yaml_config["include"]

            if isinstance(include_path, str):
                include_path = [include_path]

            # Load from the last one first
            include_path.reverse()
            final_yaml_config = {}
            for path in include_path:
                # Assumes that path is a full path.
                # If not found, assume the included yaml
                # is in the same dir as the original yaml
                if not os.path.isfile(path):
                    path = os.path.normpath(os.path.join(yaml_dir, path))
                try:
                    included_yaml_config = load_yaml_config(path)
                    final_yaml_config.update(included_yaml_config)
                except Exception as ex:
                    # Re-raise errors from included configs instead of silently ignoring them
                    raise ex

            final_yaml_config.update(yaml_config)
            return final_yaml_config
        return yaml_config


def regex_replace(string, pattern, repl, count: int = 0):
    """Implements the `re.sub` function as a custom Jinja filter."""
    return re.sub(pattern, repl, string, count=count)


env = Environment(loader=BaseLoader, undefined=StrictUndefined)
env.filters["regex_replace"] = regex_replace


def apply_template(template: str, doc: dict) -> str:
    rtemplate = env.from_string(template)
    return rtemplate.render(**doc)
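
# Usage sketch (illustrative template and doc):
#   >>> apply_template("Q: {{question}} A:", {"question": "2+2?"})
#   'Q: 2+2? A:'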


def create_iterator(raw_iterator, rank, world_size, limit=None):
    """
    Method for creating a (potentially) sliced and limited
    iterator from a raw document iterator. Used for splitting data
    among ranks in a multi-GPU setting, or for pulling only a sample of documents.
    """
    return islice(raw_iterator, rank, limit, world_size)
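
# Usage sketch: rank 0 of 2 sees every other document, stopping at index `limit`.
#   >>> list(create_iterator(iter(range(10)), rank=0, world_size=2, limit=6))
#   [0, 2, 4]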


def pad_and_concat(
    max_length: int,
    tensors: List[torch.Tensor],
    padding_side: Literal["right", "left"] = "right",
):
    """
    Method for padding a list of tensors given the maximum tensor
    length in the batch. Used for batching inputs and continuations in
    seq2seq models.
    """
    assert (
        padding_side == "left" or padding_side == "right"
    ), f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'"

    for i, tensor in enumerate(tensors):
        if len(tensor.shape) == 2:
            tensor = tensor.squeeze(0)  # squeeze, in case passed [1, seq] size
        tensor_len = tensor.shape[0]
        if tensor_len < max_length:
            if padding_side == "right":
                # right-pad
                tensors[i] = torch.cat(
                    [
                        tensor,  # [seq]
                        torch.zeros(
                            max_length - tensor_len,
                            dtype=torch.long,
                            device=tensor.device,
                        ),  # [padding_length - seq]
                    ],
                    dim=0,
                ).unsqueeze(0)
            else:
                # left-pad
                tensors[i] = torch.cat(
                    [
                        torch.zeros(
                            max_length - tensor_len,
                            dtype=torch.long,
                            device=tensor.device,
                        ),  # [padding_length - seq]
                        tensor,  # [seq]
                    ],
                    dim=0,
                ).unsqueeze(0)
        else:
            tensors[i] = tensor.unsqueeze(0)

    return torch.cat(tensors, dim=0)
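
# Usage sketch (illustrative tensors): two 1-D sequences of lengths 3 and 5
# become a single [2, 5] batch.
#   >>> batch = pad_and_concat(5, [torch.ones(3, dtype=torch.long), torch.ones(5, dtype=torch.long)])
#   >>> batch.shape
#   torch.Size([2, 5])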


def clear_torch_cache() -> None:
    gc.collect()
    torch.cuda.empty_cache()


def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
    """Converts `dtype` from `str` to torch.dtype when possible. Does not use an instantiated HF AutoConfig"""
    if isinstance(dtype, str) and dtype != "auto":
        # Convert `str` args torch dtype: `float16` -> `torch.float16`
        _torch_dtype = getattr(torch, dtype)
    else:
        _torch_dtype = dtype
    return _torch_dtype
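
# Usage sketch:
#   >>> get_dtype("float16")
#   torch.float16
#   >>> get_dtype("auto")
#   'auto'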


# Multi-token stopping criteria
class MultiTokenEOSCriteria(transformers.StoppingCriteria):
    """Criteria to stop on the specified multi-token sequence."""

    def __init__(
        self,
        sequence: str,
        tokenizer: transformers.PreTrainedTokenizer,
        initial_decoder_input_length: int,
        batch_size: int,
    ) -> None:
        self.initial_decoder_input_length = initial_decoder_input_length
        self.done_tracker = [False] * batch_size
        self.sequence = sequence
        self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.sequence_id_len = len(self.sequence_ids)
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence
        lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :][
            :, -self.sequence_id_len :
        ]

        lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)

        for i, done in enumerate(self.done_tracker):
            if not done:
                self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]
        return False not in self.done_tracker


def stop_sequences_criteria(
    tokenizer: transformers.PreTrainedTokenizer,
    stop_sequences: List[str],
    initial_decoder_input_length: int,
    batch_size: int,
) -> transformers.StoppingCriteriaList:
    return transformers.StoppingCriteriaList(
        [
            MultiTokenEOSCriteria(
                sequence, tokenizer, initial_decoder_input_length, batch_size
            )
            for sequence in stop_sequences
        ]
    )
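
# Usage sketch with a hypothetical HF `model`/`tokenizer`, stopping generation
# once every sequence in the batch has produced "\n\n":
#   stopping_criteria = stop_sequences_criteria(
#       tokenizer, ["\n\n"], input_ids.shape[1], input_ids.shape[0]
#   )
#   output = model.generate(input_ids, stopping_criteria=stopping_criteria)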