import copy
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import transformers
from accelerate import Accelerator, DistributedType, find_executable_batch_size
from peft import PeftModel
from peft import __version__ as PEFT_VERSION
from tqdm import tqdm
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
)

from lm_eval import utils
from lm_eval.api.instance import Instance
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
from lm_eval.utils import MultiTokenEOSCriteria, stop_sequences_criteria

eval_logger = utils.eval_logger


def _get_accelerate_args(
    device_map_option: Optional[str] = "auto",
    max_memory_per_gpu: Optional[Union[int, str]] = None,
    max_cpu_memory: Optional[Union[int, str]] = None,
    offload_folder: Optional[str] = "./offload",
) -> dict:
    """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
    max_memory = {}
    if max_memory_per_gpu is not None:
        max_memory_per_gpu_map = {
            device_idx: max_memory_per_gpu
            for device_idx in range(torch.cuda.device_count())
        }
        max_memory.update(max_memory_per_gpu_map)
    if max_cpu_memory is not None:
        max_memory["cpu"] = max_cpu_memory

    args = {}
    if max_memory:
        args["max_memory"] = max_memory
    args["device_map"] = device_map_option
    args["offload_folder"] = offload_folder
    return args
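
# Illustrative only: with two visible GPUs and max_memory_per_gpu="40GiB", the helper
# above returns
#   {"max_memory": {0: "40GiB", 1: "40GiB"}, "device_map": "auto", "offload_folder": "./offload"}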


@register_model("hf-auto", "hf", "huggingface")
class HFLM(LM):
    """
    An abstracted Huggingface model class. Enables usage with both models of
    `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes.

    Supports data-parallel multi-GPU with HF Accelerate.
    """

    AUTO_MODEL_CLASS = None
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: Optional[str] = "gpt2",
        revision: Optional[str] = "main",
        subfolder: Optional[str] = None,
        tokenizer: Optional[str] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        device: Optional[str] = "cuda",
        dtype: Optional[Union[str, torch.dtype]] = "auto",
        batch_size: Optional[Union[int, str]] = 1,
        max_batch_size: Optional[int] = 64,
        low_cpu_mem_usage: Optional[bool] = True,
        trust_remote_code: Optional[bool] = False,
        use_fast_tokenizer: Optional[bool] = True,
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        # arguments used for splitting a model across GPUs naively.
        # only used if `parallelize=True`.
        parallelize: Optional[bool] = False,
        device_map_option: Optional[str] = "auto",
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[str] = "./offload",
        # PEFT and quantization options
        peft: Optional[str] = None,
        load_in_8bit: Optional[bool] = False,
        load_in_4bit: Optional[bool] = False,
        bnb_4bit_quant_type: Optional[str] = None,
        bnb_4bit_compute_dtype: Optional[Union[str, torch.dtype]] = None,
        gptq: Optional[Union[bool, str]] = False,
        gptq_use_triton: Optional[bool] = False,
    ) -> None:
        super().__init__()

        assert isinstance(device, str)
        assert isinstance(pretrained, str)
        assert isinstance(batch_size, (int, str))

        gpus = torch.cuda.device_count()
        accelerator = Accelerator()

        if not (parallelize or accelerator.num_processes > 1):
            # use user-passed device
            device_list = set(
                ["cuda", "cpu"]
                + [f"cuda:{i}" for i in range(torch.cuda.device_count())]
                + ["mps", "mps:0"]
            )
            if device:
                if device not in device_list:
                    device = int(device)
                self._device = torch.device(device)
                eval_logger.info(f"Using device '{device}'")
                if device in ("mps", "mps:0") and "dev" not in torch.__version__:
                    eval_logger.info(
                        "MPS: Setting dtype to float32. To use float16 with MPS, please install a nightly build of "
                        "PyTorch: pip3 install --pre torch torchvision torchaudio --index-url "
                        "https://download.pytorch.org/whl/nightly/cpu"
                    )
            else:
                eval_logger.info("Device not specified")
                eval_logger.info(f"Cuda Available? {torch.cuda.is_available()}")
                self._device = (
                    torch.device("cuda")
                    if torch.cuda.is_available()
                    else torch.device("cpu")
                )
        else:
            if device != "cuda":
                eval_logger.info(
                    f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model."
                )
            # TODO: include in warning that `load_in_8bit` etc. affect this too
            self._device = device

        model_kwargs = {}
        if parallelize:
            model_kwargs = _get_accelerate_args(
                device_map_option,
                max_memory_per_gpu,
                max_cpu_memory,
                offload_folder,
            )

        # TODO: update this to be less of a hack once subfolder is fixed in HF
        revision = revision + ("/" + subfolder if subfolder is not None else "")

        self._config = transformers.AutoConfig.from_pretrained(
            pretrained,
            revision=revision,
            trust_remote_code=trust_remote_code,
        )

        if getattr(self._config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
            self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
        elif (
            getattr(self._config, "model_type")
            not in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
        ):
            if not trust_remote_code:
                eval_logger.warning(
                    "HF model type is neither marked as CausalLM nor Seq2SeqLM. "
                    "This is expected if your model requires `trust_remote_code=True` but may be an error otherwise."
                )
            # if the model type is in neither the causal LM nor the seq2seq registry,
            # default to AutoModelForCausalLM
            self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
        else:
            self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM

        assert self.AUTO_MODEL_CLASS in [
            transformers.AutoModelForCausalLM,
            transformers.AutoModelForSeq2SeqLM,
        ]
        if not gptq:
            if load_in_4bit:
                assert (
                    transformers.__version__ >= "4.30.0"
                ), "load_in_4bit requires transformers >= 4.30.0"
            if transformers.__version__ >= "4.30.0":
                model_kwargs["load_in_4bit"] = load_in_4bit
                if load_in_4bit:
                    if bnb_4bit_quant_type:
                        model_kwargs["bnb_4bit_quant_type"] = bnb_4bit_quant_type
                    if bnb_4bit_compute_dtype:
                        model_kwargs["bnb_4bit_compute_dtype"] = utils.get_dtype(
                            bnb_4bit_compute_dtype
                        )
            self._model = self.AUTO_MODEL_CLASS.from_pretrained(
                pretrained,
                revision=revision,
                torch_dtype=utils.get_dtype(dtype),
                low_cpu_mem_usage=low_cpu_mem_usage,
                trust_remote_code=trust_remote_code,
                load_in_8bit=load_in_8bit,
                **model_kwargs,
            )
        else:
            try:
                from auto_gptq import AutoGPTQForCausalLM
            except ModuleNotFoundError:
                raise Exception(
                    "Tried to load auto_gptq, but auto-gptq is not installed. "
                    "Please install auto-gptq via `pip install lm-eval[gptq]` or `pip install -e .[gptq]`"
                )
            self._model = AutoGPTQForCausalLM.from_quantized(
                pretrained,
                model_basename=None if gptq is True else Path(gptq).stem,
                low_cpu_mem_usage=low_cpu_mem_usage,
                trust_remote_code=trust_remote_code,
                use_safetensors=True if gptq is True else gptq.endswith(".safetensors"),
                use_triton=gptq_use_triton,
                warmup_triton=gptq_use_triton,
                **model_kwargs,
            )

        if peft:
            if load_in_4bit:
                assert PEFT_VERSION >= "0.4.0", "load_in_4bit requires peft >= 0.4.0"
            self._model = PeftModel.from_pretrained(
                self._model, peft, revision=revision
            )

        # forever after, access self._model through self.model property
        self.model.eval()
        self.model.tie_weights()
        if gpus <= 1 and not parallelize:
            # place model onto device, if not using HF Accelerate in any form
            try:
                self.model.to(self.device)
            except ValueError:
                eval_logger.info(
                    "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes`. If the desired GPU is being used, this message is safe to ignore."
                )

        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            pretrained if tokenizer is None else tokenizer,
            revision=revision,
            trust_remote_code=trust_remote_code,
            use_fast=use_fast_tokenizer,
        )

        self.truncation = truncation

        self.vocab_size = self.tokenizer.vocab_size
        self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

        self._max_length = max_length

        self.batch_schedule = 1
        self.batch_sizes = {}
        self.max_batch_size = max_batch_size
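
        # `batch_size` may be an int, "auto", or "auto:N". With "auto:N" the largest
        # workable batch size is re-detected N times over the run (see `_batch_scheduler`),
        # since later batches hold shorter sequences and can usually be larger.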

        if str(batch_size).startswith("auto"):
            batch_size = batch_size.split(":")
            self.batch_size_per_gpu = batch_size[0]
            self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1
        else:
            self.batch_size_per_gpu = int(batch_size)

        # multigpu data-parallel support when launched with accelerate
        if gpus > 1:
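            # Three cases when several GPUs are visible:
            #  1. `parallelize=True`: the model is already sharded across GPUs via a
            #     `device_map`, so this must not also be launched with multiple processes.
            #  2. more GPUs than spawned processes: warn and pin this process to one GPU.
            #  3. launched via `accelerate launch` (one process per GPU): wrap the model
            #     for DDP/FSDP data parallelism.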
            if parallelize:
                if accelerator.num_processes > 1:
                    raise RuntimeError(
                        "Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher."
                    )
                else:
                    pass
            elif gpus > accelerator.num_processes:
                # TODO: make sure there's still never an edge case where we unintentionally default to CPU
                eval_logger.warning(
                    "WARNING: The number of total system GPUs does not match the number of spawned processes. "
                    "If you would like to use data parallelism, please launch the script "
                    "with 'accelerate launch *script*'. "
                    f"Current run will proceed with {accelerator.num_processes} devices."
                )
                self._rank = accelerator.local_process_index
                self._world_size = accelerator.num_processes
                # manually set model to use gpu, for case where many GPUs available but
                # only seek to use one
                self._device = (
                    torch.device(f"cuda:{accelerator.local_process_index}")
                    if torch.cuda.is_available()
                    else torch.device("cpu")
                )
                try:
                    self.model.to(self.device)
                except ValueError:
                    eval_logger.info(
                        "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes`. If the desired GPU is being used, this message is safe to ignore."
                    )
            else:
                assert accelerator.distributed_type in [
                    DistributedType.FSDP,
                    DistributedType.MULTI_GPU,
                ], "Unsupported distributed type provided. Only DDP and FSDP are supported."
                if accelerator.distributed_type == DistributedType.FSDP:
                    self._model = accelerator.prepare(self.model)
                else:
                    self._model = accelerator.prepare_model(
                        self.model, evaluation_mode=True
                    )
                self._device = torch.device(f"cuda:{accelerator.local_process_index}")
                self.accelerator = accelerator

                if self.accelerator.is_local_main_process:
                    eval_logger.info(f"Using {gpus} devices with data parallelism")

                self._rank = self.accelerator.local_process_index
                self._world_size = self.accelerator.num_processes
    @property
    def config(self):
        # return the associated transformers.AutoConfig for the given pretrained model.
        return self._config

    @property
    def model(self):
        # returns the model, unwrapping it if using Accelerate
        if hasattr(self, "accelerator"):
            return self.accelerator.unwrap_model(self._model)
        else:
            return self._model

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # if max length manually set, return it
            return self._max_length
        seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
        for attr in seqlen_config_attrs:
            if hasattr(self.model.config, attr):
                return getattr(self.model.config, attr)
        if hasattr(self.tokenizer, "model_max_length"):
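            # this magic value is int(1e30), the sentinel transformers reports when a
            # tokenizer has no real max length; treat it as "unknown" and use the default.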
            if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                return self._DEFAULT_MAX_LENGTH
            return self.tokenizer.model_max_length
        return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        return self.batch_size_per_gpu

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return self._rank

    @property
    def world_size(self):
        return self._world_size

    def _detect_batch_size(self, requests=None, pos: int = 0):
        if requests:
            _, context_enc, continuation_enc = requests[pos]
            max_length = len(
                (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
            )
            max_context_enc = len(context_enc[-(self.max_length + 1) :])
            max_cont_enc = len(continuation_enc[-(self.max_length + 1) :])
        else:
            max_length = self.max_length
            # no requests were passed: fall back to the model's max length for both the
            # encoder and decoder sides, so the seq2seq branch below has defined lengths.
            max_context_enc = max_length
            max_cont_enc = max_length

        # if OOM, then halves batch_size and tries again
        @find_executable_batch_size(starting_batch_size=self.max_batch_size)
        def forward_batch(batch_size):
            if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                length = max(max_context_enc, max_cont_enc)
                batched_conts = torch.ones(
                    (batch_size, length), device=self.device
                ).long()
                test_batch = torch.ones((batch_size, length), device=self.device).long()
                call_kwargs = {
                    "attn_mask": test_batch,
                    "labels": batched_conts,
                }
            else:
                call_kwargs = {}
                test_batch = torch.ones(
                    (batch_size, max_length), device=self.device
                ).long()
            for _ in range(5):
                out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)
                out = out  # Identity process so that it passes pre-commit

            return batch_size

        batch_size = forward_batch()

        if self.world_size > 1:
            # if multi-GPU, always take minimum over all selected batch sizes
            max_rnk_bs = torch.tensor([batch_size], device=self.device)
            gathered = (
                self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist()
            )
            batch_size = min(gathered)
            utils.clear_torch_cache()
            return batch_size

        utils.clear_torch_cache()
        return batch_size

    def tok_encode(
        self, string: str, left_truncate_len=None, add_special_tokens=None
    ) -> List[int]:
        """Encode a string into token ids, optionally left-truncating to `left_truncate_len` tokens."""
        if add_special_tokens is None:
            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                add_special_tokens = False
            elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                add_special_tokens = True

        encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)

        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
        if left_truncate_len:
            encoding = encoding[-left_truncate_len:]

        return encoding

    def tok_batch_encode(
        self,
        strings: List[str],
        padding_side: str = "left",
        left_truncate_len: int = None,
        truncation: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
        old_padding_side = self.tokenizer.padding_side
        self.tokenizer.padding_side = padding_side

        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            add_special_tokens = False
        elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
            add_special_tokens = True

        encoding = self.tokenizer(
            strings,
            truncation=truncation,
            padding="longest",
            return_tensors="pt",
            add_special_tokens=add_special_tokens,
        )
        if left_truncate_len:
            encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
            encoding["attention_mask"] = encoding["attention_mask"][
                :, -left_truncate_len:
            ]
        self.tokenizer.padding_side = old_padding_side

        return encoding["input_ids"], encoding["attention_mask"]

    def tok_decode(self, tokens):
        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            return self.tokenizer.decode(tokens)
        elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
            return self.tokenizer.decode(tokens, skip_special_tokens=True)

    def _model_call(self, inps, attn_mask=None, labels=None):
        """
        :param inps: torch.Tensor
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape
            [batch, sequence_ctx]. The size of sequence may vary from call to call
        :param attn_mask: torch.Tensor, optional
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
        :param labels: torch.Tensor, optional
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
        :return:
            A torch tensor of shape [batch, sequence, vocab] with the
            logits returned from the model's decoder
        """
        with torch.no_grad():
            if attn_mask is not None or labels is not None:
                assert attn_mask is not None and labels is not None
                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
                return self.model(
                    input_ids=inps, attention_mask=attn_mask, labels=labels
                ).logits
            else:
                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
                return self.model(inps).logits

    def _model_generate(self, context, max_length, stop, **generation_kwargs):
        # we require users to pass do_sample=True explicitly
        # for non-greedy gen. This should be reevaluated when considering beam search.
        if "do_sample" not in generation_kwargs.keys():
            generation_kwargs["do_sample"] = False
        # build stopping criteria
        stopping_criteria = stop_sequences_criteria(
            self.tokenizer, stop, 1, context.shape[0]
        )
        return self.model.generate(
            input_ids=context,
            max_length=max_length,
            stopping_criteria=stopping_criteria,
            pad_token_id=self.eot_token_id,
            use_cache=True,
            **generation_kwargs,
        )
    def _select_cont_toks(self, logits, contlen=None, inplen=None):
        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            assert (
                contlen and inplen
            ), "Must pass input len and cont. len to select scored logits for causal LM"
            # discard right-padding.
            # also discard the input/context tokens. we'll only score continuations.
            logits = logits[inplen - contlen : inplen]
        elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
            assert (
                contlen and not inplen
            ), "Selecting scored logits for Seq2SeqLM requires only cont. len"
            # only discard right-padding.
            # the logits input to this fn only contain decoder-side tokens.
            logits = logits[:contlen]

        return logits

    def _encode_pair(
        self, context: str, continuation: str
    ) -> Tuple[List[int], List[int]]:
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]
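            # e.g. ("Question: 1+1= ", "2") becomes ("Question: 1+1=", " 2"), so the
            # whitespace is tokenized together with the continuation, not the context.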
        whole_enc = self.tok_encode(context + continuation, add_special_tokens=False)
        context_enc = self.tok_encode(context, add_special_tokens=False)

        context_enc_len = len(context_enc)
        continuation_enc = whole_enc[context_enc_len:]
        return context_enc, continuation_enc

    def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                # end of text as context
                context_enc, continuation_enc = [self.eot_token_id], self.tok_encode(
                    continuation
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)

            new_reqs.append(((context, continuation), context_enc, continuation_enc))

        return self._loglikelihood_tokens(new_reqs)

    def loglikelihood_rolling(self, requests: List[Instance]) -> List[float]:
        loglikelihoods = []

        adaptive_batch_size = None
        if self.batch_size == "auto":
            # using rolling window with maximum context
            print("Passed argument batch_size = auto. Detecting largest batch size")
            batch_size = self._detect_batch_size()
            print(f"Determined Largest batch size: {batch_size}")
            adaptive_batch_size = batch_size

        for (string,) in tqdm([req.args for req in requests], disable=(self.rank != 0)):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]

            pad_amnt = 0
            if self.world_size > 1:
                # We pad out the external document-level iterator so the inner iterator doesn't hang
                mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
                gathered = (
                    self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
                )

                pad_amnt = max(gathered) - gathered[self.rank]
                if pad_amnt > 0:
                    rolling_token_windows += pad_amnt * [rolling_token_windows[0]]

            string_nll = self._loglikelihood_tokens(
                rolling_token_windows,
                disable_tqdm=True,
                override_bs=adaptive_batch_size,
            )

            if (self.world_size > 1) and (pad_amnt > 0):
                string_nll = [x[0] for x in string_nll[:-pad_amnt]]
            else:
                # discard is_greedy
                string_nll = [x[0] for x in string_nll]

            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)

        return loglikelihoods
    def _batch_scheduler(self, pos, n_reordered_requests):
        sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
        if sched in self.batch_sizes:
            return self.batch_sizes[sched]
        if (len(self.batch_sizes) > 1) and (
            self.batch_sizes[sched - 1] == self.max_batch_size
        ):
            # if previous batch size is already maximal, skip recomputation
            self.batch_sizes[sched] = self.max_batch_size
            return self.batch_sizes[sched]
        print(
            f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
        )
        self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
        print(f"Determined largest batch size: {self.batch_sizes[sched]}")
        return self.batch_sizes[sched]
    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
        override_bs: int = None,
    ) -> List[Tuple[float, bool]]:
        # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
        res = []

        def _collate(x):
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end

            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        re_ord = utils.Reorderer(requests, _collate)

        n_reordered_requests = len(re_ord.get_reordered())
        # automatic (variable) batch size detection for vectorization
        # pull longest context sample from request
        chunks = utils.chunks(
            re_ord.get_reordered(),
            n=self.batch_size
            if self.batch_size != "auto"
            else override_bs
            if override_bs is not None
            else 0,
            fn=self._batch_scheduler
            if self.batch_size == "auto"
            and n_reordered_requests > 0
            and not override_bs
            else None,
        )

        pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
        for chunk in chunks:
            inps = []
            cont_toks_list = []
            inplens = []

            conts = []
            encoder_attns = []

            padding_len_inp = None
            padding_len_cont = None
            # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
            # tensors, then we pack them together into a batch, call the model, and then pick it all apart
            # again because vectorizing is annoying

            for _, context_enc, continuation_enc in chunk:
                # sanity check
                assert len(context_enc) > 0
                assert len(continuation_enc) > 0
                assert len(continuation_enc) <= self.max_length

                # how this all works (illustrated on a causal decoder-only setup):
                #          CTX      CONT
                # inp    0 1 2 3|4 5 6 7 8 9   <- last token is deleted by inp[:, :-1]
                # model  \               \
                # logits   1 2 3|4 5 6 7 8 9   <- the ctx half gets tossed out by the
                # cont_toks      4 5 6 7 8 9      [:, -len(continuation_enc):, :self.vocab_size] slice

                # when too long to fit in context, truncate from the left
                if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                    inp = torch.tensor(
                        (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape
                elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                    inp = torch.tensor(
                        (context_enc)[-self.max_length :],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape

                    # build encoder attn masks
                    encoder_attns.append(torch.ones_like(inp))

                    cont = torch.tensor(
                        (continuation_enc)[-self.max_length :],
                        # TODO: left-shift these?
                        # TODO: our code assumes we never end up truncating conts for either model type
                        dtype=torch.long,
                        device=self.device,
                    )
                    (contlen,) = cont.shape

                    conts.append(cont)

                    padding_len_cont = (
                        max(padding_len_cont, contlen)
                        if padding_len_cont is not None
                        else contlen
                    )

                padding_len_inp = (
                    max(padding_len_inp, inplen)
                    if padding_len_inp is not None
                    else inplen
                )
                inps.append(inp)  # [1, inp_length]
                cont_toks_list.append(continuation_enc)
                inplens.append(inplen)
            # create encoder attn mask and batched conts, if seq2seq
            call_kwargs = {}
            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                batched_inps = utils.pad_and_concat(
                    padding_len_inp, inps, padding_side="right"
                )  # [batch, padding_len_inp]
            elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                # TODO: left-pad encoder inps and mask?
                batched_inps = utils.pad_and_concat(
                    padding_len_inp, inps
                )  # [batch, padding_len_inp]
                batched_conts = utils.pad_and_concat(
                    padding_len_cont, conts
                )  # [batch, padding_len_cont]
                batched_encoder_mask = utils.pad_and_concat(
                    padding_len_inp, encoder_attns
                )  # [batch, padding_len_inp]
                call_kwargs = {
                    "attn_mask": batched_encoder_mask,
                    "labels": batched_conts,
                }
            multi_logits = F.log_softmax(
                self._model_call(batched_inps, **call_kwargs), dim=-1
            )  # [batch, padding_length (inp or cont), vocab]
            for (cache_key, _, _), logits, inplen, cont_toks in zip(
                chunk, multi_logits, inplens, cont_toks_list
            ):
                # Slice to original seq length
                contlen = len(cont_toks)
                # take only logits in the continuation
                # (discard context toks if decoder-only ; discard right-padding)
                # also discards + checks for "virtual tokens" in the causal LM's input window
                # from prompt/prefix tuning tokens, if applicable
                ctx_len = (
                    inplen + (logits.shape[0] - padding_len_inp)
                    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
                    else None
                )
                logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
                logits = logits.unsqueeze(0)  # [1, seq, vocab]

                # Check if per-token argmax is exactly equal to continuation
                greedy_tokens = logits.argmax(dim=-1)
                cont_toks = torch.tensor(
                    cont_toks, dtype=torch.long, device=self.device
                ).unsqueeze(
                    0
                )  # [1, seq]
                max_equal = (greedy_tokens == cont_toks).all()

                # Obtain log-probs at the corresponding continuation token indices
                # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
                logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
                    -1
                )  # [1, seq]

                # Answer: (log prob, is-exact-match)
                answer = (float(logits.sum()), bool(max_equal))

                res.append(answer)

                self.cache_hook.add_partial("loglikelihood", cache_key, answer)
                pbar.update(1)

        pbar.close()
        return re_ord.get_original(res)

    def generate_until(self, requests: List[Instance]) -> List[str]:
        res = defaultdict(list)
        re_ords = {}
        def _collate(x):
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
            toks = self.tok_encode(x[0])
            return -len(toks), x[0]
        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        grouper = utils.Grouper(requests, lambda x: str(x.args[1]))
        for key, reqs in grouper.get_grouped().items():
            # within each set of reqs for given kwargs, we reorder by token length, descending.
            re_ords[key] = utils.Reorderer([req.args for req in reqs], _collate)
        pbar = tqdm(total=len(requests), disable=(self.rank != 0))
        adaptive_batch_size = None
        if self.batch_size == "auto":
            # using rolling window with maximum context
            print("Passed argument batch_size = auto. Detecting largest batch size")
            batch_size = self._detect_batch_size()
            print(f"Determined Largest batch size: {batch_size}")
            adaptive_batch_size = batch_size
        # for each different set of kwargs, we execute all requests, by batch.
        for key, re_ord in re_ords.items():
            chunks = utils.chunks(
                re_ord.get_reordered(),
                n=self.batch_size
                if self.batch_size != "auto"
                else adaptive_batch_size
                if adaptive_batch_size is not None
                else 0,
                fn=self._batch_scheduler
                if self.batch_size == "auto" and not adaptive_batch_size
                else None,
            )
            for chunk in chunks:
                contexts, all_gen_kwargs = zip(*chunk)
                # we assume all gen kwargs in the batch are the same
                # this is safe to assume because the `grouper` object ensures it.
                gen_kwargs = all_gen_kwargs[0]
                # unpack our keyword arguments.
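                # Illustrative only -- a task's gen_kwargs might look like
                #   {"until": ["\n\n"], "do_sample": False, "max_gen_toks": 128}
                # "until" and "max_gen_toks" are consumed below; remaining keys are
                # forwarded unchanged to `self.model.generate` via `_model_generate`.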
                until = None
                if isinstance(gen_kwargs, dict):
                    kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
                    if "until" in kwargs.keys():
                        until = kwargs.pop("until")
                        if isinstance(until, str):
                            until = [until]
                        elif not isinstance(until, list):
                            raise ValueError(
                                f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
                            )
                else:
                    raise ValueError(
                        f"Expected `kwargs` to be of type `dict` but got {kwargs}"
                    )
                if not until:
                    until = [self.tok_decode(self.eot_token_id)]
                if "max_gen_toks" in kwargs.keys():
                    max_gen_toks = kwargs.pop("max_gen_toks")
                else:
                    max_gen_toks = self.max_gen_toks
                # set the max length in tokens of inputs ("context_enc")
                if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                    # max len for inputs = max length, minus room to generate the max new tokens
                    max_ctx_len = self.max_length - max_gen_toks
                elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                    # max len for inputs = encoder's whole max_length
                    max_ctx_len = self.max_length
                # encode, pad, and truncate contexts for this batch
                context_enc, attn_masks = self.tok_batch_encode(
                    contexts,
                    left_truncate_len=max_ctx_len,
                    truncation=self.truncation,
                )
                context_enc = context_enc.to(self.device)
                attn_masks = attn_masks.to(self.device)

                if "max_length" not in kwargs:
                    kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
                # perform batched generation
                cont = self._model_generate(
                    context=context_enc,
                    attention_mask=attn_masks,
                    stop=until,
                    **kwargs,
                )
                cont_toks_list = cont.tolist()
                for cont_toks, context in zip(cont_toks_list, contexts):
                    # discard context + left-padding toks if using causal decoder-only LM
                    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                        cont_toks = cont_toks[context_enc.shape[1] :]
                    s = self.tok_decode(cont_toks)
                    # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
                    for term in until:
                        if len(term) > 0:
                            # ignore '' separator,
                            # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
                            s = s.split(term)[0]
                    res[key].append(s)
                    self.cache_hook.add_partial(
                        "generate_until", (context, gen_kwargs), s
                    )
                    pbar.update(1)
            # reorder this group of results back to original unsorted form
            res[key] = re_ord.get_original(res[key])
        pbar.close()
        return grouper.get_original(res)