import copy
import logging
import os
from datetime import timedelta
from pathlib import Path
from typing import Dict, List, Literal, Optional, Tuple, Union

import jinja2
import torch
import torch.nn.functional as F
import transformers
from accelerate import (
    Accelerator,
    InitProcessGroupKwargs,
    find_executable_batch_size,
)
from accelerate.utils import get_max_memory
from huggingface_hub import HfApi
from packaging import version
from peft import PeftModel
from peft import __version__ as PEFT_VERSION
from tqdm import tqdm
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
)

from lm_eval import utils
from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import (
    Collator,
    clear_torch_cache,
    configure_pad_token,
    get_dtype,
    handle_stop_sequences,
    pad_and_concat,
    stop_sequences_criteria,
)


eval_logger = logging.getLogger(__name__)


@register_model("hf-auto", "hf", "huggingface")
class HFLM(TemplateLM):
    """
    An abstracted Huggingface model class. Enables usage with both models of
    `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes.

    Supports data-parallel multi-GPU with HF Accelerate.
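
    A rough usage sketch (illustrative; the model name is an assumption, and in
    practice this class is usually constructed via `lm_eval.simple_evaluate`
    using the registered names "hf", "huggingface", or "hf-auto"):

        lm = HFLM(pretrained="gpt2", device="cuda", batch_size=8)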
    """

    AUTO_MODEL_CLASS = None
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: Union[str, transformers.PreTrainedModel],
        backend: Literal["default", "causal", "seq2seq"] = "default",
        # override whether the model should be treated as decoder-only (causal) or encoder-decoder (seq2seq)
        revision: Optional[str] = "main",
        subfolder: Optional[str] = None,
        tokenizer: Optional[
            Union[
                str,
                transformers.PreTrainedTokenizer,
                transformers.PreTrainedTokenizerFast,
            ]
        ] = None,
        truncation: Optional[bool] = False,
        logits_cache: bool = True,
        max_length: Optional[int] = None,
        device: Optional[str] = "cuda",
        dtype: Optional[Union[str, torch.dtype]] = "auto",
        batch_size: Optional[Union[int, str]] = 1,
        max_batch_size: Optional[int] = 64,
        trust_remote_code: Optional[bool] = False,
        use_fast_tokenizer: Optional[bool] = True,
        add_bos_token: Optional[bool] = False,
        prefix_token_id: Optional[int] = None,
        # arguments used for splitting a model across GPUs naively.
        # only used if `parallelize=True`.
        parallelize: Optional[bool] = False,
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[Union[str, os.PathLike]] = "./offload",
        # PEFT, delta weights and quantization options
        peft: Optional[str] = None,
        delta: Optional[str] = None,
        autogptq: Optional[Union[bool, str]] = False,
        gptqmodel: Optional[bool] = False,
        gguf_file: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        # optionally: take in an already-initialized transformers.PreTrainedModel
        if not isinstance(pretrained, str):
            eval_logger.warning(
                "`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way."
            )
            assert not parallelize, (
                "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`"
            )
            self._model = pretrained
            self._device = self._model.device
            self._config = self._model.config
            gpus = 0

        else:
            assert isinstance(device, str)
            assert isinstance(pretrained, str)
            assert isinstance(batch_size, (int, str))

            gpus = torch.cuda.device_count()
            accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
            accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
            if accelerator.num_processes > 1:
                self.accelerator = accelerator

            if "npu" in accelerator.device.type:
                gpus = torch.npu.device_count()

            # using one process with no model parallelism
            if not (parallelize or accelerator.num_processes > 1):
                # use user-passed device
                device_list = set(
                    ["cuda", "cpu"]
                    + [f"cuda:{i}" for i in range(gpus)]
                    + ["mps", "mps:0"]
                    + [f"npu:{i}" for i in range(gpus)]
                )
                if device and device in device_list:
                    self._device = torch.device(device)
                    eval_logger.info(f"Using device '{device}'")
                    if device in ("mps", "mps:0") and version.parse(
                        torch.__version__
                    ) < version.parse("2.1"):
                        raise RuntimeError(
                            f"mps requires torch >= 2.1. You have {torch.__version__}"
                        )
                else:
                    eval_logger.info("Device not specified")
                    eval_logger.info(f"Cuda Available? {torch.cuda.is_available()}")
                    self._device = (
                        torch.device("cuda")
                        if torch.cuda.is_available()
                        else torch.device("cpu")
                    )
            else:  # Parallelism managed by accelerate
                if device != "cuda":
                    eval_logger.info(
                        f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model."
                    )
                # TODO: include in warning that `load_in_8bit` etc. affect this too
                self._device = (
                    self.accelerator.device
                    if hasattr(self, "accelerator")
                    else torch.device(device)
                )

            revision = str(revision)  # cast to string if not already one
            # TODO: update this to be less of a hack once subfolder is fixed in HF
            revision = revision + ("/" + subfolder if subfolder is not None else "")

            self._get_config(
                pretrained,
                revision=revision,
                trust_remote_code=trust_remote_code,
                gguf_file=gguf_file,
            )

            # determine which of 'causal' and 'seq2seq' backends to use for HF models
        self._get_backend(
            config=self.config, backend=backend, trust_remote_code=trust_remote_code
        )

        # load tokenizer so we know tokenizer vocabulary size before loading model and PEFT
        self._create_tokenizer(
            pretrained,
            tokenizer,
            revision=revision,
            trust_remote_code=trust_remote_code,
            use_fast_tokenizer=use_fast_tokenizer,
            gguf_file=gguf_file,
            add_bos_token=add_bos_token,
        )

        # if we passed `pretrained` as a string, initialize our model now
        if isinstance(pretrained, str):
            self._create_model(
                pretrained=pretrained,
                revision=revision,
                dtype=dtype,
                trust_remote_code=trust_remote_code,
                parallelize=parallelize,
                gpus=gpus,
                max_memory_per_gpu=max_memory_per_gpu,
                max_cpu_memory=max_cpu_memory,
                offload_folder=offload_folder,
                peft=peft,
                delta=delta,
                autogptq=autogptq,
                gptqmodel=gptqmodel,
                gguf_file=gguf_file,
                **kwargs,
            )

        # access self._model through self.model property outside this method
        if isinstance(self.model, torch.nn.Module):
            self.model.eval()
            self.model.tie_weights()

        self.truncation = truncation
        self.logits_cache = logits_cache
        self.vocab_size = self.tokenizer.vocab_size
        # select (or create) a pad token to use
        self.tokenizer = configure_pad_token(self.tokenizer, model_config=self.config)

        self.add_bos_token = add_bos_token
        if "gemma" in getattr(self.config, "model_type", ""):
            self.add_bos_token = True
            eval_logger.info(
                f"Model type is '{self.config.model_type}', part of the Gemma family--a BOS token will be used as Gemma underperforms without it."
            )

        self._max_length = max_length
        self.pretrained = pretrained
        self.delta = delta
        self.peft = peft
        self.revision = revision

        self.batch_schedule = 1
        self.batch_sizes = {}
        self.max_batch_size = max_batch_size

        if str(batch_size).startswith("auto"):
            batch_size = batch_size.split(":")
            self.batch_size_per_gpu = batch_size[0]
            self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1
        else:
            self.batch_size_per_gpu = int(batch_size)
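        # e.g. batch_size="auto" -> ("auto", schedule 1); batch_size="auto:4" ->
        # ("auto", schedule 4.0); batch_size=8 -> a fixed batch of 8 per GPU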

        if isinstance(pretrained, str):
            if gpus >= 1 or str(self.device) == "mps":
                # TODO: can remove this whole snippet except in the mps case, perhaps?
                if not (parallelize or autogptq or hasattr(self, "accelerator")):
                    # place model onto device requested manually,
                    # if not using HF Accelerate or device_map
                    # or any other option that preloads model onto device
                    try:
                        self.model.to(self.device)
                    except ValueError:
                        eval_logger.debug(
                            "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore."
                        )
            # multigpu data-parallel support when launched with accelerate
            if gpus > 1:
                if accelerator.num_processes > 1:
                    if parallelize:
                        eval_logger.warning(
                            "You are both using a HF Accelerate `device_map` (`--model_args parallelize=True`) and launching via `accelerate launch`. This will attempt to do model and data parallelism depending on the resources available."
                        )
                    elif gpus > accelerator.num_processes:
                        eval_logger.warning(
                            "The total number of system GPUs does not match the number of spawned processes. "
                            "If you would like to use data parallelism, please launch the script "
                            "with 'accelerate launch *script*'. "
                            f"Current run will proceed with {accelerator.num_processes} devices."
                        )
                        if self.accelerator.is_local_main_process:
                            eval_logger.info(
                                f"Using {gpus} devices with data parallelism"
                            )

                    self._device = torch.device(f"{accelerator.device}")
                    self.accelerator = accelerator

                    self._rank = self.accelerator.local_process_index
                    self._world_size = self.accelerator.num_processes
                else:
                    # if we aren't launching via accelerate, ditch
                    self._rank = 0
                    self._world_size = 1
        else:
            # if a PreTrainedModel was passed into HFLM, we forgo distributed setup.
            eval_logger.warning(
                "Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration"
            )
            self._rank = 0
            self._world_size = 1

        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(
                f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
            )

    def _get_accelerate_args(
        self,
        parallelize: Optional[bool] = None,
        device_map: Optional[str] = "auto",
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[str] = "./offload",
        gpus: Optional[int] = None,
    ) -> dict:
        """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
        num_local_processes = int(os.environ.get("LOCAL_WORLD_SIZE", 1))
        num_machines = int(os.environ.get("WORLD_SIZE", 0)) // num_local_processes
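        # e.g. under `torchrun` with 2 nodes x 4 GPUs each, WORLD_SIZE=8 and
        # LOCAL_WORLD_SIZE=4, giving num_machines == 2; without a distributed
        # launcher the env vars are unset and the defaults above apply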
        if (
            num_machines == 0
            and hasattr(self, "accelerator")
            and self.accelerator is not None
        ):
            eval_logger.info(
                "We are not in a distributed setting for accelerate. Setting model_parallel to False."
            )
            parallelize = False

        if parallelize is None:
            # If parallelism is unset by the user, we automatically assign model parallelism
            # if enough extra GPUs are available
            max_memory_all_gpus = get_max_memory()
            # We just want gpu, not cpu, max memory
            if "cpu" in max_memory_all_gpus:
                del max_memory_all_gpus["cpu"]
            parallelize = bool(num_local_processes < len(max_memory_all_gpus))
            eval_logger.info(
                f"Setting model parallel to {parallelize} since "
                f"the number of local processes is {num_local_processes} "
                f"and the number of GPUs is {len(max_memory_all_gpus)}"
            )

        args = {}
        if parallelize:  # Model parallelism will be used
            max_memory = {}
            if max_memory_per_gpu is not None:  # Using the provided memory requirements
                max_memory_per_gpu_map = {
                    device_idx: max_memory_per_gpu for device_idx in range(gpus)
                }
            else:  # Estimating the possible memory requirements
                max_memory_all_gpus = get_max_memory()
                if "cpu" in max_memory_all_gpus:
                    del max_memory_all_gpus["cpu"]
                if not hasattr(self, "accelerator"):
                    max_memory_per_gpu_map = {
                        k: v for k, v in max_memory_all_gpus.items()
                    }
                else:
                    # use only 1 / num_processes of the GPUs if we are running under accelerate launch
                    # use only 1 / num_processes of the GPUs if we are running under accelerate launch
                    max_memory_per_gpu_map = {
                        k: v
                        for k, v in max_memory_all_gpus.items()
                        if k % num_local_processes
                        == (self.accelerator.process_index % num_local_processes)
                    }
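                    # e.g. 8 GPUs with LOCAL_WORLD_SIZE=4: process_index 1 keeps
                    # GPUs {1, 5} under this modulo split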
            args["max_memory"] = max_memory_per_gpu_map
            args["device_map"] = "auto" if device_map is None else device_map
            eval_logger.info(
                f"Model parallel was set to True, setting max memory per GPU to {max_memory_per_gpu_map} and device map to {args.get('device_map')}"
            )

            if max_cpu_memory is not None:
                max_memory["cpu"] = max_cpu_memory

            args["offload_folder"] = offload_folder
        elif (
            device_map is None
        ):  # No model parallelism, we use the default provided device for our model
            if hasattr(self, "accelerator"):
                device_map = {"": f"{self.accelerator.device}"}
            else:
                device_map = {"": str(self.device)}
            args["max_memory"] = None
            args["device_map"] = device_map
            eval_logger.info(
                f"Model parallel was set to False, max memory was not set, and device map was set to {device_map}"
            )
        else:
            args["max_memory"] = None
            args["device_map"] = None
            eval_logger.info("Model parallel was set to False.")

        return args

    @property
    def config(self):
        # return the associated transformers.AutoConfig for the given pretrained model.
        return self._config

    @property
    def model(self):
        # returns the model, unwrapping it if using Accelerate
        if hasattr(self, "accelerator"):
            return self.accelerator.unwrap_model(self._model)
        else:
            return self._model

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

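    # Fallback order used below: explicit `prefix_token_id` argument, then the
    # tokenizer's BOS token, then EOS (for tokenizers that define no BOS token).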
    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # if max length manually set, return it
            return self._max_length
        seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
        for attr in seqlen_config_attrs:
            if hasattr(self.model.config, attr):
                return getattr(self.model.config, attr)
        if hasattr(self.tokenizer, "model_max_length"):
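            # this magic constant is transformers' VERY_LARGE_INTEGER sentinel
            # (int(1e30)), reported when the tokenizer does not know the real limit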
            if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                return self._DEFAULT_MAX_LENGTH
            return self.tokenizer.model_max_length
        return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        return self.batch_size_per_gpu

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return self._rank

    @property
    def world_size(self):
        return self._world_size

    @property
    def tokenizer_name(self) -> str:
        return self.tokenizer.name_or_path.replace("/", "__")

    def _get_backend(
        self,
        config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
        backend: Literal["default", "causal", "seq2seq"] = "default",
        trust_remote_code: Optional[bool] = False,
    ) -> None:
        """
        Helper method during initialization.
        Determines the backend ("causal" (decoder-only) or "seq2seq" (encoder-decoder)) model type to be used.
        Sets `self.AUTO_MODEL_CLASS` appropriately if not already set.

        **If not calling HFLM.__init__() or HFLM._get_backend() within a subclass of HFLM,
        user must set `self.backend` to be either "causal" or "seq2seq" manually!**
        """

        assert backend in ["default", "causal", "seq2seq"]

        if backend != "default":
            # if we've settled on non-default backend, use that manually
            if backend == "causal":
                self.backend = backend
            elif backend == "seq2seq":
                self.backend = backend
            eval_logger.info(
                f"Overrode HF model backend type, and using type '{self.backend}'"
            )
        else:
            # determine and use the default HF backend for this model, based on its config + metadata.
            if (
                getattr(config, "model_type")
                in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
            ):
                # first check if model type is listed under seq2seq models, since some
                # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers.
                # these special cases should be treated as seq2seq models.
                self.backend = "seq2seq"
                eval_logger.debug(f"Using model type '{self.backend}'")
            elif (
                getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
            ):
                self.backend = "causal"
                eval_logger.debug(f"Using model type '{self.backend}'")
            else:
                if not trust_remote_code:
                    eval_logger.warning(
                        "HF model type is neither marked as CausalLM nor Seq2SeqLM. "
                        "This is expected if your model requires `trust_remote_code=True` but may be an error otherwise. "
                        "Setting backend to causal."
                    )
                # if the model type is in neither of HF transformers' causal nor seq2seq registries,
                # then we default to assuming AutoModelForCausalLM
                self.backend = "causal"
                eval_logger.info(
                    f"Model type cannot be determined. Using default model type '{self.backend}'"
                )

        if self.AUTO_MODEL_CLASS is None:
            if self.backend == "causal":
                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
            elif self.backend == "seq2seq":
                self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM

    def _get_config(
        self,
        pretrained: str,
        revision: str = "main",
        trust_remote_code: bool = False,
        gguf_file: Optional[str] = None,
    ) -> None:
        """Return the model config for HuggingFace models"""
        self._config = transformers.AutoConfig.from_pretrained(
            pretrained,
            revision=revision,
            trust_remote_code=trust_remote_code,
            gguf_file=gguf_file,
        )

    def _create_model(
        self,
        pretrained: str,
        revision: Optional[str] = "main",
        dtype: Optional[Union[str, torch.dtype]] = "auto",
        trust_remote_code: Optional[bool] = False,
        # arguments used for splitting a model across GPUs naively.
        # only used if `parallelize=True`.
        # (accelerate naive PP (device_map) options)
        parallelize: Optional[bool] = False,
        gpus: Optional[int] = None,
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[str] = "./offload",
        # PEFT, delta weights and quantization options
        peft: Optional[str] = None,
        delta: Optional[str] = None,
        autogptq: Optional[Union[bool, str]] = False,
        gptqmodel: Optional[bool] = False,
        gguf_file: Optional[str] = None,
        **kwargs,
    ) -> None:
        """
        Initializes an HF or HF-compatible PreTrainedModel from scratch
        inside HFLM, using the kwargs passed into self.__init__().

        Also handles functionality such as AutoGPTQ usage and PEFT wrapping.

        For future similar extensions to AutoGPTQ that are not core to HF's ecosystem,
        (such as PyTorch models that are nearly, but not quite, fully mirroring
        HF's public interface relied on in this HFLM class)
        please consider subclassing HFLM and overriding this and other methods as needed.
        """

        model_kwargs = kwargs if kwargs else {}

        model_kwargs.update(
            self._get_accelerate_args(
                parallelize=parallelize,
                device_map=kwargs.get("device_map", None),
                max_memory_per_gpu=max_memory_per_gpu,
                max_cpu_memory=max_cpu_memory,
                offload_folder=offload_folder,
                gpus=gpus,
            )
        )

        if not autogptq and not gptqmodel:
            if model_kwargs.get("load_in_4bit", None):
                assert version.parse(transformers.__version__) >= version.parse(
                    "4.30.0"
                ), "load_in_4bit requires transformers >= 4.30.0"
            if version.parse(transformers.__version__) >= version.parse("4.30.0"):
                if model_kwargs.get("load_in_4bit", None):
                    if model_kwargs.get("bnb_4bit_compute_dtype", None):
                        model_kwargs["bnb_4bit_compute_dtype"] = get_dtype(
                            model_kwargs["bnb_4bit_compute_dtype"]
                        )

            self._model = self.AUTO_MODEL_CLASS.from_pretrained(
                pretrained,
                revision=revision,
                torch_dtype=get_dtype(dtype),
                trust_remote_code=trust_remote_code,
                gguf_file=gguf_file,
                **model_kwargs,
            )
        else:
            if autogptq and gptqmodel:
                raise ValueError(
                    "Cannot use both 'autogptq' and 'gptqmodel' options at the same time."
                )

            if autogptq:
                try:
                    from auto_gptq import AutoGPTQForCausalLM
                except ModuleNotFoundError as exception:
                    raise type(exception)(
                        "Tried to load auto_gptq, but auto-gptq is not installed. "
                        "Please install auto-gptq via `pip install lm-eval[gptq]` or `pip install -e .[gptq]`",
                    )

                self._model = AutoGPTQForCausalLM.from_quantized(
                    pretrained,
                    trust_remote_code=trust_remote_code,
                    model_basename=None if autogptq is True else Path(autogptq).stem,
                    use_safetensors=True
                    if autogptq is True
                    else autogptq.endswith(".safetensors"),
                    **model_kwargs,
                )

            if gptqmodel:
                try:
                    from gptqmodel import GPTQModel
                except ModuleNotFoundError as exception:
                    raise type(exception)(
                        "Tried to load gptqmodel, but gptqmodel is not installed. "
                        "Please install gptqmodel via `pip install gptqmodel --no-build-isolation` or `pip install lm-eval[gptqmodel] --no-build-isolation`",
                    )

                self._model = GPTQModel.from_quantized(
                    pretrained, trust_remote_code=trust_remote_code, **model_kwargs
                )
633

        if peft and delta:
            raise ValueError(
                "Cannot use both 'peft' and 'delta' options at the same time."
            )

        if peft:
            if model_kwargs.get("load_in_4bit", None):
                if version.parse(PEFT_VERSION) < version.parse("0.4.0"):
                    raise AssertionError("load_in_4bit requires peft >= 0.4.0")
            if self._model.config.vocab_size != len(self.tokenizer):
                # resize model for LoRAs with added tokens
                eval_logger.info(
                    f"Model config indicates vocab_size='{self._model.config.vocab_size}', but found tokenizer with vocab size '{len(self.tokenizer)}'. Resizing model embedding layer..."
                )
                self._model.resize_token_embeddings(len(self.tokenizer))
            self._model = PeftModel.from_pretrained(
                self._model, peft, revision=revision
            )
        elif delta:
            if autogptq:
                eval_logger.warning(
                    "Delta weights might trigger unexpected behavior when used with AutoGPTQ."
                )
            _model_delta = self.AUTO_MODEL_CLASS.from_pretrained(
                delta,
                revision=revision,
                torch_dtype=get_dtype(dtype),
                trust_remote_code=trust_remote_code,
                **model_kwargs,
            )
            for name, param in self._model.state_dict().items():
                try:
                    param.data += _model_delta.state_dict()[name]
                except KeyError:
                    raise KeyError(f"Delta model is missing weights for layer: {name}")
                except Exception as e:
                    raise RuntimeError(
                        f"Failed to add delta weights to layer {name}. Error: {e}"
                    )

            del _model_delta

        return None

    def _create_tokenizer(
        self,
        pretrained: Union[str, transformers.PreTrainedModel],
        tokenizer: Optional[
            Union[
                str,
                transformers.PreTrainedTokenizer,
                transformers.PreTrainedTokenizerFast,
            ]
        ],
        revision: Optional[str] = "main",
        trust_remote_code: Optional[bool] = False,
        use_fast_tokenizer: Optional[bool] = True,
        gguf_file: Optional[str] = None,
        add_bos_token: Optional[bool] = False,
    ) -> None:
        """
        Helper method during initialization.

        Create a tokenizer object corresponding to the correct
        tokenizer for value of `pretrained`, or use the pre-initialized tokenizer passed.
        """
        kwargs = {
            "revision": revision,
            "trust_remote_code": trust_remote_code,
        }

        # gguf format embeds tokenizer and is not compatible with hf tokenizer `use_fast` param
        if gguf_file is not None:
            kwargs["gguf_file"] = gguf_file
        else:
            kwargs["use_fast"] = use_fast_tokenizer

        if add_bos_token:
            kwargs["add_bos_token"] = True

        if tokenizer:
            if isinstance(tokenizer, str):
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    tokenizer, **kwargs
                )
            else:
                assert isinstance(
                    tokenizer, transformers.PreTrainedTokenizer
                ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
                self.tokenizer = tokenizer
        else:
            # Get tokenizer based on 'pretrained'
            if isinstance(pretrained, str):
                model_name = pretrained
            else:
                # get the HF hub name via accessor on model
                model_name = self.model.name_or_path
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                model_name, **kwargs
            )
        return None

    def _detect_batch_size(self, requests=None, pos: int = 0):
        if requests:
            _, context_enc, continuation_enc = requests[pos]
            max_length = len(
                (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
            )
            max_context_enc = len(context_enc[-(self.max_length + 1) :])
            max_cont_enc = len(continuation_enc[-(self.max_length + 1) :])
        else:
            max_length = self.max_length
            max_context_enc = max_length
            max_cont_enc = max_length

        # if OOM, then halves batch_size and tries again
        @find_executable_batch_size(starting_batch_size=self.max_batch_size)
        def forward_batch(batch_size):
            if self.backend == "seq2seq":
                length = max(max_context_enc, max_cont_enc)
                batched_conts = torch.ones(
                    (batch_size, length), device=self.device
                ).long()
                test_batch = torch.ones((batch_size, length), device=self.device).long()
                call_kwargs = {
                    "attn_mask": test_batch,
                    "labels": batched_conts,
                }
            else:
                call_kwargs = {}
                test_batch = torch.ones(
                    (batch_size, max_length), device=self.device
                ).long()
            for _ in range(5):
                out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)  # noqa: F841

            return batch_size

        try:
            batch_size = forward_batch()
        except RuntimeError as e:
            if "No executable batch size found" in str(e):
                batch_size = 1
            else:
                raise

        if self.world_size > 1:
            # if multi-GPU, always take minimum over all selected batch sizes
            max_rnk_bs = torch.tensor([batch_size], device=self.device)
            gathered = (
                self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist()
            )
            batch_size = min(gathered)
            clear_torch_cache()
            return batch_size

        clear_torch_cache()
        return batch_size

    def tok_encode(
        self, string: str, left_truncate_len=None, add_special_tokens=None
    ) -> List[int]:
        """Encode a string into token ids, optionally controlling special tokens and left-truncation."""
        # when add_special_tokens is None, fall back to the tokenizer default
        # (empty kwargs); this applies to all backends except CausalLM
        special_tokens_kwargs = {}

        # for CausalLM, default to False unless self.add_bos_token is set
        if add_special_tokens is None:
            if self.backend == "causal":
                special_tokens_kwargs = {
                    "add_special_tokens": False or self.add_bos_token
                }
        # otherwise the method explicitly defines the value
        else:
            special_tokens_kwargs = {"add_special_tokens": add_special_tokens}

        encoding = self.tokenizer.encode(string, **special_tokens_kwargs)

        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
        if left_truncate_len:
            encoding = encoding[-left_truncate_len:]

        return encoding

    def tok_batch_encode(
        self,
        strings: List[str],
        padding_side: str = "left",
        left_truncate_len: int = None,
        truncation: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
        old_padding_side = self.tokenizer.padding_side
        self.tokenizer.padding_side = padding_side

        add_special_tokens = {}
        if self.backend == "causal":
            add_special_tokens = {"add_special_tokens": False or self.add_bos_token}

        encoding = self.tokenizer(
            strings,
            truncation=truncation,
            padding="longest",
            return_tensors="pt",
            **add_special_tokens,
        )
        if left_truncate_len:
            original_lengths = encoding["input_ids"].size(1)
            if original_lengths > left_truncate_len:
                eval_logger.warning(
                    f"Left truncation applied. Original sequence length was {original_lengths}, "
                    f"truncating to last {left_truncate_len} tokens. Some content will be lost.",
                )
            encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
            encoding["attention_mask"] = encoding["attention_mask"][
                :, -left_truncate_len:
            ]
        self.tokenizer.padding_side = old_padding_side

        return encoding["input_ids"], encoding["attention_mask"]

    def tok_decode(self, tokens, skip_special_tokens=True):
        return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens)

    def _model_call(self, inps, attn_mask=None, labels=None):
        """
        :param inps: torch.Tensor
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape
            [batch, sequence_ctx]. the size of sequence may vary from call to call
        :param attn_mask: torch.Tensor, optional
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
        :param labels: torch.Tensor, optional
            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
        :return
            A torch tensor of shape [batch, sequence, vocab] with the
        logits returned from the model's decoder
        """
        with torch.no_grad():
            if attn_mask is not None or labels is not None:
                assert attn_mask is not None and labels is not None
                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
                return self.model(
                    input_ids=inps, attention_mask=attn_mask, labels=labels
                ).logits
            else:
                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
                return self.model(inps).logits

    def _model_generate(self, context, max_length, stop, **generation_kwargs):
        # temperature = 0.0 if not set
        # if do_sample is false and temp==0.0:
        # remove temperature, as do_sample=False takes care of this
        # and we don't want a warning from HF
        generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
        do_sample = generation_kwargs.get("do_sample", None)

        # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies
        if generation_kwargs.get("temperature") == 0.0 and do_sample is None:
            generation_kwargs["do_sample"] = do_sample = False

        if do_sample is False and generation_kwargs.get("temperature") == 0.0:
            generation_kwargs.pop("temperature")
        # build stopping criteria
        stopping_criteria = stop_sequences_criteria(
            self.tokenizer, stop, context.shape[1], context.shape[0]
        )
        return self.model.generate(
            input_ids=context,
            max_length=max_length,
            stopping_criteria=stopping_criteria,
            pad_token_id=self.tokenizer.pad_token_id,
            use_cache=True,
            **generation_kwargs,
        )

    def _select_cont_toks(
        self, logits: torch.Tensor, contlen: int = None, inplen: int = None
    ) -> torch.Tensor:
        if self.backend == "causal":
            assert contlen and inplen, (
                "Must pass input len and cont. len to select scored logits for causal LM"
            )
            # discard right-padding.
            # also discard the input/context tokens. we'll only score continuations.
            logits = logits[inplen - contlen : inplen]
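            # worked example: a 4-token context with a 5-token continuation yields a
            # model input of the first 8 tokens (inplen=8, contlen=5), so logits[3:8]
            # are the positions whose next-token predictions score the continuation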
        elif self.backend == "seq2seq":
            assert contlen and not inplen, (
                "Selecting scored logits for Seq2SeqLM requires only cont. len"
            )
            # only discard right-padding.
            # the logits input to this fn only contain decoder-side tokens.
            logits = logits[:contlen]

        return logits

    def loglikelihood_rolling(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[float]:
        adaptive_batch_size = None
        if self.batch_size == "auto":
            # using rolling window with maximum context
            print("Passed argument batch_size = auto. Detecting largest batch size")
            batch_size = self._detect_batch_size()
            print(f"Determined largest batch size: {batch_size}")
            adaptive_batch_size = batch_size

        # First, collect all windows from all requests
        all_windows = []  # List of (request_idx, window) tuples
        request_window_counts = []  # Track number of windows per request

        for req_idx, (string,) in enumerate(
            tqdm(
                [req.args for req in requests],
                disable=(disable_tqdm or (self.rank != 0)),
            )
        ):
            rolling_token_windows: List[Tuple[List[int], List[int]]] = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.prefix_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )
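            # e.g. with max_length=2048, a 5000-token document should yield three
            # disjoint windows scoring 2048 + 2048 + 904 tokens, one pass each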

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            windows = [(None,) + x for x in rolling_token_windows]

            # Store windows with their request index
            all_windows.extend((req_idx, window) for window in windows)
            request_window_counts.append(len(windows))

        # Handle distributed case padding
        pad_amnt = 0
        if self.world_size > 1:
            mytensor = torch.tensor(len(all_windows), device=self.device)
            gathered = self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
            pad_amnt = max(gathered) - gathered[self.rank]
            if pad_amnt > 0:
                all_windows += pad_amnt * [all_windows[0]]

        all_nlls = []
        batch_size = adaptive_batch_size or self.batch_size
        for i in range(0, len(all_windows), batch_size):
            batch = all_windows[i : i + batch_size]
            # Extract just the windows for processing, keeping track of request indices
            batch_indices, batch_windows = zip(*batch)

            batch_nlls = self._loglikelihood_tokens(
                requests=batch_windows,
                disable_tqdm=False,
                override_bs=len(batch_windows),
            )
            # Store results with their request indices
            all_nlls.extend(zip(batch_indices, batch_nlls))

        # Remove padding if necessary
        if (self.world_size > 1) and (pad_amnt > 0):
            all_nlls = all_nlls[:-pad_amnt]

        # Reconstruct per-request loglikelihoods
        loglikelihoods = []
        current_idx = 0
        for window_count in request_window_counts:
            # Get all nlls for this request
            request_nlls = all_nlls[current_idx : current_idx + window_count]
            # Sum up the nlls for this request (discarding is_greedy)
            request_total = sum(nll[0] for _, nll in request_nlls)
            loglikelihoods.append(request_total)
            current_idx += window_count

            string = requests[len(loglikelihoods) - 1].args[0]
            self.cache_hook.add_partial(
                "loglikelihood_rolling", (string,), request_total
            )

        return loglikelihoods

    def _batch_scheduler(self, pos, n_reordered_requests):
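        # sketch of the schedule: with 1000 reordered requests and batch_schedule=4,
        # sched = pos // 250, so the batch size is re-detected roughly at positions
        # 0, 250, 500, and 750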
        sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
        if sched in self.batch_sizes:
            return self.batch_sizes[sched]
        if (len(self.batch_sizes) > 1) and (
            self.batch_sizes[sched - 1] == self.max_batch_size
        ):
            # if previous batch size is already maximal, skip recomputation
            self.batch_sizes[sched] = self.max_batch_size
            return self.batch_sizes[sched]
        print(
            f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
        )
        self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
        print(f"Determined largest batch size: {self.batch_sizes[sched]}")
        return self.batch_sizes[sched]

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
        override_bs: int = None,
    ) -> List[Tuple[float, bool]]:
        # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
        res = []

        def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]):
            """Defines the key for the sorted method"""
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end

            toks = req[1] + req[2]
            return -len(toks), tuple(toks)

        def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]):
            """Defines the key to group and lookup one-token continuations"""
            # Use with group_by="contexts" (optional)
            # allows for the creation of a lookup, so we can reuse logits in case of one-token continuations.
            # speeds up some multiple-choice tasks proportionally to the number of choices.
            # groups requests by context+continuation[:-1] and infers on one request per group.
            return req[-2] + req[-1][:-1]

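        # e.g. a multiple-choice request whose choices are the single tokens
        # " yes" / " no" maps both (context, continuation) pairs to the same key,
        # so one forward pass serves both continuations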
        re_ord = Collator(
            requests,
            sort_fn=_collate,
            group_by="contexts"
            if self.backend == "causal" and self.logits_cache
            else None,
            group_fn=_lookup_one_token_cont,
        )

        # automatic (variable) batch size detection for vectorization
        # pull longest context sample from request
        n_reordered_requests = len(re_ord)
        batch_size = (
            self.batch_size
            if self.batch_size != "auto"
            else override_bs
            if override_bs is not None
            else 0
        )
        batch_fn = (
            self._batch_scheduler
            if self.batch_size == "auto"
            and n_reordered_requests > 0
            and not override_bs
            else None
        )

        chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn)
        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running loglikelihood requests",
        )
        for chunk in chunks:
            inps = []
            cont_toks_list = []
            inplens = []

            conts = []
            encoder_attns = []

            padding_len_inp = None
            padding_len_cont = None
            # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
            # tensors, then we pack them together into a batch, call the model, and then pick it all apart
            # again because vectorizing is annoying

            for _, context_enc, continuation_enc in chunk:
                # sanity check
                assert len(context_enc) > 0
                assert len(continuation_enc) > 0
                assert len(continuation_enc) <= self.max_length

                # how this all works (illustrated on a causal decoder-only setup):
                #          CTX      CONT
                # inp    0 1 2 3|4 5 6 7 8 9   <- last token is deleted by inp[:, :-1]
                # model  \               \
                # logits   1 2 3|4 5 6 7 8 9   <- the ctx half gets tossed out by the
                # cont_toks      4 5 6 7 8 9      [:, -len(continuation_enc):, :self.vocab_size] slice

                # when too long to fit in context, truncate from the left
                if self.backend == "causal":
                    total_length = len(context_enc) + len(continuation_enc)
                    if total_length > self.max_length + 1:
                        eval_logger.warning(
                            f"Combined length of context ({len(context_enc)}) and continuation ({len(continuation_enc)}) "
                            f"exceeds model's maximum length ({self.max_length}). "
                            f"Truncating {total_length - (self.max_length + 1)} tokens from the left."
                        )
                    inp = torch.tensor(
                        (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape
                elif self.backend == "seq2seq":
                    inp = torch.tensor(
                        (context_enc)[-self.max_length :],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape

                    # build encoder attn masks
                    encoder_attns.append(torch.ones_like(inp))
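                    # the mask is all ones because `inp` is still unpadded here; zeros are
                    # introduced only when the batch is padded and concatenated below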

                    cont = torch.tensor(
                        (continuation_enc)[-self.max_length :],
                        # TODO: left-shift these?
                        # TODO: our code assumes we never end up truncating conts for either model type
                        dtype=torch.long,
                        device=self.device,
                    )
                    (contlen,) = cont.shape

                    conts.append(cont)

                    padding_len_cont = (
                        max(padding_len_cont, contlen)
                        if padding_len_cont is not None
                        else contlen
                    )

                padding_len_inp = (
                    max(padding_len_inp, inplen)
                    if padding_len_inp is not None
                    else inplen
                )

                inps.append(inp)  # [1, inp_length]
                cont_toks_list.append(continuation_enc)
                inplens.append(inplen)

            # create encoder attn mask and batched conts, if seq2seq
            call_kwargs = {}
            if self.backend == "causal":
                batched_inps = pad_and_concat(
                    padding_len_inp, inps, padding_side="right"
                )  # [batch, padding_len_inp]
            elif self.backend == "seq2seq":
                # TODO: left-pad encoder inps and mask?
                batched_inps = pad_and_concat(
                    padding_len_inp, inps
                )  # [batch, padding_len_inp]
                batched_conts = pad_and_concat(
                    padding_len_cont, conts
                )  # [batch, padding_len_cont]
                batched_encoder_mask = pad_and_concat(
                    padding_len_inp, encoder_attns
                )  # [batch, padding_len_inp]
                call_kwargs = {
                    "attn_mask": batched_encoder_mask,
                    "labels": batched_conts,
                }
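                # passing the continuations as `labels` lets the HF seq2seq model build its
                # decoder inputs internally (right-shifted labels) and return logits aligned
                # with the target tokens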

            multi_logits = F.log_softmax(
                self._model_call(batched_inps, **call_kwargs), dim=-1
            )  # [batch, padding_length (inp or cont), vocab]

            for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
                chunk, multi_logits, inplens, cont_toks_list
            ):
                # Slice to original seq length
                contlen = len(cont_toks)
                # take only logits in the continuation
                # (discard context toks if decoder-only; discard right-padding)
                # also discards + checks for "virtual tokens" in the causal LM's input window
                # from prompt/prefix tuning tokens, if applicable
                ctx_len = (
                    inplen + (logits.shape[0] - padding_len_inp)
                    if self.backend == "causal"
                    else None
                )
                logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
                logits = logits.unsqueeze(0)  # [1, seq, vocab]

                # Check if per-token argmax is exactly equal to continuation
                greedy_tokens = logits.argmax(dim=-1)
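                # (bool(max_equal) becomes the is-greedy flag: whether greedy decoding
                # would reproduce the continuation verbatim)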

                # check for one-token continuation cache hits.
                # noop in case group_by != "contexts" or no cache hit and returns the
                # original args. Otherwise, expands the logits batch dimension and yields each
                # batch along with matching continuation tokens and prompt strings.
                # logits -> [1, seq, vocab]
                for request_str, cont_toks, logits in re_ord.get_cache(
                    req_str=request_str,
                    cxt_toks=ctx_tokens,
                    cont_toks=cont_toks,
                    logits=logits,
                ):
                    cont_toks = torch.tensor(
                        cont_toks, dtype=torch.long, device=self.device
                    ).unsqueeze(0)  # [1, seq]
                    max_equal = (greedy_tokens == cont_toks).all()

                    # Obtain log-probs at the corresponding continuation token indices
                    # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
                    logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
                        -1
                    )  # [1, seq]

                    # Answer: (log prob, is-exact-match)
                    answer = (float(logits.sum()), bool(max_equal))

                    res.append(answer)

                    if request_str is not None:
                        # special case: loglikelihood_rolling produces a number of loglikelihood requests
                        # all with cache key None; instead, add_partial is done at the per-example level
                        # in the loglikelihood_rolling() function for those.
                        self.cache_hook.add_partial(
                            "loglikelihood", request_str, answer
                        )
                    pbar.update(1)

        pbar.close()

        return re_ord.get_original(res)

    def generate_until(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[str]:
        res = []

        def _collate(req: Tuple[str, dict]):
            """Defines the key for the sorted method"""
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
            toks = self.tok_encode(req[0])
            return -len(toks), req[0]
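            # e.g. prompts of 12 and 8 tokens yield keys (-12, ...) and (-8, ...):
            # longest-first, with the raw context string as a deterministic tiebreaker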

        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running generate_until requests",
        )
        adaptive_batch_size = None
        if self.batch_size == "auto":
            # probe the largest batch size that fits using maximum-length inputs
            print("Passed argument batch_size = auto. Detecting largest batch size")
            batch_size = self._detect_batch_size()
            print(f"Determined largest batch size: {batch_size}")
            adaptive_batch_size = batch_size
        # for each different set of kwargs, we execute all requests, by batch.
        batch_size = (
            self.batch_size
            if self.batch_size != "auto"
            else adaptive_batch_size
            if adaptive_batch_size is not None
            else 0
        )
        batch_fn = (
            self._batch_scheduler
            if self.batch_size == "auto" and not adaptive_batch_size
            else None
        )
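        # if a batch size was already auto-detected above, it is reused as a fixed size
        # and the per-length-bucket scheduler is skipped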

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        # group_fn=lambda x: x[1] -> x=(context, gen_kwargs)
        re_ords = Collator(
            [reg.args for reg in requests],
            sort_fn=_collate,
            group_by="gen_kwargs",
            group_fn=lambda x: x[1],
        )
        chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn)
        eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False)
        for chunk in chunks:
            contexts, all_gen_kwargs = zip(*chunk)
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]
            # unpack our keyword arguments.
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
                # add EOS token to stop sequences
                until = handle_stop_sequences(kwargs.pop("until", None), eos=eos)
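                # e.g. a task-supplied until="###" plus eos "</s>" yields ["###", "</s>"]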
            else:
                raise ValueError(
                    f"Expected `gen_kwargs` to be of type `dict` but got {type(gen_kwargs)}"
                )
            if "max_gen_toks" in kwargs.keys():
                max_gen_toks = kwargs.pop("max_gen_toks")
            else:
                max_gen_toks = self.max_gen_toks

            # set the max length in tokens of inputs ("context_enc")
            if self.backend == "causal":
                # max len for inputs = max length, minus room to generate the max new tokens
                max_ctx_len = self.max_length - max_gen_toks
                assert max_ctx_len > 0, (
                    f"Invalid configuration: requested max tokens to generate ({max_gen_toks}) must be less than model's maximum sequence length ({self.max_length})."
                )
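                # e.g. with max_length 2048 and max_gen_toks 256, prompts are left-truncated
                # to at most 1792 tokens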
            elif self.backend == "seq2seq":
                # max len for inputs = encoder's whole max_length
                max_ctx_len = self.max_length

            # encode, pad, and truncate contexts for this batch
            context_enc, attn_masks = self.tok_batch_encode(
                contexts,
                left_truncate_len=max_ctx_len,
                truncation=self.truncation,
            )
            context_enc = context_enc.to(self.device)
            attn_masks = attn_masks.to(self.device)

            if "max_length" not in kwargs:
                kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
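                # HF `generate` counts the prompt toward `max_length`, so this budgets
                # exactly max_gen_toks new tokens beyond the padded prompt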

            # perform batched generation
            cont = self._model_generate(
                context=context_enc,
                attention_mask=attn_masks,
                stop=until,
                **kwargs,
            )

            cont_toks_list = cont.tolist()
            for cont_toks, context in zip(cont_toks_list, contexts):
                # discard context + left-padding toks if using causal decoder-only LM
                if self.backend == "causal":
                    cont_toks = cont_toks[context_enc.shape[1] :]
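                    # context_enc.shape[1] is the padded prompt width, so this one slice
                    # strips both the left-padding and the prompt itself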

                s = self.tok_decode(cont_toks)

                # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
                for term in until:
                    if len(term) > 0:
                        # ignore '' separator,
                        # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
                        s = s.split(term)[0]

                res.append(s)

                self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s)
                pbar.update(1)
        # reorder this group of results back to original unsorted form
        res = re_ords.get_original(res)

        pbar.close()

        return res

    def apply_chat_template(
        self, chat_history: List[Dict[str, str]], add_generation_prompt: bool = True
    ) -> str:
        """
        Method to apply a chat template to a list of chat history between user and model.
        """
        try:
            chat_templated = self.tokenizer.apply_chat_template(
                chat_history,
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
                continue_final_message=not add_generation_prompt,
            )
        except jinja2.exceptions.TemplateError:
            eval_logger.warning(
                "Failed to apply chat template; removing the system role from the chat history and retrying."
            )
            chat_history = [msg for msg in chat_history if msg["role"] != "system"]
            chat_templated = self.tokenizer.apply_chat_template(
                chat_history,
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
                continue_final_message=not add_generation_prompt,
            )

        return chat_templated

    def get_model_info(self) -> dict:
        """
        Method to get Hugging Face model information for experiment reproducibility.
        """

        def get_model_num_params(model) -> int:
            if hasattr(model, "num_parameters"):
                return model.num_parameters()
            if hasattr(model, "parameters"):
                return sum(p.numel() for p in model.parameters())
            else:
                return -1

        def get_model_dtype(model) -> str:
            if hasattr(model, "dtype"):
                return str(model.dtype)
            else:
                return ""

        def get_model_sha(pretrained: str, revision: str) -> str:
            try:
                model_info = HfApi().model_info(repo_id=pretrained, revision=revision)
                return model_info.sha
            except Exception as e:
                eval_logger.debug(
                    f"Failed to get model SHA for {pretrained} at revision {revision}. Error: {e}"
                )
                return ""

        model_info = {
            "model_num_parameters": get_model_num_params(self._model),
            "model_dtype": get_model_dtype(self._model),
            "model_revision": self.revision,
            "model_sha": get_model_sha(self.pretrained, self.revision),
        }
        if self.peft:
            model_info["peft_sha"] = get_model_sha(self.peft, self.revision)
        if self.delta:
            model_info["delta_sha"] = get_model_sha(self.delta, self.revision)
        return model_info
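        # illustrative (made-up) return value:
        # {"model_num_parameters": 124439808, "model_dtype": "torch.float32",
        #  "model_revision": "main", "model_sha": "<40-char commit sha>"}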