"configs/faster_rcnn_x101_32x4d_fpn_1x.py" did not exist on "12003485b30382f3c0d522a040ba1e76cf3e0a93"
vllm_causallms.py 30.9 KB
Newer Older
Baber's avatar
types  
Baber committed
1
2
from __future__ import annotations

import copy
import gc
import inspect
import logging
import os
from importlib.metadata import version
from importlib.util import find_spec
from multiprocessing import Process, Queue
from queue import Empty
from time import sleep
from typing import TYPE_CHECKING, Literal

import jinja2
from more_itertools import distribute
from packaging.version import parse as parse_version
from tqdm import tqdm

from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import (
    Collator,
    configure_pad_token,
    handle_stop_sequences,
    postprocess_generated_text,
    undistribute,
)
from lm_eval.utils import (
    get_rolling_token_windows,
    make_disjoint_window,
)


try:
    import ray
    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest
    from vllm.transformers_utils.tokenizer import get_tokenizer
    from vllm.utils import get_open_port

    if parse_version(version("vllm")) >= parse_version("0.8.3"):
        from vllm.entrypoints.chat_utils import resolve_hf_chat_template
except ModuleNotFoundError:
    pass

if TYPE_CHECKING:
    pass

eval_logger = logging.getLogger(__name__)


def _vllm_mp_worker(
    model_args: dict,
    sampling_params: SamplingParams,
    requests: list[list[int]],
    lora_request: LoRARequest,
    result_queue: Queue,
    dp_size: int,
    local_dp_rank: int,
    dp_master_port: int,
    dp_master_ip: str = "127.0.0.1",
) -> None:
    """
    Worker process for vLLM multiprocessing.
    Initializes a vLLM engine, processes requests, and puts results or errors
    onto the result_queue.
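    On success the worker puts (local_dp_rank, list_of_request_outputs) on
    result_queue; on failure it puts (local_dp_rank, {"error": message}) so the
    parent process can detect the failure and re-raise it.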
    """

    if not requests:
        result_queue.put((local_dp_rank, []))
        return None

    os.environ["VLLM_DP_RANK"] = os.environ["VLLM_DP_RANK_LOCAL"] = str(local_dp_rank)
    os.environ["VLLM_DP_SIZE"] = str(dp_size)
    os.environ["VLLM_DP_MASTER_IP"] = str(dp_master_ip)
    os.environ["VLLM_DP_MASTER_PORT"] = str(dp_master_port)

    llm = None
    try:
        llm = LLM(**model_args)
        res = llm.generate(
            prompt_token_ids=requests,
            sampling_params=sampling_params,
            lora_request=lora_request,
        )
        # Give engines time to pause their processing loops before exiting.
        sleep(1)
        result_queue.put((local_dp_rank, res))

    except Exception as e:
        error_message = f"Worker {local_dp_rank} failed during generation: {type(e).__name__}: {str(e)}"
        eval_logger.error(error_message, exc_info=True)
        result_queue.put((local_dp_rank, {"error": error_message}))

    finally:
        if llm is not None:
            try:
                del llm
                gc.collect()
            except Exception as e_cleanup:
                eval_logger.warning(
                    f"Worker {local_dp_rank} encountered an error during LLM cleanup: {type(e_cleanup).__name__}: {str(e_cleanup)}",
                    exc_info=True,
                )

    return None


@register_model("vllm")
class VLLM(TemplateLM):
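    """lm-eval model backend that runs requests through vLLM's offline LLM engine.

    Registered as "vllm" via the decorator above. Minimal illustrative usage
    (the model name below is a placeholder, not prescribed by this file):

        lm = VLLM(pretrained="org/model-name", tensor_parallel_size=1, batch_size="auto")
    """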
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: str,
        dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto",
        revision: str | None = None,
        trust_remote_code: bool | None = False,
        tokenizer: str | None = None,
        tokenizer_mode: Literal["auto", "slow"] = "auto",
        tokenizer_revision: str | None = None,
        add_bos_token: bool | None = False,
        prefix_token_id: int | None = None,
        tensor_parallel_size: int = 1,
        quantization: str | None = None,
        max_gen_toks: int = 256,
        swap_space: int = 4,
        batch_size: str | int = 1,
        max_batch_size: int | None = None,
        max_length: int | None = None,
        max_model_len: int | None = None,
        seed: int = 1234,
        gpu_memory_utilization: float = 0.9,
        data_parallel_size: int = 1,
        lora_local_path: str | None = None,
        # VLLM: enable thinking tags in the prompt.
        enable_thinking: bool = True,
        chat_template_args: dict | None = None,
        # End marker for thinking tags; if provided, the response is taken as the text after this token.
        think_end_token: str | None = None,
        max_lora_rank: int = 16,
        **kwargs,
    ):
        super().__init__()

        if not find_spec("vllm"):
            raise ModuleNotFoundError(
                "attempted to use 'vllm' LM type, but package `vllm` is not installed. "
                "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
            )

        assert max_length is None or max_model_len is None, (
            "Either max_length or max_model_len may be provided, but not both"
        )
        kwargs.pop("device", None)
        self.think_end_token = think_end_token
        self.V1 = os.environ.get("VLLM_USE_V1", "1") != "0"
        self._max_length = max_model_len if max_model_len is not None else max_length
        self.tensor_parallel_size = int(tensor_parallel_size)
        self.data_parallel_size = int(data_parallel_size)
        self.model_args = {
            "model": pretrained,
            "gpu_memory_utilization": float(gpu_memory_utilization),
            "revision": revision,
            "dtype": dtype,
            "tokenizer": tokenizer,
            "tokenizer_mode": tokenizer_mode,
            "tokenizer_revision": tokenizer_revision,
            "trust_remote_code": trust_remote_code,
            "tensor_parallel_size": int(tensor_parallel_size),
            "max_model_len": int(self._max_length) if self._max_length else None,
            "max_num_seqs": kwargs.get("max_num_seqs", max_batch_size),
            "swap_space": int(swap_space),
            "quantization": quantization,
            "seed": int(seed),
            "enable_lora": bool(lora_local_path),
            "max_lora_rank": int(max_lora_rank),
        }
        self.model_args.update(kwargs)
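        # Any remaining kwargs are forwarded verbatim to vllm.LLM via model_args
        # (e.g. an engine argument like enforce_eager=True; illustrative only,
        # not something this class requires).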
        self.batch_size = (
            "auto"
            if isinstance(batch_size, str) and "auto" in batch_size
            else int(batch_size)
        )
        if self.data_parallel_size <= 1:
            self.model = LLM(**self.model_args)
        else:
            eval_logger.warning(
                "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached."
            )
            self.model_args["distributed_executor_backend"] = (
                "ray"
                if not self.V1
                else self.model_args.get("distributed_executor_backend", None)
            )
            self.batch_size = "auto"
            eval_logger.info("Manual batching is not compatible with data parallelism.")

        from transformers import AutoConfig

        self._config = AutoConfig.from_pretrained(
            pretrained, trust_remote_code=trust_remote_code, revision=revision
        )
        self.tokenizer = get_tokenizer(
            tokenizer if tokenizer else pretrained,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            revision=tokenizer_revision,
            add_bos_token=add_bos_token,
        )
        self.tokenizer = configure_pad_token(self.tokenizer, model_config=self._config)
        self.chat_template_args = chat_template_args or {}
        self.enable_thinking = self.chat_template_args.pop(
            "enable_thinking", enable_thinking
        )
        self.add_bos_token = add_bos_token
        if "gemma" in pretrained.lower():
            self.add_bos_token = True
            eval_logger.info(
                "Found 'gemma' in model name, a BOS token will be used as Gemma series models underperform without it."
            )

        if parse_version(version("vllm")) >= parse_version("0.8.3"):
            kwargs_resolve_hf_chat_template = {
                "tokenizer": self.tokenizer,
                "chat_template": None,
                "tools": None,
            }

            if parse_version(version("vllm")) >= parse_version("0.9.0"):
                if self.data_parallel_size <= 1:
                    kwargs_resolve_hf_chat_template["model_config"] = (
                        self.model.llm_engine.model_config
                    )
                else:
                    from vllm.engine.arg_utils import EngineArgs

                    engine_args = EngineArgs(**self.model_args)
                    model_config = engine_args.create_model_config()

                    kwargs_resolve_hf_chat_template["model_config"] = model_config

            # https://github.com/vllm-project/vllm/pull/18259
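            # Some vLLM releases expose this argument under the misspelled name
            # checked below, so pass whichever spelling the installed version's
            # signature actually accepts.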
            if (
                "trsut_remote_code"
                in inspect.signature(resolve_hf_chat_template).parameters
            ):
                kwargs_resolve_hf_chat_template["trsut_remote_code"] = trust_remote_code
            else:
                kwargs_resolve_hf_chat_template["trust_remote_code"] = trust_remote_code

            self.hf_chat_template = resolve_hf_chat_template(
                **kwargs_resolve_hf_chat_template
            )
        else:
            self.hf_chat_template = None

        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(
                f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
            )

        self._max_gen_toks = max_gen_toks

        if lora_local_path is not None:
            assert parse_version(version("vllm")) > parse_version("0.3.0"), (
                "lora adapters only compatible with vllm > v0.3.0."
            )
            self.lora_request = LoRARequest("finetuned", 1, lora_local_path)
        else:
            self.lora_request = None

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # if max length manually set, return it
            return self._max_length
        if self.data_parallel_size <= 1:
            return self.model.llm_engine.model_config.max_model_len
        else:
            seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
            for attr in seqlen_config_attrs:
                if hasattr(self._config, attr):
                    return getattr(self._config, attr)
            if hasattr(self.tokenizer, "model_max_length"):
                if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                    return self._DEFAULT_MAX_LENGTH
                return self.tokenizer.model_max_length
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self):
        return self._max_gen_toks

    def apply_chat_template(
        self, chat_history: list[dict[str, str]], add_generation_prompt: bool = True
    ) -> str:
        """
        Apply a chat template to a list of chat messages between the user and the model.
        """
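        # chat_history is a list of {"role": ..., "content": ...} messages, e.g.
        # [{"role": "user", "content": "2+2?"}] (illustrative example only).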
        try:
            chat_templated = self.tokenizer.apply_chat_template(
                chat_history,
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
                continue_final_message=not add_generation_prompt,
                chat_template=self.hf_chat_template,
                enable_thinking=self.enable_thinking,
                **self.chat_template_args,
            )
        except jinja2.exceptions.TemplateError:
            eval_logger.warning(
                "Failed to apply chat template. Removing the system role from the chat history."
            )
            chat_templated = self.tokenizer.apply_chat_template(
                [msg for msg in chat_history if msg["role"] != "system"],
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
                continue_final_message=not add_generation_prompt,
                chat_template=self.hf_chat_template,
                enable_thinking=self.enable_thinking,
                **self.chat_template_args,
            )

        return chat_templated

    @property
    def tokenizer_name(self) -> str:
        return self.tokenizer.name_or_path.replace("/", "__")

    def tok_encode(
        self,
        string: str | list[str],
        left_truncate_len: int = None,
        add_special_tokens: bool = False,
        truncation: bool = False,
    ) -> list[int] | list[list[int]]:
        if not add_special_tokens:
            add_special_tokens = self.add_bos_token
        encoding: list[list[int]] | list[int] = self.tokenizer(
            string,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            return_attention_mask=False,
        ).input_ids

        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
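        # (e.g. left_truncate_len=3 keeps only the last three token ids of each sequence)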
        if left_truncate_len:
            if not isinstance(string, str):
                encoding = [enc[-left_truncate_len:] for enc in encoding]
            else:
                encoding = encoding[-left_truncate_len:]

        return encoding

    def _model_generate(
        self,
        requests: list[list[int]] = None,
        generate: bool = False,
        max_tokens: int = None,
        stop: list[str] | None = None,
        **kwargs,
    ):
        if generate:
            kwargs = self.modify_gen_kwargs(kwargs)
            sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
        else:
            sampling_params = SamplingParams(
                temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
            )
        if self.data_parallel_size > 1 and not self.V1:
            # vLLM hangs if resources are set in ray.remote
            # also seems to only work with decorator and not with ray.remote() fn
            # see https://github.com/vllm-project/vllm/issues/973
            @ray.remote
            def run_inference_one_model(
                model_args: dict,
                sampling_params: SamplingParams,
                requests: list[list[int]],
                lora_request: LoRARequest,
            ):
                llm = LLM(**model_args)
                return llm.generate(
                    prompt_token_ids=requests,
                    sampling_params=sampling_params,
                    lora_request=lora_request,
                )

            # dispatch requests to all self.data_parallel_size workers, in interleaved fashion
            # interleaved important to balance context lengths across workers
            requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
            inputs = (
                (self.model_args, sampling_params, req, self.lora_request)
                for req in requests
            )
            object_refs = [run_inference_one_model.remote(*x) for x in inputs]
            results = ray.get(object_refs)
            # Invoke ray.shutdown() to prevent hang-ups if subsequent calls are required.
            ray.shutdown()
            # flatten results
            return undistribute(results)
        elif self.data_parallel_size > 1:
            # based on https://github.com/vllm-project/vllm/blob/a04720bc36401d831cb048c3917b9e58173d9c1d/examples/offline_inference/data_parallel.py
            dp_size = self.data_parallel_size
            dp_master_ip = os.environ.get("VLLM_DP_MASTER_IP", "127.0.0.1")
            dp_master_port = os.environ.get("VLLM_DP_MASTER_PORT") or get_open_port()

            requests = (list(x) for x in distribute(self.data_parallel_size, requests))

            procs, resq = [], Queue()
            # We use Process as it is non-daemonic
            try:
                for rank, req in enumerate(requests):
                    proc = Process(
                        target=_vllm_mp_worker,
                        args=(
                            self.model_args.copy(),
                            sampling_params,
                            req,
                            self.lora_request,
                            resq,
                            dp_size,
                            rank,
                            dp_master_port,
                            dp_master_ip,
                        ),
                    )
                    proc.start()
                    procs.append(proc)

                # Collect results
                rank_res = {}
                while len(rank_res) < len(procs):
                    try:
                        rank, result = resq.get(timeout=30)
                        if isinstance(result, dict) and "error" in result:
                            raise RuntimeError(result["error"])
                        rank_res[rank] = result
                    except Empty:
                        dead_procs = [
                            idx
                            for idx, p in enumerate(procs)
                            if not p.is_alive() and idx not in rank_res
                        ]
                        if dead_procs:
                            raise RuntimeError(
                                f"Worker processes {dead_procs} died unexpectedly"
                            ) from None
                        continue

                results = [rank_res[i] for i in range(len(procs))]
                return undistribute(results)

            # cleanup
            finally:
                try:
                    resq.close()
                    resq.join_thread()
                except Exception:
                    eval_logger.debug(
                        "Failed to close vllm DP results queue", exc_info=True
                    )
                for proc in procs:
                    proc.join(timeout=10)
                    if proc.is_alive():
                        proc.terminate()
                        proc.join(timeout=5)
                        if proc.is_alive():
                            proc.kill()

        else:
            outputs = self.model.generate(
                prompt_token_ids=requests,
                sampling_params=sampling_params,
Baber's avatar
types  
Baber committed
491
                use_tqdm=self.batch_size == "auto",
                lora_request=self.lora_request,
            )
            return outputs

    def loglikelihood_rolling(
        self, requests: list[Instance], disable_tqdm: bool = False
    ) -> list[float]:
        adaptive_batch_size = None
        if self.batch_size == "auto":
            adaptive_batch_size = len(requests)

        # First, collect all windows from all requests
        all_windows = []  # List of (request_idx, window) tuples
        request_window_counts = []  # Track number of windows per request

        for req_idx, (string,) in enumerate(
            tqdm(
                [req.args for req in requests],
                disable=(disable_tqdm or (self.rank != 0)),
            )
        ):
            rolling_token_windows: list[tuple[list[int], list[int]]] = list(
                map(
                    make_disjoint_window,
                    get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.prefix_token_id,
                        # max_seq_len - (1 for context)
                        max_seq_len=self.max_length - 1,
                        context_len=1,
                    ),
                )
            )

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            windows = [(None,) + x for x in rolling_token_windows]

            # Store windows with their request index
            all_windows.extend((req_idx, window) for window in windows)
            request_window_counts.append(len(windows))

        all_nlls = []
        batch_size = adaptive_batch_size or int(self.batch_size)
        for i in range(0, len(all_windows), batch_size):
            batch = all_windows[i : i + batch_size]
            # Extract just the windows for processing, keeping track of request indices
            batch_indices, batch_windows = zip(*batch)

            batch_nlls = self._loglikelihood_tokens(
                requests=batch_windows,
                disable_tqdm=False,
            )
            # Store results with their request indices
            all_nlls.extend(zip(batch_indices, batch_nlls))

        # Reconstruct per-request loglikelihoods
        loglikelihoods = []
        current_idx = 0
        for window_count in request_window_counts:
            # Get all nlls for this request
            request_nlls = all_nlls[current_idx : current_idx + window_count]
            # Sum up the nlls for this request (discarding is_greedy)
            request_total = sum(nll[0] for _, nll in request_nlls)
            loglikelihoods.append(request_total)
            current_idx += window_count

            string = requests[len(loglikelihoods) - 1].args[0]
            self.cache_hook.add_partial(
                "loglikelihood_rolling", (string,), request_total
            )

        return loglikelihoods

    def generate_until(
        self, requests: list[Instance], disable_tqdm: bool = False
    ) -> list[str]:
        res = []

        # batch tokenize contexts
        context, all_gen_kwargs = zip(*(req.args for req in requests))
        context_encoding: list[list[int]] = self.tok_encode(
            context, add_special_tokens=self.add_bos_token
        )
        requests = [
            ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
        ]

        def _collate_gen(_requests):
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be overestimates rather than underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
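            #   e.g. contexts with token lengths [5, 12, 3] are visited in the order [12, 5, 3]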
            return -len(_requests[0][1]), _requests[0][0]

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs")
        chunks = re_ords.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running generate_until requests",
        )
        # for each different set of kwargs, we execute all requests, by batch.
        eos = self.tokenizer.decode(self.eot_token_id)
        for chunk in chunks:
            context_and_encoding, all_gen_kwargs = zip(*chunk)
            context, context_encoding = zip(*context_and_encoding)
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]
            # unpack our keyword arguments.
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
                # add EOS token to stop sequences
                until = handle_stop_sequences(kwargs.pop("until", None), eos=eos)
            else:
                raise ValueError(
                    f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
                )
            if "max_gen_toks" in kwargs:
                max_gen_toks = kwargs.pop("max_gen_toks")
            else:
                max_gen_toks = self.max_gen_toks

            # set the max length in tokens of inputs ("context_enc")
            # max len for inputs = max length, minus room to generate the max new tokens
            max_ctx_len = self.max_length - max_gen_toks
            all_lengths = [len(x) for x in context_encoding]
            for length in all_lengths:
                if length > max_ctx_len:
                    eval_logger.warning(
                        f"Context length {length} exceeds the maximum allowed context length {max_ctx_len} (max_length minus max_gen_toks). Truncating context."
                    )
            context_encoding = [x[-max_ctx_len:] for x in context_encoding]

            # perform batched generation
            cont = self._model_generate(
                requests=context_encoding,
                generate=True,
                max_tokens=max_gen_toks,
                stop=until,
                **kwargs,
            )

            # cache generations
            for output, context_ in zip(cont, context):
                generated_text: str = output.outputs[0].text
                # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
                generated_text = postprocess_generated_text(
                    generated_text, until, self.think_end_token
                )
                res.append(generated_text)
                self.cache_hook.add_partial(
                    "generate_until", (context_, gen_kwargs), generated_text
                )
                pbar.update(1)

        pbar.close()
        # reorder all group of results back to original unsorted form
        return re_ords.get_original(res)

    def _loglikelihood_tokens(
        self,
        requests: list[tuple[tuple[str, str], list[int], list[int]]],
        disable_tqdm: bool = False,
    ) -> list[tuple[float, bool]]:
        res = []

        def _collate(x):
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        # Reorder requests by length and batch
        re_ord = Collator(requests, sort_fn=_collate)
        chunks = re_ord.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=disable_tqdm,
            desc="Running loglikelihood requests",
        )
        for chunk in chunks:
            inputs = []
            ctxlens = []
            for _cache_key, context_enc, continuation_enc in chunk:
                if (
                    full_length := len(context_enc + continuation_enc)
                ) > self.max_length:
                    eval_logger.warning(
                        f"Combined context and continuation length {full_length} exceeds max length ({self.max_length}). Truncating context."
                    )
                inp = (context_enc + continuation_enc)[-(self.max_length) :]
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length)
                )

                inputs.append(inp)
                ctxlens.append(ctxlen)

            outputs = self._model_generate(requests=inputs, generate=False)

            for output, ctxlen, (cache_key, _, _), inp in zip(
                outputs, ctxlens, chunk, inputs
            ):
                answer = self._parse_logprobs(
                    tokens=inp,
                    outputs=output,
                    ctxlen=ctxlen,
                )

                res.append(answer)

                if cache_key is not None:
                    # special case: loglikelihood_rolling produces a number of loglikelihood requests
                    # all with cache key None. instead do add_partial on the per-example level
                    # in the loglikelihood_rolling() function for those.
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
                pbar.update(1)
        pbar.close()
        return re_ord.get_original(res)

    @staticmethod
    def _parse_logprobs(tokens: list, outputs, ctxlen: int) -> tuple[float, bool]:
        """Process logprobs and tokens.

        :param tokens: list
            Input tokens (potentially left-truncated)
        :param outputs: RequestOutput
            Contains prompt_logprobs
        :param ctxlen: int
            Length of context (so we can slice them away and only keep the predictions)
        :return:
            continuation_logprobs: float
                Log probabilities of continuation tokens
            is_greedy: bool
                Whether argmax matches given continuation exactly
        """

        # The first entry of prompt_logprobs is None because the model has no previous tokens to condition on.
        continuation_logprobs_dicts = outputs.prompt_logprobs

        def coerce_logprob_to_num(logprob):
            # vLLM changed the return type of logprobs from float
            # to a Logprob object storing the float value + extra data
            # (https://github.com/vllm-project/vllm/pull/3065).
            # If we are dealing with vllm's Logprob object, return
            # the logprob value stored as an attribute. Otherwise,
            # return the object itself (which should be a float
            # for older versions of vLLM).
            return getattr(logprob, "logprob", logprob)

        continuation_logprobs_dicts = [
            {
                token: coerce_logprob_to_num(logprob)
                for token, logprob in logprob_dict.items()
            }
            if logprob_dict is not None
            else None
            for logprob_dict in continuation_logprobs_dicts
        ]

        # Calculate continuation_logprobs
        # assume ctxlen always >= 1
        continuation_logprobs = sum(
            logprob_dict.get(token)
            for token, logprob_dict in zip(
                tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
            )
        )

        # Determine if is_greedy
        is_greedy = True
        for token, logprob_dict in zip(
            tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
        ):
            # Get the token with the maximum log probability from the logprob_dict
            if logprob_dict:  # Ensure the logprob_dict is not None
                top_token = max(logprob_dict, key=logprob_dict.get)
                if top_token != token:
                    is_greedy = False
                    break

        return continuation_logprobs, is_greedy

    @staticmethod
    def modify_gen_kwargs(kwargs: dict) -> dict:
        # sampling_params
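        # Illustrative mapping (keys assumed): {"do_sample": False, "top_p": 0.9} becomes
        # {"top_p": 0.9, "temperature": 0.0, "skip_special_tokens": False,
        #  "spaces_between_special_tokens": False} before being passed to SamplingParams.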
        do_sample = kwargs.pop("do_sample", None)
        if do_sample is False and "temperature" not in kwargs:
            eval_logger.debug(
                "Got `do_sample=False` and no temperature value, setting VLLM temperature to 0.0 ..."
            )
            kwargs["temperature"] = 0.0
        kwargs["temperature"] = kwargs.get("temperature", 0.0)
        # hf defaults
        kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
        kwargs["spaces_between_special_tokens"] = kwargs.get(
            "spaces_between_special_tokens", False
        )
        return kwargs