import copy
import gc
import inspect
import logging
import os
from importlib.metadata import version
from importlib.util import find_spec
from multiprocessing import Process, Queue
from queue import Empty
from time import sleep
from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union

import jinja2
from more_itertools import distribute
from packaging.version import parse as parse_version
from tqdm import tqdm

from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import (
    Collator,
    configure_pad_token,
    handle_stop_sequences,
    postprocess_generated_text,
    undistribute,
)
from lm_eval.utils import (
    get_rolling_token_windows,
    make_disjoint_window,
)


try:
    import ray
    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest
    from vllm.transformers_utils.tokenizer import get_tokenizer
    from vllm.utils import get_open_port

    if parse_version(version("vllm")) >= parse_version("0.8.3"):
        from vllm.entrypoints.chat_utils import resolve_hf_chat_template
except ModuleNotFoundError:
    pass

if TYPE_CHECKING:
    pass

eval_logger = logging.getLogger(__name__)
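
# Example CLI usage (illustrative; model and task names are placeholders):
#   lm_eval --model vllm \
#     --model_args pretrained=EleutherAI/pythia-1.4b,tensor_parallel_size=1,gpu_memory_utilization=0.9 \
#     --tasks lambada_openai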


def _vllm_mp_worker(
    model_args: dict,
    sampling_params: "SamplingParams",
    requests: List[List[int]],
    lora_request: "LoRARequest",
    result_queue: "Queue",
    dp_size: int,
    local_dp_rank: int,
    dp_master_port: int,
    dp_master_ip: str = "127.0.0.1",
) -> None:
    """
    Worker process for vLLM multiprocessing.
    Initializes a vLLM engine, processes requests, and puts results or errors
    onto the result_queue.
    """

    if not requests:
        result_queue.put((local_dp_rank, []))
        return None

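    # vLLM's data-parallel workers coordinate through these environment
    # variables; each worker needs its own rank plus the shared master
    # address/port set before the engine is constructed.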
    os.environ["VLLM_DP_RANK"] = os.environ["VLLM_DP_RANK_LOCAL"] = str(local_dp_rank)
    os.environ["VLLM_DP_SIZE"] = str(dp_size)
    os.environ["VLLM_DP_MASTER_IP"] = str(dp_master_ip)
    os.environ["VLLM_DP_MASTER_PORT"] = str(dp_master_port)

    llm = None
    try:
        llm = LLM(**model_args)
        res = llm.generate(
            prompt_token_ids=requests,
            sampling_params=sampling_params,
            lora_request=lora_request,
        )
        # Give engines time to pause their processing loops before exiting.
        sleep(1)
        result_queue.put((local_dp_rank, res))

    except Exception as e:
        error_message = f"Worker {local_dp_rank} failed during generation: {type(e).__name__}: {str(e)}"
        eval_logger.error(error_message, exc_info=True)
        result_queue.put((local_dp_rank, {"error": error_message}))

    finally:
        if llm is not None:
            try:
                del llm
                gc.collect()
            except Exception as e_cleanup:
                eval_logger.warning(
                    f"Worker {local_dp_rank} encountered an error during LLM cleanup: {type(e_cleanup).__name__}: {str(e_cleanup)}",
                    exc_info=True,
                )

    return None


@register_model("vllm")
class VLLM(TemplateLM):
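    """vLLM-backed causal LM for the evaluation harness.

    Wraps a vllm.LLM engine behind the TemplateLM interface, with support for
    tensor/data parallelism, quantization, LoRA adapters, and HF chat templates.
    """
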
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: str,
        dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto",
        revision: Optional[str] = None,
        trust_remote_code: Optional[bool] = False,
        tokenizer: Optional[str] = None,
        tokenizer_mode: Literal["auto", "slow"] = "auto",
        tokenizer_revision: Optional[str] = None,
        add_bos_token: Optional[bool] = False,
        prefix_token_id: Optional[int] = None,
        tensor_parallel_size: int = 1,
        quantization: Optional[str] = None,
        max_gen_toks: int = 256,
        swap_space: int = 4,
        batch_size: Union[str, int] = 1,
        max_batch_size: Optional[int] = None,
        max_length: Optional[int] = None,
        max_model_len: Optional[int] = None,
        seed: int = 1234,
        gpu_memory_utilization: float = 0.9,
        data_parallel_size: int = 1,
        lora_local_path: Optional[str] = None,
        # vLLM: enable thinking tags in the prompt.
        enable_thinking: bool = True,
        chat_template_args: Optional[dict] = None,
        # End marker for thinking tags; if provided, the response is split on this token and the text after it is kept.
        think_end_token: Optional[str] = None,
        max_lora_rank: int = 16,
        **kwargs,
    ):
        super().__init__()

        if not find_spec("vllm"):
            raise ModuleNotFoundError(
                "attempted to use 'vllm' LM type, but package `vllm` is not installed. "
                "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
            )

        assert max_length is None or max_model_len is None, (
            "Either max_length or max_model_len may be provided, but not both"
        )
        kwargs.pop("device", None)
        self.think_end_token = think_end_token
        self.V1 = os.environ.get("VLLM_USE_V1", "1") != "0"
        self._max_length = max_model_len if max_model_len is not None else max_length
        self.tensor_parallel_size = int(tensor_parallel_size)
        self.data_parallel_size = int(data_parallel_size)
        self.model_args = {
            "model": pretrained,
            "gpu_memory_utilization": float(gpu_memory_utilization),
            "revision": revision,
            "dtype": dtype,
baberabb's avatar
baberabb committed
166
            "tokenizer": tokenizer,
baberabb's avatar
baberabb committed
167
            "tokenizer_mode": tokenizer_mode,
baberabb's avatar
baberabb committed
168
            "tokenizer_revision": tokenizer_revision,
baberabb's avatar
baberabb committed
169
170
            "trust_remote_code": trust_remote_code,
            "tensor_parallel_size": int(tensor_parallel_size),
171
            "max_model_len": int(self._max_length) if self._max_length else None,
172
            "max_num_seqs": kwargs.get("max_num_seqs", max_batch_size),
baberabb's avatar
baberabb committed
173
174
175
            "swap_space": int(swap_space),
            "quantization": quantization,
            "seed": int(seed),
MaYongQing's avatar
MaYongQing committed
176
177
            "enable_lora": True if lora_local_path else False,
            "max_lora_rank": int(max_lora_rank),
baberabb's avatar
baberabb committed
178
        }
        self.model_args.update(kwargs)
        self.batch_size = (
            "auto"
            if isinstance(batch_size, str) and "auto" in batch_size
            else int(batch_size)
        )
        if self.data_parallel_size <= 1:
            self.model = LLM(**self.model_args)
        else:
            eval_logger.warning(
                "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached."
            )
            self.model_args["distributed_executor_backend"] = (
                "ray"
                if not self.V1
                else self.model_args.get("distributed_executor_backend", None)
            )
            self.batch_size = "auto"
            eval_logger.info("Manual batching is not compatible with data parallelism.")

        from transformers import AutoConfig

        self._config = AutoConfig.from_pretrained(
            pretrained, trust_remote_code=trust_remote_code, revision=revision
        )
        self.tokenizer = get_tokenizer(
            tokenizer if tokenizer else pretrained,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            revision=tokenizer_revision,
            add_bos_token=add_bos_token,
        )
        self.tokenizer = configure_pad_token(self.tokenizer, model_config=self._config)
        self.chat_template_args = chat_template_args or {}
        self.enable_thinking = self.chat_template_args.pop(
            "enable_thinking", enable_thinking
        )
        self.add_bos_token = add_bos_token
        if "gemma" in pretrained.lower():
            self.add_bos_token = True
            eval_logger.info(
                "Found 'gemma' in model name, a BOS token will be used as Gemma series models underperform without it."
            )

        if parse_version(version("vllm")) >= parse_version("0.8.3"):
            kwargs_resolve_hf_chat_template = {
                "tokenizer": self.tokenizer,
                "chat_template": None,
                "tools": None,
            }

            if parse_version(version("vllm")) >= parse_version("0.9.0"):
                if self.data_parallel_size <= 1:
                    kwargs_resolve_hf_chat_template["model_config"] = (
                        self.model.llm_engine.model_config
                    )
                else:
                    from vllm.engine.arg_utils import EngineArgs

                    engine_args = EngineArgs(**self.model_args)
                    model_config = engine_args.create_model_config()

                    kwargs_resolve_hf_chat_template["model_config"] = model_config

            # https://github.com/vllm-project/vllm/pull/18259
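            # (some vLLM versions shipped this parameter misspelled as
            # "trsut_remote_code"; pass whichever spelling the installed
            # version accepts)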
            if (
                "trsut_remote_code"
                in inspect.signature(resolve_hf_chat_template).parameters
            ):
                kwargs_resolve_hf_chat_template["trsut_remote_code"] = trust_remote_code
            else:
                kwargs_resolve_hf_chat_template["trust_remote_code"] = trust_remote_code

            self.hf_chat_template = resolve_hf_chat_template(
                **kwargs_resolve_hf_chat_template
            )
        else:
            self.hf_chat_template = None

        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(
                f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
            )

        self._max_gen_toks = max_gen_toks

        if lora_local_path is not None:
            assert parse_version(version("vllm")) > parse_version("0.3.0"), (
                "lora adapters only compatible with vllm > v0.3.0."
            )
            self.lora_request = LoRARequest("finetuned", 1, lora_local_path)
        else:
            self.lora_request = None

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # if max length manually set, return it
            return self._max_length
        if self.data_parallel_size <= 1:
            return self.model.llm_engine.model_config.max_model_len
        else:
            seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
            for attr in seqlen_config_attrs:
                if hasattr(self._config, attr):
                    return getattr(self._config, attr)
            if hasattr(self.tokenizer, "model_max_length"):
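                # transformers reports this sentinel (int(1e30)) when the tokenizer declares no real length limit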
                if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                    return self._DEFAULT_MAX_LENGTH
                return self.tokenizer.model_max_length
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self):
        return self._max_gen_toks

    def apply_chat_template(
        self, chat_history: List[Dict[str, str]], add_generation_prompt: bool = True
    ) -> str:
        """
        Method to apply a chat template to a list of chat history between user and model.
        """
        try:
            chat_templated = self.tokenizer.apply_chat_template(
                chat_history,
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
                continue_final_message=not add_generation_prompt,
                chat_template=self.hf_chat_template,
                enable_thinking=self.enable_thinking,
                **self.chat_template_args,
            )
        except jinja2.exceptions.TemplateError:
            eval_logger.warning(
                "Failed to apply chat template. removing the system role in chat history."
            )
            chat_templated = self.tokenizer.apply_chat_template(
                [msg for msg in chat_history if msg["role"] != "system"],
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
                continue_final_message=not add_generation_prompt,
                chat_template=self.hf_chat_template,
                enable_thinking=self.enable_thinking,
                **self.chat_template_args,
            )

        return chat_templated

    @property
    def tokenizer_name(self) -> str:
        return self.tokenizer.name_or_path.replace("/", "__")

    def tok_encode(
        self,
        string: Union[str, List[str]],
        left_truncate_len: Optional[int] = None,
        add_special_tokens: bool = False,
        truncation: bool = False,
    ) -> Union[List[int], List[List[int]]]:
        if not add_special_tokens:
            add_special_tokens = self.add_bos_token
        encoding: Union[List[List[int]], List[int]] = self.tokenizer(
            string,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            return_attention_mask=False,
        ).input_ids
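        # a str input yields List[int]; a List[str] input yields List[List[int]]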

        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
        if left_truncate_len:
            if not isinstance(string, str):
                encoding = [enc[-left_truncate_len:] for enc in encoding]
            else:
                encoding = encoding[-left_truncate_len:]

        return encoding

    def _model_generate(
        self,
        requests: Optional[List[List[int]]] = None,
        generate: bool = False,
        max_tokens: Optional[int] = None,
        stop: Optional[List[str]] = None,
        **kwargs,
    ):
        if generate:
            kwargs = self.modify_gen_kwargs(kwargs)
            sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
        else:
            sampling_params = SamplingParams(
                temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
            )
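        # Data-parallel dispatch: the legacy (V0) engine path goes through Ray
        # remote workers below; the V1 engine path spawns multiprocessing workers.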
        if self.data_parallel_size > 1 and not self.V1:
            # vLLM hangs if resources are set in ray.remote
            # also seems to only work with decorator and not with ray.remote() fn
            # see https://github.com/vllm-project/vllm/issues/973
            @ray.remote
            def run_inference_one_model(
                model_args: dict,
                sampling_params: SamplingParams,
                requests: List[List[int]],
                lora_request: LoRARequest,
            ):
                llm = LLM(**model_args)
                return llm.generate(
                    prompt_token_ids=requests,
                    sampling_params=sampling_params,
                    lora_request=lora_request,
                )

            # dispatch requests to all self.data_parallel_size workers, in interleaved fashion
            # interleaved important to balance context lengths across workers
            requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
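            # e.g. distribute(2, [r0, r1, r2, r3]) -> [[r0, r2], [r1, r3]];
            # undistribute() below restores the original request order.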
            inputs = (
                (self.model_args, sampling_params, req, self.lora_request)
                for req in requests
            )
            object_refs = [run_inference_one_model.remote(*x) for x in inputs]
            results = ray.get(object_refs)
            # Invoke ray.shutdown() to prevent hangs if subsequent calls are required.
            ray.shutdown()
            # flatten results
            return undistribute(results)
        elif self.data_parallel_size > 1:
            # based on https://github.com/vllm-project/vllm/blob/a04720bc36401d831cb048c3917b9e58173d9c1d/examples/offline_inference/data_parallel.py
            dp_size = self.data_parallel_size
            dp_master_ip = os.environ.get("VLLM_DP_MASTER_IP", "127.0.0.1")
            dp_master_port = os.environ.get("VLLM_DP_MASTER_PORT") or get_open_port()

            requests = (list(x) for x in distribute(self.data_parallel_size, requests))

            procs, resq = [], Queue()
            # We use Process as it is non-daemonic
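            # (daemonic processes may not spawn children, and each worker has to
            # launch its own vLLM engine)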
            try:
                for rank, req in enumerate(requests):
                    proc = Process(
                        target=_vllm_mp_worker,
                        args=(
                            self.model_args.copy(),
                            sampling_params,
                            req,
                            self.lora_request,
                            resq,
                            dp_size,
                            rank,
                            dp_master_port,
                            dp_master_ip,
                        ),
                    )
                    proc.start()
                    procs.append(proc)

                # Collect results
                rank_res = {}
                while len(rank_res) < len(procs):
                    try:
                        rank, result = resq.get(timeout=30)
                        if isinstance(result, dict) and "error" in result:
                            raise RuntimeError(result["error"])
                        rank_res[rank] = result
                    except Empty:
                        dead_procs = [
                            idx
                            for idx, p in enumerate(procs)
                            if not p.is_alive() and idx not in rank_res
                        ]
                        if dead_procs:
                            raise RuntimeError(
                                f"Worker processes {dead_procs} died unexpectedly"
                            )
                        continue

                results = [rank_res[i] for i in range(len(procs))]
                return undistribute(results)

            # cleanup
            finally:
                try:
                    resq.close()
                    resq.join_thread()
                except Exception:
                    eval_logger.debug(
                        "Failed to close vllm DP results queue", exc_info=True
                    )
                for proc in procs:
                    proc.join(timeout=10)
                    if proc.is_alive():
                        proc.terminate()
                        proc.join(timeout=5)
                        if proc.is_alive():
                            proc.kill()

        else:
            outputs = self.model.generate(
                prompt_token_ids=requests,
                sampling_params=sampling_params,
                use_tqdm=self.batch_size == "auto",
                lora_request=self.lora_request,
            )
            return outputs

    def loglikelihood_rolling(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[float]:
        adaptive_batch_size = None
        if self.batch_size == "auto":
            adaptive_batch_size = len(requests)

        # First, collect all windows from all requests
        all_windows = []  # List of (request_idx, window) tuples
        request_window_counts = []  # Track number of windows per request

        for req_idx, (string,) in enumerate(
            tqdm(
                [req.args for req in requests],
                disable=(disable_tqdm or (self.rank != 0)),
            )
        ):
            rolling_token_windows: List[Tuple[List[int], List[int]]] = list(
                map(
                    make_disjoint_window,
                    get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.prefix_token_id,
                        # max_seq_len - (1 for context)
                        max_seq_len=self.max_length - 1,
                        context_len=1,
                    ),
                )
            )

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            windows = [(None,) + x for x in rolling_token_windows]

            # Store windows with their request index
            all_windows.extend((req_idx, window) for window in windows)
            request_window_counts.append(len(windows))

        all_nlls = []
        batch_size = adaptive_batch_size or int(self.batch_size)
        for i in range(0, len(all_windows), batch_size):
            batch = all_windows[i : i + batch_size]
            # Extract just the windows for processing, keeping track of request indices
            batch_indices, batch_windows = zip(*batch)

            batch_nlls = self._loglikelihood_tokens(
                requests=batch_windows,
                disable_tqdm=False,
            )
            # Store results with their request indices
            all_nlls.extend(zip(batch_indices, batch_nlls))

        # Reconstruct per-request loglikelihoods
        loglikelihoods = []
        current_idx = 0
        for window_count in request_window_counts:
            # Get all nlls for this request
            request_nlls = all_nlls[current_idx : current_idx + window_count]
            # Sum up the nlls for this request (discarding is_greedy)
            request_total = sum(nll[0] for _, nll in request_nlls)
            loglikelihoods.append(request_total)
            current_idx += window_count

            string = requests[len(loglikelihoods) - 1].args[0]
            self.cache_hook.add_partial(
                "loglikelihood_rolling", (string,), request_total
            )

        return loglikelihoods

    def generate_until(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[str]:
        res = []

        # batch tokenize contexts
        context, all_gen_kwargs = zip(*(req.args for req in requests))
        context_encoding: List[List[int]] = self.tok_encode(
            context, add_special_tokens=self.add_bos_token
        )
        requests = [
            ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
        ]

        def _collate_gen(_requests):
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
            return -len(_requests[0][1]), _requests[0][0]

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs")
        chunks = re_ords.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running generate_until requests",
        )
        # for each different set of kwargs, we execute all requests, by batch.
        eos = self.tokenizer.decode(self.eot_token_id)
        for chunk in chunks:
            context_and_encoding, all_gen_kwargs = zip(*chunk)
            context, context_encoding = zip(*context_and_encoding)
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]
            # unpack our keyword arguments.
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
                # add EOS token to stop sequences
                until = handle_stop_sequences(kwargs.pop("until", None), eos=eos)
            else:
                raise ValueError(
                    f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
                )
            if "max_gen_toks" in kwargs.keys():
                max_gen_toks = kwargs.pop("max_gen_toks")
            else:
                max_gen_toks = self.max_gen_toks

            # set the max length in tokens of inputs ("context_enc")
            # max len for inputs = max length, minus room to generate the max new tokens
            max_ctx_len = self.max_length - max_gen_toks
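            # e.g. max_length=2048 and max_gen_toks=256 leaves at most 1792 tokens of context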
            all_lengths = [len(x) for x in context_encoding]
            for length in all_lengths:
                if length > max_ctx_len:
                    eval_logger.warning(
                        f"Context length {length} exceeds max length (context + max gen tokens): {max_ctx_len}. Truncating context."
                    )
            context_encoding = [x[-max_ctx_len:] for x in context_encoding]

            # perform batched generation
            cont = self._model_generate(
                requests=context_encoding,
                generate=True,
                max_tokens=max_gen_toks,
                stop=until,
                **kwargs,
            )

            # cache generations
            for output, context in zip(cont, context):
                generated_text: str = output.outputs[0].text
                # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
                generated_text = postprocess_generated_text(
                    generated_text, until, self.think_end_token
                )
                res.append(generated_text)
                self.cache_hook.add_partial(
                    "generate_until", (context, gen_kwargs), generated_text
                )
                pbar.update(1)

        pbar.close()
        # reorder all group of results back to original unsorted form
        return re_ords.get_original(res)

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
    ) -> List[Tuple[float, bool]]:
        res = []

        def _collate(x):
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        # Reorder requests by length and batch
        re_ord = Collator(requests, sort_fn=_collate)
        chunks = re_ord.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=disable_tqdm,
            desc="Running loglikelihood requests",
        )
        for chunk in chunks:
            inputs = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                if (
                    full_length := len(context_enc + continuation_enc)
                ) > self.max_length:
                    eval_logger.warning(
                        f"Context length {full_length} exceeds max length ({self.max_length}). Truncating context."
                    )
                inp = (context_enc + continuation_enc)[-(self.max_length) :]
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length)
                )
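                # e.g. max_length=2048, len(context)=2000, len(continuation)=100:
                # inp keeps the final 2048 tokens and ctxlen = 2000 - 52 = 1948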

                inputs.append(inp)
                ctxlens.append(ctxlen)

            outputs = self._model_generate(requests=inputs, generate=False)

            for output, ctxlen, (cache_key, _, _), inp in zip(
                outputs, ctxlens, chunk, inputs
            ):
                answer = self._parse_logprobs(
                    tokens=inp,
                    outputs=output,
                    ctxlen=ctxlen,
                )

                res.append(answer)

                if cache_key is not None:
                    # special case: loglikelihood_rolling produces a number of loglikelihood requests
                    # all with cache key None. instead do add_partial on the per-example level
                    # in the loglikelihood_rolling() function for those.
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
                pbar.update(1)
        pbar.close()
        return re_ord.get_original(res)

    @staticmethod
    def _parse_logprobs(tokens: List, outputs, ctxlen: int) -> Tuple[float, bool]:
        """Process logprobs and tokens.

        :param tokens: list
            Input tokens (potentially left-truncated)
        :param outputs: RequestOutput
            Contains prompt_logprobs
        :param ctxlen: int
            Length of context (so we can slice them away and only keep the predictions)
        :return:
            continuation_logprobs: float
                Log probabilities of continuation tokens
            is_greedy: bool
                Whether argmax matches given continuation exactly
        """

        # The first entry of prompt_logprobs is None because the model has no previous tokens to condition on.
        continuation_logprobs_dicts = outputs.prompt_logprobs

        def coerce_logprob_to_num(logprob):
            # vLLM changed the return type of logprobs from float
            # to a Logprob object storing the float value + extra data
            # (https://github.com/vllm-project/vllm/pull/3065).
            # If we are dealing with vllm's Logprob object, return
            # the logprob value stored as an attribute. Otherwise,
            # return the object itself (which should be a float
            # for older versions of vLLM).
            return getattr(logprob, "logprob", logprob)

        continuation_logprobs_dicts = [
            {
                token: coerce_logprob_to_num(logprob)
                for token, logprob in logprob_dict.items()
            }
            if logprob_dict is not None
            else None
            for logprob_dict in continuation_logprobs_dicts
        ]

        # Calculate continuation_logprobs
        # assume ctxlen always >= 1
        continuation_logprobs = sum(
            logprob_dict.get(token)
            for token, logprob_dict in zip(
                tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
            )
        )

        # Determine if is_greedy
        is_greedy = True
        for token, logprob_dict in zip(
            tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
        ):
            # Get the token with the maximum log probability from the logprob_dict
            if logprob_dict:  # Ensure the logprob_dict is not None
                top_token = max(logprob_dict, key=logprob_dict.get)
                if top_token != token:
                    is_greedy = False
                    break

        return continuation_logprobs, is_greedy

    @staticmethod
    def modify_gen_kwargs(kwargs: dict) -> dict:
        # sampling_params
        do_sample = kwargs.pop("do_sample", None)
        if do_sample is False and "temperature" not in kwargs:
            eval_logger.debug(
                "Got `do_sample=False` and no temperature value, setting VLLM temperature to 0.0 ..."
            )
            kwargs["temperature"] = 0.0
        # default to greedy decoding (temperature 0.0) when no temperature is given;
        # note: this default must come after the do_sample check above, or the
        # `"temperature" not in kwargs` branch can never fire
        kwargs["temperature"] = kwargs.get("temperature", 0.0)
        # hf defaults
        kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
        kwargs["spaces_between_special_tokens"] = kwargs.get(
            "spaces_between_special_tokens", False
        )
        return kwargs