# vllm_causallms.py
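"""vLLM-based model backend for lm-evaluation-harness (registered as "vllm")."""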
import copy
import logging
from importlib.metadata import version
from importlib.util import find_spec
from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union

from more_itertools import distribute
from packaging.version import parse as parse_version
from tqdm import tqdm

from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import (
    Collator,
    configure_pad_token,
    handle_stop_sequences,
    undistribute,
)
from lm_eval.utils import (
    get_rolling_token_windows,
    make_disjoint_window,
)


try:
    import ray
    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest
    from vllm.transformers_utils.tokenizer import get_tokenizer

    if parse_version(version("vllm")) >= parse_version("0.8.3"):
        from vllm.entrypoints.chat_utils import resolve_hf_chat_template
except ModuleNotFoundError:
    pass

if TYPE_CHECKING:
    pass

eval_logger = logging.getLogger(__name__)


@register_model("vllm")
class VLLM(TemplateLM):
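    """vLLM-backed language model wrapper for lm-evaluation-harness.

    Registered under the model name ``vllm``. The model is loaded through
    ``vllm.LLM``; loglikelihood requests are scored via prompt logprobs and
    generation requests via vLLM sampling. Extra keyword arguments supplied in
    ``--model_args`` are forwarded to the vLLM engine unchanged.

    Illustrative usage (standard lm-eval CLI; model and task names are placeholders):

        lm_eval --model vllm --model_args pretrained=<hf_model>,dtype=auto --tasks <task>
    """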
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: str,
        dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto",
        revision: Optional[str] = None,
        trust_remote_code: Optional[bool] = False,
        tokenizer: Optional[str] = None,
        tokenizer_mode: Literal["auto", "slow"] = "auto",
        tokenizer_revision: Optional[str] = None,
        add_bos_token: Optional[bool] = False,
        prefix_token_id: Optional[int] = None,
        tensor_parallel_size: int = 1,
        quantization: Optional[str] = None,
        max_gen_toks: int = 256,
        swap_space: int = 4,
        batch_size: Union[str, int] = 1,
        max_batch_size=None,
        max_length: int = None,
        max_model_len: int = None,
        seed: int = 1234,
        gpu_memory_utilization: float = 0.9,
        device: str = "cuda",
        data_parallel_size: int = 1,
        lora_local_path: str = None,
        enable_thinking: bool = False,
        **kwargs,
    ):
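        """Set up the vLLM engine (or defer to Ray workers when data parallel).

        Most arguments mirror ``vllm.LLM`` engine arguments and are passed through
        unchanged; any extra ``**kwargs`` are forwarded to the engine as well.
        ``max_length`` and ``max_model_len`` are aliases and must not both be set.
        """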
        super().__init__()

        if not find_spec("vllm"):
            raise ModuleNotFoundError(
                "attempted to use 'vllm' LM type, but package `vllm` is not installed. "
                "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
            )

        assert max_length is None or max_model_len is None, (
            "Either max_length or max_model_len may be provided, but not both"
        )

        self._max_length = max_model_len if max_model_len is not None else max_length
        self.tensor_parallel_size = int(tensor_parallel_size)
        self.data_parallel_size = int(data_parallel_size)
        self.model_args = {
            "model": pretrained,
            "gpu_memory_utilization": float(gpu_memory_utilization),
            "revision": revision,
            "dtype": dtype,
            "tokenizer": tokenizer,
            "tokenizer_mode": tokenizer_mode,
            "tokenizer_revision": tokenizer_revision,
            "trust_remote_code": trust_remote_code,
            "tensor_parallel_size": int(tensor_parallel_size),
            "max_model_len": int(self._max_length) if self._max_length else None,
            "swap_space": int(swap_space),
            "quantization": quantization,
            "seed": int(seed),
        }
        self.model_args.update(kwargs)
        self.batch_size = (
            "auto"
            if isinstance(batch_size, str) and "auto" in batch_size
            else int(batch_size)
        )
        if self.data_parallel_size <= 1:
            self.model = LLM(**self.model_args)
        else:
            eval_logger.warning(
                "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached."
            )
            self.model_args["distributed_executor_backend"] = "ray"
            self.batch_size = "auto"
            eval_logger.info("Manual batching is not compatible with data parallelism.")

        from transformers import AutoConfig

        self._config = AutoConfig.from_pretrained(
            pretrained, trust_remote_code=trust_remote_code, revision=revision
        )
        self.tokenizer = get_tokenizer(
            tokenizer if tokenizer else pretrained,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            revision=tokenizer_revision,
            add_bos_token=add_bos_token,
        )
        self.tokenizer = configure_pad_token(self.tokenizer, model_config=self._config)
        self.enable_thinking = enable_thinking
        self.add_bos_token = add_bos_token
        if "gemma" in pretrained.lower():
            self.add_bos_token = True
            eval_logger.info(
                "Found 'gemma' in model name, a BOS token will be used as Gemma series models underperform without it."
            )

        # Newer vLLM (>= 0.8.3) exposes the helper its chat utilities use to resolve
        # the HF chat template; resolve it once here so apply_chat_template() can pass
        # it explicitly. Older versions fall back to the tokenizer's own template.
        if parse_version(version("vllm")) >= parse_version("0.8.3"):
            self.hf_chat_template = resolve_hf_chat_template(
                tokenizer=self.tokenizer,
                chat_template=None,
                tools=None,
                trust_remote_code=trust_remote_code,
            )
        else:
            self.hf_chat_template = None

        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(
                f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
            )

        self._max_gen_toks = max_gen_toks

        if lora_local_path is not None:
            assert parse_version(version("vllm")) > parse_version("0.3.0"), (
                "lora adapters only compatible with vllm > v0.3.0."
            )
            self.lora_request = LoRARequest("finetuned", 1, lora_local_path)
        else:
            self.lora_request = None

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # if max length manually set, return it
            return self._max_length
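        # otherwise: with a single engine we can read the resolved max_model_len
        # directly; under data parallelism no local engine exists, so fall back to
        # HF config attributes, then tokenizer.model_max_length (ignoring its huge
        # sentinel default), and finally _DEFAULT_MAX_LENGTH.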
        if self.data_parallel_size <= 1:
            return self.model.llm_engine.model_config.max_model_len
        else:
            seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
            for attr in seqlen_config_attrs:
                if hasattr(self._config, attr):
                    return getattr(self._config, attr)
            if hasattr(self.tokenizer, "model_max_length"):
                if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                    return self._DEFAULT_MAX_LENGTH
                return self.tokenizer.model_max_length
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self):
        return self._max_gen_toks

    def apply_chat_template(
        self, chat_history: List[Dict[str, str]], add_generation_prompt: bool = True
    ) -> str:
        """
        Method to apply a chat template to a list of chat history between user and model.
        """
        chat_templated = self.tokenizer.apply_chat_template(
            chat_history,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            continue_final_message=not add_generation_prompt,
            chat_template=self.hf_chat_template,
            enable_thinking=self.enable_thinking,
        )

        return chat_templated

    @property
    def tokenizer_name(self) -> str:
        return self.tokenizer.name_or_path.replace("/", "__")

    def tok_encode(
        self,
        string: Union[str, List[str]],
        left_truncate_len: int = None,
        add_special_tokens: bool = False,
        truncation: bool = False,
    ) -> Union[List[int], List[List[int]]]:
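        """Tokenize a string (or batch of strings) with the HF tokenizer.

        BOS/special tokens are only added when ``add_special_tokens`` or
        ``self.add_bos_token`` is set; ``left_truncate_len`` keeps only the last
        N token ids of each encoding.
        """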
        if not add_special_tokens:
            add_special_tokens = self.add_bos_token
        encoding: Union[List[List[int]], List[int]] = self.tokenizer(
            string,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            return_attention_mask=False,
        ).input_ids

        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
        if left_truncate_len:
            if not isinstance(string, str):
                encoding = [enc[-left_truncate_len:] for enc in encoding]
            else:
                encoding = encoding[-left_truncate_len:]

        return encoding

    def _model_generate(
        self,
        requests: List[List[int]] = None,
        generate: bool = False,
        max_tokens: int = None,
        stop: Optional[List[str]] = None,
        **kwargs,
    ):
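        """Run the vLLM engine on a batch of token-id prompts.

        With ``generate=True`` this samples continuations using the supplied
        generation kwargs; otherwise it requests ``prompt_logprobs`` so the
        loglikelihood paths can score the prompt tokens. Under data parallelism
        the requests are sharded across Ray workers, each with its own engine.
        """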
        if generate:
            kwargs = self.modify_gen_kwargs(kwargs)
            sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
        else:
            sampling_params = SamplingParams(
                temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
            )
        if self.data_parallel_size > 1:
            # vLLM hangs if resources are set in ray.remote
            # also seems to only work with decorator and not with ray.remote() fn
            # see https://github.com/vllm-project/vllm/issues/973
            @ray.remote
            def run_inference_one_model(
                model_args: dict,
                sampling_params: SamplingParams,
                requests: List[List[int]],
                lora_request: LoRARequest,
            ):
                llm = LLM(**model_args)
                return llm.generate(
                    prompt_token_ids=requests,
                    sampling_params=sampling_params,
                    lora_request=lora_request,
                )

            # dispatch requests to all self.data_parallel_size workers, in interleaved fashion
            # interleaved important to balance context lengths across workers
            requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
            inputs = (
                (self.model_args, sampling_params, req, self.lora_request)
                for req in requests
            )
            object_refs = [run_inference_one_model.remote(*x) for x in inputs]
            results = ray.get(object_refs)
            # Invoke ray.shutdown() to prevent hang-ups if subsequent calls required.
            ray.shutdown()
            # flatten results
            return undistribute(results)

        outputs = self.model.generate(
            prompt_token_ids=requests,
            sampling_params=sampling_params,
            use_tqdm=True if self.batch_size == "auto" else False,
            lora_request=self.lora_request,
        )
        return outputs

    def loglikelihood_rolling(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[float]:
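        """Compute a full-document loglikelihood for each request.

        Each document is tokenized and split into rolling windows that fit within
        the model's max length; every window is scored by ``_loglikelihood_tokens``
        and the per-window log-probabilities are summed into a single value per
        request (the ``is_greedy`` flags are discarded).
        """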
        adaptive_batch_size = None
        if self.batch_size == "auto":
            adaptive_batch_size = len(requests)

        # First, collect all windows from all requests
        all_windows = []  # List of (request_idx, window) tuples
        request_window_counts = []  # Track number of windows per request

        for req_idx, (string,) in enumerate(
            tqdm(
                [req.args for req in requests],
                disable=(disable_tqdm or (self.rank != 0)),
            )
        ):
            rolling_token_windows: List[Tuple[List[int], List[int]]] = list(
                map(
                    make_disjoint_window,
                    get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.prefix_token_id,
                        # max_seq_len - (1 for context)
                        max_seq_len=self.max_length - 1,
                        context_len=1,
                    ),
                )
            )

            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
            windows = [(None,) + x for x in rolling_token_windows]

            # Store windows with their request index
            all_windows.extend((req_idx, window) for window in windows)
            request_window_counts.append(len(windows))

        all_nlls = []
        batch_size = adaptive_batch_size or int(self.batch_size)
        for i in range(0, len(all_windows), batch_size):
            batch = all_windows[i : i + batch_size]
            # Extract just the windows for processing, keeping track of request indices
            batch_indices, batch_windows = zip(*batch)

            batch_nlls = self._loglikelihood_tokens(
                requests=batch_windows,
                disable_tqdm=False,
            )
            # Store results with their request indices
            all_nlls.extend(zip(batch_indices, batch_nlls))

        # Reconstruct per-request loglikelihoods
        loglikelihoods = []
        current_idx = 0
        for window_count in request_window_counts:
            # Get all nlls for this request
            request_nlls = all_nlls[current_idx : current_idx + window_count]
            # Sum up the nlls for this request (discarding is_greedy)
            request_total = sum(nll[0] for _, nll in request_nlls)
            loglikelihoods.append(request_total)
            current_idx += window_count

            string = requests[len(loglikelihoods) - 1].args[0]
            self.cache_hook.add_partial(
                "loglikelihood_rolling", (string,), request_total
            )

        return loglikelihoods

    def generate_until(
        self, requests: List[Instance], disable_tqdm: bool = False
    ) -> List[str]:
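        """Generate text for each request until a stop sequence or token budget is hit.

        Contexts are batch-tokenized, requests are grouped by their generation
        kwargs and sorted by length, prompts are left-truncated to leave room for
        ``max_gen_toks`` new tokens, and results are returned in the original order.
        """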
        res = []

        # batch tokenize contexts
        context, all_gen_kwargs = zip(*(req.args for req in requests))
        context_encoding: List[List[int]] = self.tok_encode(
            context, add_special_tokens=self.add_bos_token
        )
        requests = [
            ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
        ]

        def _collate_gen(_requests):
            # the negative sign on len(toks) sorts descending - this has a few advantages:
            # - time estimates will always be over not underestimates, which is more useful for planning
            # - to know the size of a batch when going through the list, you know the first one is always the batch
            #   padded context length. this is useful to simplify the batching logic and more importantly to make
            #   automatic adaptive batches much much easier to implement
            # - any OOMs will happen right away rather than near the end
            return -len(_requests[0][1]), _requests[0][0]

        # we group requests by their generation_kwargs,
        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
        # in the same batch.
        re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs")
        chunks = re_ords.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=(disable_tqdm or (self.rank != 0)),
            desc="Running generate_until requests",
        )
        # for each different set of kwargs, we execute all requests, by batch.
        eos = self.tokenizer.decode(self.eot_token_id)
        for chunk in chunks:
            context_and_encoding, all_gen_kwargs = zip(*chunk)
            context, context_encoding = zip(*context_and_encoding)
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]
            # unpack our keyword arguments.
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
                # add EOS token to stop sequences
                until = handle_stop_sequences(kwargs.pop("until", None), eos=eos)
            else:
                raise ValueError(
                    f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
                )
            if "max_gen_toks" in kwargs.keys():
                max_gen_toks = kwargs.pop("max_gen_toks")
            else:
                max_gen_toks = self.max_gen_toks

            # set the max length in tokens of inputs ("context_enc")
            # max len for inputs = max length, minus room to generate the max new tokens
            max_ctx_len = self.max_length - max_gen_toks
            all_lengths = [len(x) for x in context_encoding]
            for length in all_lengths:
                if length > max_ctx_len:
                    eval_logger.warning(
                        f"Context length {length} exceeds maximum context length {max_ctx_len} (max model length minus max_gen_toks). Truncating from the left."
                    )
            context_encoding = [x[-max_ctx_len:] for x in context_encoding]

            # perform batched generation
            cont = self._model_generate(
                requests=context_encoding,
                generate=True,
                max_tokens=max_gen_toks,
                stop=until,
                **kwargs,
            )

            # cache generations
            for output, context in zip(cont, context):
                generated_text = output.outputs[0].text
                res.append(generated_text)
                self.cache_hook.add_partial(
                    "generate_until", (context, gen_kwargs), generated_text
                )
                pbar.update(1)

        pbar.close()
        # reorder all group of results back to original unsorted form
        return re_ords.get_original(res)

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
    ) -> List[Tuple[float, bool]]:
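        """Score (context, continuation) pairs.

        Prompts are sorted by length, batched, and scored in one engine pass using
        prompt logprobs; each result is returned as
        ``(continuation_logprob_sum, is_greedy)`` in the original request order.
        """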
        res = []

        def _collate(x):
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        # Reorder requests by length and batch
        re_ord = Collator(requests, sort_fn=_collate)
        chunks = re_ord.get_batched(
            n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
        )

        pbar = tqdm(
            total=len(requests),
            disable=disable_tqdm,
            desc="Running loglikelihood requests",
        )
        for chunk in chunks:
            inputs = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                if (full_length := len(context_enc + continuation_enc)) >= self.max_length:
                    eval_logger.warning(
                        f"Combined context and continuation length {full_length} exceeds max length ({self.max_length}). Truncating from the left."
                    )
                inp = (context_enc + continuation_enc)[-(self.max_length) :]
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length)
                )

                inputs.append(inp)
                ctxlens.append(ctxlen)

            outputs = self._model_generate(requests=inputs, generate=False)

            for output, ctxlen, (cache_key, _, _), inp in zip(
                outputs, ctxlens, chunk, inputs
            ):
                answer = self._parse_logprobs(
                    tokens=inp,
                    outputs=output,
                    ctxlen=ctxlen,
                )

                res.append(answer)

                if cache_key is not None:
                    # special case: loglikelihood_rolling produces a number of loglikelihood requests
                    # all with cache key None. instead do add_partial on the per-example level
                    # in the loglikelihood_rolling() function for those.
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
                pbar.update(1)
        pbar.close()
        return re_ord.get_original(res)

    @staticmethod
    def _parse_logprobs(tokens: List, outputs, ctxlen: int) -> Tuple[float, bool]:
        """Process logprobs and tokens.

        :param tokens: list
            Input tokens (potentially left-truncated)
        :param outputs: RequestOutput
            Contains prompt_logprobs
        :param ctxlen: int
            Length of context (so we can slice them away and only keep the predictions)
        :return:
            continuation_logprobs: float
                Log probabilities of continuation tokens
            is_greedy: bool
                Whether argmax matches given continuation exactly
        """

        # The first entry of prompt_logprobs is None because the model has no previous tokens to condition on.
        continuation_logprobs_dicts = outputs.prompt_logprobs

        def coerce_logprob_to_num(logprob):
            # vLLM changed the return type of logprobs from float
            # to a Logprob object storing the float value + extra data
            # (https://github.com/vllm-project/vllm/pull/3065).
            # If we are dealing with vllm's Logprob object, return
            # the logprob value stored as an attribute. Otherwise,
            # return the object itself (which should be a float
            # for older versions of vLLM).
            return getattr(logprob, "logprob", logprob)

        continuation_logprobs_dicts = [
            {
                token: coerce_logprob_to_num(logprob)
                for token, logprob in logprob_dict.items()
            }
            if logprob_dict is not None
            else None
            for logprob_dict in continuation_logprobs_dicts
        ]

        # Calculate continuation_logprobs
        # assume ctxlen always >= 1
        continuation_logprobs = sum(
            logprob_dict.get(token)
            for token, logprob_dict in zip(
                tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
            )
        )

        # Determine if is_greedy
        is_greedy = True
        for token, logprob_dict in zip(
            tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
        ):
            # Get the token with the maximum log probability from the logprob_dict
            if logprob_dict:  # Ensure the logprob_dict is not None
                top_token = max(logprob_dict, key=logprob_dict.get)
                if top_token != token:
                    is_greedy = False
                    break

        return continuation_logprobs, is_greedy

    @staticmethod
    def modify_gen_kwargs(kwargs: dict) -> dict:
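        """Normalize generation kwargs for vLLM.

        Drops ``do_sample``, defaults to greedy decoding (temperature 0.0), and
        mirrors HF defaults for special-token handling in the decoded output.
        """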
        # sampling_params
        do_sample = kwargs.pop("do_sample", None)
        if do_sample is False and "temperature" not in kwargs:
            eval_logger.debug(
                "Got `do_sample=False` and no temperature value, setting VLLM temperature to 0.0 ..."
            )
            kwargs["temperature"] = 0.0
        # default to greedy decoding unless a temperature was explicitly provided
        kwargs["temperature"] = kwargs.get("temperature", 0.0)
        # hf defaults
        kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
        kwargs["spaces_between_special_tokens"] = kwargs.get(
            "spaces_between_special_tokens", False
        )
        return kwargs