Unverified Commit f7c67f0e authored by Baber Abbasi, committed by GitHub

`use_tqdm=False` if batch_size != auto (#1144)

parent bd0f2414
@@ -139,7 +139,6 @@ please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
         generate: bool = False,
         max_tokens: int = None,
         stop: Optional[List[str]] = None,
-        use_tqdm=True,
         **kwargs,
     ):
         if "do_sample" in kwargs.keys():
@@ -169,7 +168,7 @@ please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
         outputs = self.model.generate(
             prompt_token_ids=requests,
             sampling_params=sampling_params,
-            use_tqdm=use_tqdm,
+            use_tqdm=True if self.batch_size == "auto" else False,
         )
         return outputs
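
The net effect: the hard-coded `use_tqdm` parameter is dropped from the method signature, and vLLM's internal progress bar is shown only when `batch_size == "auto"`, presumably because only then is the entire request list handed to `self.model.generate` in a single call; with a fixed batch size the harness calls generate once per batch, and a fresh bar per call would be noisy. Below is a minimal sketch of the resulting behavior, assuming the vLLM API of this era (`LLM.generate` accepting `prompt_token_ids` and `use_tqdm`); `model_generate` and its parameters are illustrative stand-ins for the harness method, not the actual implementation.

```python
# Sketch only: shows how the progress bar is tied to the batch_size setting.
from typing import List, Optional

from vllm import LLM, SamplingParams


def model_generate(
    model: LLM,
    requests: List[List[int]],  # pre-tokenized prompts
    batch_size,                 # the harness setting: "auto" or an int
    max_tokens: Optional[int] = None,
    stop: Optional[List[str]] = None,
):
    sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop)
    # vLLM's internal tqdm bar is helpful only when all requests are
    # submitted in one call (batch_size == "auto"); with a fixed batch
    # size the harness iterates over many small batches itself.
    outputs = model.generate(
        prompt_token_ids=requests,
        sampling_params=sampling_params,
        use_tqdm=(batch_size == "auto"),
    )
    return outputs
```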