Commit 2362ab41 authored by baberabb's avatar baberabb
Browse files

bugfix

parent e5491709
@@ -160,7 +160,7 @@ class VLLM(LM):
         # batch tokenize contexts
         context, all_gen_kwargs = zip(*(req.args for req in requests))
-        context_encoding = self.tokenizer(context)
+        context_encoding = self.tokenizer(context).input_ids
         requests = [
             ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
         ]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment