Commit d424f26b authored by Jeffrey Quesnelle

fix adaptive batch crash when there are no new requests (e.g. when pulling from cache)

parent 8fc04fe5
@@ -254,6 +254,7 @@ class BaseLM(LM):
         # automatic (variable) batch size detection for vectorization
         # pull longest context sample from request
+        if len(re_ord.get_reordered()) > 0:
             _, context_enc, continuation_enc = re_ord.get_reordered()[0]
             max_context = len((context_enc + continuation_enc)[-(self.max_length + 1) :][:-1])
             if (self.batch_size == 'auto'):
@@ -273,6 +274,8 @@ class BaseLM(LM):
             else:
                 adaptive_batch_size = override_bs
+        else:
+            adaptive_batch_size = 0 if override_bs is None else override_bs

         for chunk in utils.chunks(
             tqdm(re_ord.get_reordered(), disable=disable_tqdm),
             self.batch_size if self.batch_size != "auto" else adaptive_batch_size,
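For illustration, a minimal standalone sketch of the failure mode and the guard (a paraphrase, not the repository's actual BaseLM method; pick_adaptive_batch_size and its batch-size heuristic are hypothetical stand-ins): when every request is answered from the cache, re_ord.get_reordered() returns an empty list, so the unguarded [0] index raised an IndexError before this commit.

# Sketch of the crash fixed by this commit (hypothetical helper, not the
# repository's actual BaseLM method).

def pick_adaptive_batch_size(reordered, batch_size, override_bs=None):
    if len(reordered) > 0:
        # Normal path: probe the longest sample to size the batch.
        _, context_enc, continuation_enc = reordered[0]
        max_context = len(context_enc + continuation_enc)
        if batch_size == "auto":
            return max(1, 2048 // max_context)  # stand-in heuristic
        return override_bs
    # Guarded path added by the fix: no new requests (all served from
    # cache), so return a placeholder; the chunking loop downstream
    # never iterates over an empty request list anyway.
    return 0 if override_bs is None else override_bs

# Before the fix, an empty request list crashed on reordered[0]; now:
assert pick_adaptive_batch_size([], "auto") == 0
assert pick_adaptive_batch_size([(None, [1, 2], [3])], "auto") >= 1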