Commit 8a89b30c authored by Benjamin Fattori

empty CUDA cache after determining largest possible batch size

parent 97f936be
@@ -268,12 +268,14 @@ class BaseLM(LM):
                 return batch_size
             batch_size = forward_batch()
-            print(f"Determined Largest batch size: {batch_size}")
+            print(f"Determined largest batch size: {batch_size}")
             adaptive_batch_size = batch_size
         else:
             adaptive_batch_size = override_bs
+        torch.cuda.empty_cache()  # empty cache after determining batch size
         for chunk in utils.chunks(
             tqdm(re_ord.get_reordered(), disable=disable_tqdm), self.batch_size if self.batch_size != "auto" else adaptive_batch_size
         ):
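Why the added call matters: the batch-size probe allocates large dummy tensors to find the biggest batch that fits, and PyTorch's caching allocator keeps those blocks reserved even after the tensors go out of scope, so that memory would otherwise stay unavailable to the real evaluation loop. Below is a minimal standalone sketch of the same halve-on-OOM pattern followed by the cache release; the helper name and the toy model are illustrative placeholders, not the harness's actual forward_batch implementation.

import torch
import torch.nn as nn

def find_largest_batch_size(model, seq_len, starting_batch_size, device):
    """Probe for the largest batch size by halving on CUDA out-of-memory."""
    batch_size = starting_batch_size
    while batch_size >= 1:
        try:
            # Dummy forward pass at the candidate batch size.
            test_batch = torch.ones((batch_size, seq_len), device=device).long()
            with torch.no_grad():
                _ = model(test_batch)
            return batch_size
        except RuntimeError as err:
            # PyTorch reports CUDA OOM as a RuntimeError; re-raise anything else.
            if "out of memory" not in str(err):
                raise
            batch_size //= 2
    raise RuntimeError("no batch size fits in memory")

if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = nn.Embedding(50_000, 512).to(device)  # stand-in for a real LM
    bs = find_largest_batch_size(model, seq_len=2048, starting_batch_size=512, device=device)
    print(f"Determined largest batch size: {bs}")
    # The probe's tensors are freed, but the caching allocator still holds
    # their blocks; releasing them before the evaluation loop is exactly
    # what the commit's added torch.cuda.empty_cache() call does.
    torch.cuda.empty_cache()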