Commit c7ef3da7 authored by Til Jasper Ullrich's avatar Til Jasper Ullrich
Browse files

Use evaluation_mode=True for accelerate to prevent OOM

parent 7634a6ec
@@ -289,7 +289,7 @@ class HFLM(LM):
                     "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes`. If the desired GPU is being used, this message is safe to ignore."
                 )
             else:
-                self._model = accelerator.prepare(self.model)
+                self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
             self._device = torch.device(f"cuda:{accelerator.local_process_index}")
         self.accelerator = accelerator
...
Markdown is supported
0% — drop or paste files to attach.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.