Commit 650d3c76 authored by Benjamin Fattori's avatar Benjamin Fattori
Browse files

remove device_placement flag and fix rank/world_size assignments

parent 71388a7e
...@@ -70,15 +70,15 @@ class HFLM(LM): ...@@ -70,15 +70,15 @@ class HFLM(LM):
# multigpu support with accelerate # multigpu support with accelerate
if gpus > 1: if gpus > 1:
accelerator = Accelerator(device_placement=False) accelerator = Accelerator()
if gpus > accelerator.num_processes: if gpus > accelerator.num_processes:
warning = ("WARNING: The number of total system GPUs does not match the number of spawned processes. " warning = ("WARNING: The number of total system GPUs does not match the number of spawned processes. "
"If you would like to use data parallelism, please launch the script " "If you would like to use data parallelism, please launch the script "
"with 'accelerate launch *script*'. " "with 'accelerate launch *script*'. "
f"Current run will proceed with {accelerator.num_processes} devices.") f"Current run will proceed with {accelerator.num_processes} devices.")
print(warning) print(warning)
self._rank = self.accelerator.local_process_index self._rank = accelerator.local_process_index
self._world_size = self.accelerator.num_processes self._world_size = accelerator.num_processes
else: else:
self.gpt2 = accelerator.prepare(self.gpt2) self.gpt2 = accelerator.prepare(self.gpt2)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment