Commit 82786023 authored by lintangsutawika's avatar lintangsutawika
Browse files

fixed warning and info with proper logging

parent 47b888bb
...@@ -37,10 +37,10 @@ class HFLM(LM): ...@@ -37,10 +37,10 @@ class HFLM(LM):
if device not in ["cuda", "cpu"]: if device not in ["cuda", "cpu"]:
device = int(device) device = int(device)
self._device = torch.device(device) self._device = torch.device(device)
print(f"Using device '{device}'") eval_logger.info(f"Using device '{device}'")
else: else:
print("Device not specified") eval_logger.info("Device not specified")
print(f"Cuda Available? {torch.cuda.is_available()}") eval_logger.info(f"Cuda Available? {torch.cuda.is_available()}")
self._device = ( self._device = (
torch.device("cuda") torch.device("cuda")
if torch.cuda.is_available() if torch.cuda.is_available()
...@@ -74,13 +74,12 @@ class HFLM(LM): ...@@ -74,13 +74,12 @@ class HFLM(LM):
if gpus > 1: if gpus > 1:
accelerator = Accelerator() accelerator = Accelerator()
if gpus > accelerator.num_processes: if gpus > accelerator.num_processes:
warning = ( eval_logger.warning(
"WARNING: The number of total system GPUs does not match the number of spawned processes. " "WARNING: The number of total system GPUs does not match the number of spawned processes. "
"If you would like to use data parallelism, please launch the script " "If you would like to use data parallelism, please launch the script "
"with 'accelerate launch *script*'. " "with 'accelerate launch *script*'. "
f"Current run will proceed with {accelerator.num_processes} devices." f"Current run will proceed with {accelerator.num_processes} devices."
) )
print(warning)
self._rank = accelerator.local_process_index self._rank = accelerator.local_process_index
self._world_size = accelerator.num_processes self._world_size = accelerator.num_processes
else: else:
...@@ -89,7 +88,7 @@ class HFLM(LM): ...@@ -89,7 +88,7 @@ class HFLM(LM):
self.accelerator = accelerator self.accelerator = accelerator
if self.accelerator.is_local_main_process: if self.accelerator.is_local_main_process:
print(f"Using {gpus} devices with data parallelism") eval_logger.info(f"Using {gpus} devices with data parallelism")
self._rank = self.accelerator.local_process_index self._rank = self.accelerator.local_process_index
self._world_size = self.accelerator.num_processes self._world_size = self.accelerator.num_processes
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment