gaoqiong / lm-evaluation-harness · Commits

Commit 5cb7af7e
Authored Jul 03, 2023 by haileyschoelkopf

revert gpus > num processes case

Parent: 62f4d663
Showing 1 changed file with 20 additions and 25 deletions.

lm_eval/models/huggingface.py (+20 −25)
```diff
@@ -174,39 +174,34 @@ class HFLM(LM):
                     )
                 else:
                     pass
-            else:
-                if gpus > accelerator.num_processes:
-                    # TODO: make sure there's still never an edge case where we unintentionally default to CPU
-                    eval_logger.warning(
-                        "WARNING: The number of total system GPUs does not match the number of spawned processes. "
-                        "If you would like to use data parallelism, please launch the script "
-                        "with 'accelerate launch *script*'. "
-                        f"Current run will proceed with {accelerator.num_processes} devices."
-                    )
+            elif gpus > accelerator.num_processes:
+                # TODO: make sure there's still never an edge case where we unintentionally default to CPU
+                eval_logger.warning(
+                    "WARNING: The number of total system GPUs does not match the number of spawned processes. "
+                    "If you would like to use data parallelism, please launch the script "
+                    "with 'accelerate launch *script*'. "
+                    f"Current run will proceed with {accelerator.num_processes} devices."
+                )
+                self._rank = accelerator.local_process_index
+                self._world_size = accelerator.num_processes
+                # manually set model to use gpu, for case where many GPUs available but
+                # only seek to use one
+                self._device = (
+                    torch.device(f"cuda:{accelerator.local_process_index}")
+                    if torch.cuda.is_available()
+                    else torch.device("cpu")
+                )
+                self.model.to(self.device)
+            else:
+                self._model = accelerator.prepare(self.model)
+                self._device = torch.device(f"cuda:{accelerator.local_process_index}")
+                self.accelerator = accelerator
+                if self.accelerator.is_local_main_process:
+                    eval_logger.info(f"Using {gpus} devices with data parallelism")
-                # manually set model to use gpu, for case where many GPUs available but
-                # only seek to use one
-                # self._device = (
-                #     torch.device(f"cuda:{accelerator.local_process_index}")
-                #     if torch.cuda.is_available()
-                #     else torch.device("cpu")
-                # )
-                # self.model.to(self.device)
-                # else:
-                #     self._model = accelerator.prepare(self.model)
-                #     self._device = torch.device(f"cuda:{accelerator.local_process_index}")
-                #     self.accelerator = accelerator
-                #     self._rank = self.accelerator.local_process_index
-                #     self._world_size = self.accelerator.num_processes
                 self._rank = self.accelerator.local_process_index
                 self._world_size = self.accelerator.num_processes

     @property
     def config(self):
```
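The restored hunk amounts to a per-process device rule: if `accelerate launch` spawned fewer processes than there are visible GPUs, each process warns and proceeds on a single device indexed by its local rank; only when every GPU has a process does the model go through `accelerator.prepare` for data parallelism. Below is a minimal standalone sketch of that rule, assuming `torch` and `accelerate` are installed; `pick_device` is a hypothetical helper for illustration, not part of the harness API.

```python
# Sketch only: mirrors the branch logic this commit restores,
# not the harness's actual HFLM implementation.
import torch
from accelerate import Accelerator


def pick_device(accelerator: Accelerator) -> torch.device:
    """Hypothetical helper: choose this process's device the way the
    restored `elif gpus > accelerator.num_processes` branch does."""
    gpus = torch.cuda.device_count()
    if gpus > accelerator.num_processes:
        # Fewer spawned processes than GPUs: fall back to one device per
        # process, indexed by local rank (or CPU if CUDA is unavailable).
        return (
            torch.device(f"cuda:{accelerator.local_process_index}")
            if torch.cuda.is_available()
            else torch.device("cpu")
        )
    # One process per GPU: the real code path instead wraps the model with
    # accelerator.prepare(model) and pins it to cuda:<local_process_index>.
    return torch.device(f"cuda:{accelerator.local_process_index}")


if __name__ == "__main__":
    acc = Accelerator()
    print(pick_device(acc))
```

Run directly with `python sketch.py`, this reports a single process; launched via `accelerate launch sketch.py` on a multi-GPU host (as the warning message suggests), `accelerator.num_processes` matches the GPU count and the run would take the data-parallel branch instead.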