OpenDAS / fairscale · Commits

Unverified commit eab1551a, authored Jan 25, 2021 by Benjamin Lefaudeux, committed by GitHub on Jan 25, 2021.
[OSS] Fix for torch dist broadcast randomly failing on dummy object (#323)
* fix for torch dist broadcast failing on dummy object
Parent: 1ece280a
Showing 2 changed files with 6 additions and 3 deletions.
fairscale/optim/oss.py  +5 -2
pyproject.toml          +1 -1
fairscale/optim/oss.py
@@ -327,6 +327,9 @@ class OSS(Optimizer):
             self.local_state_dict(), non_blocking=True, device=torch.device("cpu")
         )
 
+        # Tensor cannot be really empty, even if its size is meaningless
+        dummy_sync_tensor = torch.tensor([1], device=self._device)
+
         for rank in range(self.world_size):
             if rank == self.rank:
                 # Send the state to the reference replica
@@ -346,10 +349,10 @@ class OSS(Optimizer):
                 # Discard this tensor/rank, broadcast necessary for syncing and because NCCL does not support gather
                 if _torch_broadcast_object:
-                    dist.broadcast_object_list([0], src=global_rank, group=self.group)
+                    dist.broadcast_object_list([dummy_sync_tensor], src=global_rank, group=self.group)
                 else:
                     broadcast_object(
-                        torch.tensor([0], dtype=torch.uint8, device=self._device),
+                        torch.tensor([dummy_sync_tensor], dtype=torch.uint8, device=self._device),
                         src_rank=global_rank,
                         group=self.group,
                         dist_device=self._device,
                     )
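In fairscale/optim/oss.py, the ranks that merely acknowledge the state sync used to broadcast a bare 0 (or a freshly built torch.tensor([0], dtype=torch.uint8, ...)) as the dummy object, which is what the commit title reports as randomly failing. The fix allocates one real tensor up front, dummy_sync_tensor = torch.tensor([1], device=self._device), and broadcasts that same object in both code paths. The snippet below is a minimal, single-process sketch of the resulting pattern, not fairscale code: run_demo, the hard-coded address and port, and the one-rank gloo group are assumptions made so the example can run on its own, and it presumes a PyTorch new enough to ship dist.broadcast_object_list (the diff guards that with _torch_broadcast_object).

# Minimal, single-process sketch of the pattern this commit moves to (assumed
# helper name and rendezvous address; not fairscale's API): pre-allocate one
# real tensor and broadcast it as the dummy object, instead of a bare 0.
import torch
import torch.distributed as dist


def run_demo() -> None:
    # One-rank "gloo" group so the sketch runs without a multi-process launcher.
    dist.init_process_group(
        backend="gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1
    )

    device = torch.device("cpu")

    # Same idea as dummy_sync_tensor in the diff above: the tensor cannot be
    # really empty, even if its size is meaningless.
    dummy_sync_tensor = torch.tensor([1], device=device)

    # Ranks that only acknowledge the sync broadcast the placeholder tensor,
    # not a plain Python literal.
    dist.broadcast_object_list([dummy_sync_tensor], src=0)

    dist.destroy_process_group()


if __name__ == "__main__":
    run_demo()

In the actual optimizer, self._device and self.group come from the OSS sharding setup and src is the global rank of the sending replica; the sketch only shows the shape of the broadcast call.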
pyproject.toml
@@ -28,4 +28,4 @@ use_parentheses = true
 skip_glob = ["build/*", "stubs/*"]
 # Don't split "import" and "from".
 force_sort_within_sections = true
-known_third_party = ["datasets", "golden_configs", "helpers", "models", "numpy", "pytest", "recommonmark", "setuptools", "torch", "torch_pg", "torchtext", "torchvision"]
+known_third_party = ["benchmark_dataset", "datasets", "golden_configs", "helpers", "models", "numpy", "pytest", "recommonmark", "setuptools", "torch", "torch_pg", "torchtext", "torchvision"]
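The pyproject.toml change is isort configuration only: benchmark_dataset is added to known_third_party, so isort files imports of that module with the other third-party packages instead of guessing where they belong. The snippet below is a hedged illustration of what that setting controls, using isort 5's Python API; it is not part of the commit, and the module list is trimmed to the names relevant here.

# Hedged sketch: shows how known_third_party steers isort's grouping. The
# module names come from the config; the call itself is not part of the commit.
import isort

messy = (
    "from fairscale.optim import OSS\n"
    "import benchmark_dataset\n"
    "import os\n"
    "import torch\n"
)

print(
    isort.code(
        messy,
        known_third_party=["benchmark_dataset", "torch"],
        known_first_party=["fairscale"],
        force_sort_within_sections=True,
    )
)

Running it prints the imports with benchmark_dataset grouped alongside torch in the third-party section and fairscale kept first-party, which is presumably why the benchmark helper module needed an explicit entry.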