OpenDAS / apex · Commits

Commit bc81b1c1, authored Apr 29, 2020 by Thor Johnsen

Bug fix

parent 44f54712
Showing 1 changed file, with 2 additions and 1 deletion:

apex/contrib/optimizers/distributed_fused_adam.py (+2, -1)
@@ -106,7 +106,7 @@ class DistributedFusedAdam(torch.optim.Optimizer):
                 if not p.requires_grad:
                     continue
                 self._model_params.append(p)
-                state = self.state['p']
+                state = self.state[p]
                 if len(state) == 0:
                     state['step'] = 0
                 if self._param_state is None:
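
This hunk fixes a state-lookup bug: self.state['p'] indexed the optimizer state with the string literal 'p', so every parameter resolved to one shared entry; indexing with the parameter object p gives each parameter its own state dict. A minimal sketch of the difference, using plain torch.optim.SGD for illustration rather than the fused optimizer:

# Sketch only, not from the commit: torch.optim.Optimizer.state is a
# defaultdict keyed by the parameter tensors themselves, so indexing with
# the string 'p' silently creates a single stray entry shared by all loops.
import torch

params = [torch.nn.Parameter(torch.zeros(2)) for _ in range(3)]
opt = torch.optim.SGD(params, lr=0.1)

for p in params:
    state = opt.state[p]      # fixed form: one state dict per parameter
    if len(state) == 0:
        state['step'] = 0

stray = opt.state['p']        # buggy form: one entry, whatever p was
print(len(opt.state))         # 4 = three per-parameter entries + the stray key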
@@ -543,6 +543,7 @@ class DistributedFusedAdam(torch.optim.Optimizer):
             self.revert_step()
         else:
             # Copy self._new_params to model params
+            for p in self._model_params: self.state[p]['step'] += 1
             multi_tensor_applier(
                 fused_adam_cuda.maybe_cast_mt,
                 self._overflow_buf,
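
This hunk advances the per-parameter step counter only on the non-overflow path, where the update is actually applied; when revert_step() rolls the step back, the count stays put. The counter matters because Adam's bias corrections 1 - beta1^t and 1 - beta2^t depend on the number of applied steps. A self-contained sketch (standard Adam arithmetic, not the fused CUDA kernel) of that dependence:

# Sketch of the step-size multiplier implied by Adam's bias correction;
# adam_step_scale is an illustrative helper, not an apex API.
import math

def adam_step_scale(step: int, beta1: float = 0.9, beta2: float = 0.999) -> float:
    return math.sqrt(1.0 - beta2 ** step) / (1.0 - beta1 ** step)

print(adam_step_scale(1))      # ~0.316: strong correction on the first step
print(adam_step_scale(10000))  # ~1.0: the correction washes out as t grows

A counter that never advanced would pin the correction at its initial value (and at step 0 it degenerates to a division by zero), so the increment has to happen exactly once per applied step.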