OpenDAS / apex · Commits

Commit a0bf956a, authored Apr 03, 2020 by Kexin Yu
more debugging
Parent: feb93a2a
Showing 1 changed file with 3 additions and 0 deletions.

apex/contrib/optimizers/fused_lamb.py (+3, -0)
...
@@ -83,6 +83,7 @@ class FusedLAMB(torch.optim.Optimizer):
         self.adam_w_mode = 1 if adam_w_mode else 0
         self.set_grad_none = set_grad_none
+        print("debugging LAMB")

     def zero_grad(self):
         if self.set_grad_none:
...
@@ -130,6 +131,8 @@ class FusedLAMB(torch.optim.Optimizer):
         # blend two grad norms to get global grad norm
         global_grad_norm = math.sqrt(g_norm_32 * g_norm_32 + g_norm_16 * g_norm_16)
         max_grad_norm = self.defaults['max_grad_norm']
+        print("====global_grad_norm:", global_grad_norm)
+        print("====max_grad_norm:", max_grad_norm)

         for group in self.param_groups:
             bias_correction = 1 if group['bias_correction'] else 0
...
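The second hunk prints the blended gradient norm alongside the clipping threshold read from self.defaults['max_grad_norm']. Because the fp32 and fp16 gradients live in disjoint parameter groups, sqrt(g_norm_32^2 + g_norm_16^2) is exactly the L2 norm of all gradients taken together, which is what the new print exposes. A minimal sketch of that identity (the tensors below are made-up illustrative values, not anything from the repository):

import math
import torch

# Stand-ins for the fp32 and fp16 gradient groups.
g32 = torch.tensor([3.0, 4.0])    # L2 norm 5
g16 = torch.tensor([5.0, 12.0])   # L2 norm 13

g_norm_32 = g32.norm().item()
g_norm_16 = g16.norm().item()

# Same blend as in the diff: the root of the summed squares...
global_grad_norm = math.sqrt(g_norm_32 * g_norm_32 + g_norm_16 * g_norm_16)

# ...equals the L2 norm of the two groups concatenated.
combined_norm = torch.cat([g32, g16]).norm().item()
assert abs(global_grad_norm - combined_norm) < 1e-4
print("====global_grad_norm:", global_grad_norm)  # ~13.9284 (= sqrt(194))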