OpenDAS / apex · Commits

Commit 8405d436
Authored Mar 23, 2020 by Kexin Yu

    revert to gradient pre-normalization

Parent: a3ffb8a7
Showing 2 changed files with 8 additions and 9 deletions:

  apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu   +8  -8
  apex/contrib/optimizers/fused_lamb.py                     +0  -1
apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu

@@ -104,20 +104,20 @@ struct LAMBStage1Functor
       for(int ii = 0; ii < ILP; ii++)
       {
         if (mode == MOMENT_MODE_0) {
-          // no gradient pre-normalization
-          MATH_T grad = r_g[ii];
-          grad = grad + decay * r_p[ii];
-          r_m[ii] = r_m[ii] * beta1 + beta3 * grad;
-          r_v[ii] = r_v[ii] * beta2 + (1-beta2) * grad * grad;
+          MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm;
+          // L2 on scaled grad
+          scaled_grad = scaled_grad + decay * r_p[ii];
+          r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
+          r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
           MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
           MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
           MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
           r_p[ii] = next_m_unbiased / denom;
         }
         else {
-          MATH_T grad = r_g[ii] / clipped_global_grad_norm;
-          r_m[ii] = r_m[ii] * beta1 + beta3 * grad;
-          r_v[ii] = r_v[ii] * beta2 + (1-beta2) * grad * grad;
+          MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm;
+          r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
+          r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
           MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
           MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
           MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
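The substantive change is in the MOMENT_MODE_0 branch: stage 1 once again divides the raw gradient by the clipped global gradient norm before applying weight decay and the moment updates, while the else branch merely renames grad to scaled_grad. Below is a minimal NumPy sketch of what the updated MOMENT_MODE_0 branch computes for one parameter tensor. It is an illustration rather than apex's API: the helper name lamb_stage1_mode0 is hypothetical, and the beta3 = 1 - beta1 and bias-correction expressions are assumptions about how the caller sets those kernel arguments, since they are not shown in this diff.

import numpy as np

def lamb_stage1_mode0(g, p, m, v, step, clipped_global_grad_norm,
                      beta1=0.9, beta2=0.999, epsilon=1e-6, decay=0.01):
    # Gradient pre-normalization (what this commit reverts to): divide the raw
    # gradient by the clipped global gradient norm before anything else.
    scaled_grad = g / clipped_global_grad_norm
    # "L2 on scaled grad": weight decay is folded into the scaled gradient.
    scaled_grad = scaled_grad + decay * p
    # Moment updates. beta3 = 1 - beta1 is an assumption about how the caller
    # sets that kernel argument (gradient averaging); it is not part of this diff.
    beta3 = 1.0 - beta1
    m = m * beta1 + beta3 * scaled_grad
    v = v * beta2 + (1.0 - beta2) * scaled_grad * scaled_grad
    # Bias corrections for a 1-indexed step count (also an assumption).
    beta1_correction = 1.0 - beta1 ** step
    beta2_correction = 1.0 - beta2 ** step
    next_m_unbiased = m / beta1_correction
    next_v_unbiased = v / beta2_correction
    denom = np.sqrt(next_v_unbiased) + epsilon
    # Stage 1 stops here; the trust ratio and learning rate are applied later.
    update = next_m_unbiased / denom
    return update, m, v

# Example call with toy tensors:
g = np.random.randn(4).astype(np.float32)
p = np.zeros(4, dtype=np.float32)
m = np.zeros(4, dtype=np.float32)
v = np.zeros(4, dtype=np.float32)
update, m, v = lamb_stage1_mode0(g, p, m, v, step=1, clipped_global_grad_norm=2.0)

As the diff shows, stage 1 stops at next_m_unbiased / denom; the learning rate and the LAMB trust ratio are applied in a later stage.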
apex/contrib/optimizers/fused_lamb.py

@@ -80,7 +80,6 @@ class FusedLAMB(torch.optim.Optimizer):
         self.adam_w_mode = 1 if adam_w_mode else 0
         self.set_grad_none = set_grad_none
-        print("using apex.contrib.optimizers.FusedLamb")
 
     def zero_grad(self):
         if self.set_grad_none:
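The Python-side change only drops a startup print from the FusedLAMB constructor; behavior is otherwise unchanged. For orientation, a hypothetical usage sketch follows, assuming the contrib CUDA extension is built and that the constructor takes the usual lr argument alongside the adam_w_mode and set_grad_none options visible in the hunk above.

import torch
from apex.contrib.optimizers.fused_lamb import FusedLAMB  # assumes the contrib extension is installed

model = torch.nn.Linear(1024, 1024).cuda()
# adam_w_mode and set_grad_none are the two options touched above; lr follows
# the usual torch.optim convention (an assumption, not shown in this diff).
optimizer = FusedLAMB(model.parameters(), lr=1e-3, adam_w_mode=True, set_grad_none=True)

loss = model(torch.randn(8, 1024, device="cuda")).sum()
loss.backward()
optimizer.step()
# With set_grad_none=True, zero_grad() sets p.grad to None instead of zeroing it,
# per the branch on self.set_grad_none shown above.
optimizer.zero_grad()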