OpenDAS / apex
"vscode:/vscode.git/clone" did not exist on "194a084640bca80f99af121c2cd64755c31f64f3"
Commit ad50ce9a, authored Jun 23, 2020 by Kexin Yu
parent cd3d6d12

    add test case for non-zero weight decay
Showing 1 changed file with 45 additions and 39 deletions:

tests/L0/run_optimizers/test_lamb.py (+45, -39)
@@ -190,9 +190,11 @@ class TestFusedLAMB(unittest.TestCase):
     def gen_single_type_test(self, param_type=torch.float):
         nelem = 278011
-        lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':0}
         tensor = torch.rand(nelem, dtype=param_type, device='cuda')
-        ref_param, tst_param, ref_optim, tst_optim = \
-            self.gen_param_optim([tensor], lamb_option)
+        weight_decay = [0, 0.01]
+
+        for wd in weight_decay:
+            lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':wd}
+            ref_param, tst_param, ref_optim, tst_optim = \
+                self.gen_param_optim([tensor], lamb_option)
...
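This hunk moves optimizer construction inside a loop over weight_decay values, so both the wd == 0 and wd != 0 paths in FusedLAMB get exercised. A minimal, self-contained sketch of that pattern follows, assuming apex is installed with CUDA support; gen_param_optim itself is not shown in the hunk, so the single-optimizer pairing below is a hypothetical stand-in, not the test's actual helper:

    import torch
    from apex.optimizers import FusedLAMB  # requires the CUDA build of apex

    nelem = 278011
    tensor = torch.rand(nelem, dtype=torch.float, device='cuda')

    for wd in [0, 0.01]:
        lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':wd}
        param = torch.nn.Parameter(tensor.clone())  # fresh parameters per wd value
        optim = FusedLAMB([param], **lamb_option)
        param.grad = torch.rand_like(param)         # stand-in gradient
        optim.step()                                # wd != 0 adds the decay term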
@@ -214,8 +216,10 @@ class TestFusedLAMB(unittest.TestCase):
     def test_multi_params(self):
         sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
-        lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':0}
-        tensors = []
-        for size in sizes:
-            tensors.append(torch.rand(size, dtype=torch.float, device='cuda'))
+        weight_decay = [0, 0.01]
+
+        for wd in weight_decay:
+            lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':wd}
+            tensors = []
+            for size in sizes:
+                tensors.append(torch.rand(size, dtype=torch.float, device='cuda'))
...
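The multi-parameter test follows the same pattern: the tensor list is rebuilt inside the weight-decay loop, so each wd value starts from fresh random parameters of every shape, including the degenerate [1] case. A hedged sketch of an equivalent inner loop in list-comprehension form (the follow-on optimizer pairing is elided):

    import torch

    sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
    for wd in [0, 0.01]:
        # fresh tensors per sweep keep the wd = 0 and wd = 0.01 runs independent
        tensors = [torch.rand(size, dtype=torch.float, device='cuda')
                   for size in sizes]
        # ... then build the reference/FusedLAMB pair from tensors, as in the
        # first hunk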
@@ -232,9 +236,11 @@ class TestFusedLAMB(unittest.TestCase):
     def test_lamb_option(self):
         nelem = 1
-        lamb_option = {'lr':0.01, 'betas':(0.6, 0.9), 'eps':3e-06, 'weight_decay':0}
         tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
-        ref_param, tst_param, ref_optim, tst_optim = \
-            self.gen_param_optim([tensor], lamb_option)
+        weight_decay = [0, 0.01]
+
+        for wd in weight_decay:
+            lamb_option = {'lr':0.01, 'betas':(0.6, 0.9), 'eps':3e-06, 'weight_decay':wd}
+            ref_param, tst_param, ref_optim, tst_optim = \
+                self.gen_param_optim([tensor], lamb_option)
...
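Why a dedicated non-zero case is worth adding: in LAMB, weight decay enters the per-layer update before the trust ratio is computed, so wd = 0 skips that term entirely and leaves the decay path untested. A textbook single-tensor LAMB step for reference (an illustrative sketch, not apex's fused kernel; zero-norm edge cases are elided):

    import torch

    def lamb_update(p, g, m, v, step, lr=5e-4, beta1=0.9, beta2=0.999,
                    eps=1e-08, wd=0.01):
        # Exponential moving averages of the gradient and its square
        m.mul_(beta1).add_(g, alpha=1 - beta1)
        v.mul_(beta2).addcmul_(g, g, value=1 - beta2)
        m_hat = m / (1 - beta1 ** step)                 # bias correction
        v_hat = v / (1 - beta2 ** step)
        update = m_hat / (v_hat.sqrt() + eps) + wd * p  # decay term iff wd != 0
        trust_ratio = p.norm() / update.norm()          # per-layer scaling
        p.add_(update, alpha=-lr * trust_ratio.item())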