wangsen / paddle_dbnet

Unverified commit 77703f37, authored Apr 29, 2022 by Double_V and committed by GitHub on Apr 29, 2022

Merge pull request #6103 from LDOUBLEV/dygraph

fix det cml + pact + distribute training bug

Parents: 21a0efea, 6c193662
Showing 1 changed file with 24 additions and 9 deletions

ppocr/optimizer/optimizer.py  (+24, -9)
ppocr/optimizer/optimizer.py @ 77703f37

@@ -43,12 +43,15 @@ class Momentum(object):
         self.grad_clip = grad_clip
 
     def __call__(self, model):
+        train_params = [
+            param for param in model.parameters() if param.trainable is True
+        ]
         opt = optim.Momentum(
             learning_rate=self.learning_rate,
             momentum=self.momentum,
             weight_decay=self.weight_decay,
             grad_clip=self.grad_clip,
-            parameters=model.parameters())
+            parameters=train_params)
         return opt
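
The hunk above is the core of the fix, and the same three-line train_params filter is repeated in each optimizer wrapper below. The commit does not spell out the failure mode, but the change is consistent with setups where part of the model is frozen (for example a CML distillation teacher, or layers handled by PACT quantization), so that model.parameters() contains non-trainable parameters that should not be registered with the optimizer. A minimal, illustrative Paddle sketch of the pattern follows; the TinyCML class, layer sizes and hyperparameters are invented for illustration and are not from this repository:

# Illustrative sketch only -- not code from this repository.
import paddle.nn as nn
import paddle.optimizer as optim

class TinyCML(nn.Layer):
    # Stand-in for a distillation model: frozen teacher + trainable student.
    def __init__(self):
        super().__init__()
        self.teacher = nn.Linear(8, 8)
        self.student = nn.Linear(8, 8)

model = TinyCML()
for p in model.teacher.parameters():
    p.trainable = False  # freeze the teacher branch

# The pattern added in this commit: only trainable parameters reach the optimizer.
train_params = [
    param for param in model.parameters() if param.trainable is True
]
opt = optim.Momentum(
    learning_rate=0.001, momentum=0.9, parameters=train_params)

Before this change the wrappers passed model.parameters() directly, so frozen parameters were still handed to the optimizer.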
@@ -76,6 +79,9 @@ class Adam(object):
         self.lazy_mode = lazy_mode
 
     def __call__(self, model):
+        train_params = [
+            param for param in model.parameters() if param.trainable is True
+        ]
         opt = optim.Adam(
             learning_rate=self.learning_rate,
             beta1=self.beta1,

@@ -85,7 +91,7 @@ class Adam(object):
             grad_clip=self.grad_clip,
             name=self.name,
             lazy_mode=self.lazy_mode,
-            parameters=model.parameters())
+            parameters=train_params)
         return opt
@@ -118,6 +124,9 @@ class RMSProp(object):
         self.grad_clip = grad_clip
 
     def __call__(self, model):
+        train_params = [
+            param for param in model.parameters() if param.trainable is True
+        ]
         opt = optim.RMSProp(
             learning_rate=self.learning_rate,
             momentum=self.momentum,

@@ -125,7 +134,7 @@ class RMSProp(object):
             epsilon=self.epsilon,
             weight_decay=self.weight_decay,
             grad_clip=self.grad_clip,
-            parameters=model.parameters())
+            parameters=train_params)
         return opt
@@ -149,6 +158,9 @@ class Adadelta(object):
         self.name = name
 
     def __call__(self, model):
+        train_params = [
+            param for param in model.parameters() if param.trainable is True
+        ]
         opt = optim.Adadelta(
             learning_rate=self.learning_rate,
             epsilon=self.epsilon,

@@ -156,7 +168,7 @@ class Adadelta(object):
             weight_decay=self.weight_decay,
             grad_clip=self.grad_clip,
             name=self.name,
-            parameters=model.parameters())
+            parameters=train_params)
         return opt
@@ -190,17 +202,20 @@ class AdamW(object):
         self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay
 
     def __call__(self, model):
-        parameters = model.parameters()
+        parameters = [
+            param for param in model.parameters() if param.trainable is True
+        ]
         self.no_weight_decay_param_name_list = [
             p.name for n, p in model.named_parameters()
             if any(nd in n for nd in self.no_weight_decay_name_list)
         ]
         if self.one_dim_param_no_weight_decay:
             self.no_weight_decay_param_name_list += [
                 p.name for n, p in model.named_parameters()
                 if len(p.shape) == 1
             ]
         opt = optim.AdamW(
             learning_rate=self.learning_rate,
             beta1=self.beta1,

@@ -216,4 +231,4 @@ class AdamW(object):
         return opt
 
     def _apply_decay_param_fun(self, name):
         return name not in self.no_weight_decay_param_name_list
\ No newline at end of file
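
The context in the AdamW hunks also shows the wrapper's no-weight-decay bookkeeping: no_weight_decay_param_name_list is built from parameter names, and _apply_decay_param_fun returns False for those names. For reference, a hedged sketch of how such a name-based skip list is typically wired into paddle.optimizer.AdamW through its apply_decay_param_fun argument; the model, skip rule and hyperparameters below are illustrative, not this repository's configuration:

# Illustrative sketch only -- name-based weight-decay skipping with Paddle's AdamW.
import paddle.nn as nn
import paddle.optimizer as optim

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))

# Skip decay for every 1-D parameter (biases, norm scales), mirroring the
# one_dim_param_no_weight_decay option shown in the hunk above.
no_decay_names = [
    p.name for n, p in model.named_parameters() if len(p.shape) == 1
]

def apply_decay(name):
    # Weight decay is applied only to parameters for which this returns True.
    return name not in no_decay_names

opt = optim.AdamW(
    learning_rate=0.001,
    weight_decay=0.05,
    parameters=[p for p in model.parameters() if p.trainable is True],
    apply_decay_param_fun=apply_decay)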