OpenDAS / dlib

Commit 15b2d7b5
Authored May 21, 2016 by Davis King
Parent: 58496f9f

Added get_learning_rate_multiplier() and get_weight_decay_multiplier() global
functions.
3 changed files with 71 additions and 4 deletions:

    dlib/algs.h               +7   -0
    dlib/dnn/core.h           +38  -4
    dlib/dnn/core_abstract.h  +26  -0
dlib/algs.h
@@ -488,6 +488,13 @@ namespace dlib

 // ----------------------------------------------------------------------------------------

+    struct general_ {};
+    struct special_ : general_ {};
+    template<typename> struct int_ { typedef int type; };
+
+// ----------------------------------------------------------------------------------------
+
 /*!A is_same_object

     This is a templated function which checks if both of its arguments are actually
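
These three helpers form the standard tag-dispatch member-detection idiom: an overload taking special_ is preferred during overload resolution, but can be silently discarded by SFINAE when the decltype inside int_<...> fails to compile, at which point the argument converts to the general_ base and the fallback is chosen. A minimal self-contained sketch of the pattern (the speak/bark names are illustrative, not part of dlib):

    #include <iostream>

    struct general_ {};
    struct special_ : general_ {};
    template<typename> struct int_ { typedef int type; };

    // Viable only when T has a member named bark(); otherwise the
    // decltype(&T::bark) substitution fails and SFINAE drops this overload.
    template <typename T, typename int_<decltype(&T::bark)>::type = 0>
    void speak(const T& obj, special_) { obj.bark(); }

    // Fallback chosen for every other type.
    template <typename T>
    void speak(const T&, general_) { std::cout << "(silent)\n"; }

    struct dog { void bark() const { std::cout << "woof\n"; } };
    struct cat {};

    int main()
    {
        speak(dog(), special_());  // exact match on special_ -> prints "woof"
        speak(cat(), special_());  // SFINAE removes the first overload; the
                                   // special_ argument converts to general_
    }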
dlib/dnn/core.h
@@ -24,6 +24,38 @@

 namespace dlib
 {

 // ----------------------------------------------------------------------------------------

+    namespace impl
+    {
+        template <typename T, typename int_<decltype(&T::get_learning_rate_multiplier)>::type = 0>
+        double get_learning_rate_multiplier (
+            const T& obj,
+            special_
+        ) { return obj.get_learning_rate_multiplier(); }
+
+        template <typename T>
+        double get_learning_rate_multiplier ( const T& obj, general_) { return 1; }
+    }
+
+    template <typename T>
+    double get_learning_rate_multiplier(const T& obj) { return impl::get_learning_rate_multiplier(obj, special_()); }
+
+// ----------------------------------------------------------------------------------------
+
+    namespace impl
+    {
+        template <typename T, typename int_<decltype(&T::get_weight_decay_multiplier)>::type = 0>
+        double get_weight_decay_multiplier (
+            const T& obj,
+            special_
+        ) { return obj.get_weight_decay_multiplier(); }
+
+        template <typename T>
+        double get_weight_decay_multiplier ( const T& obj, general_) { return 1; }
+    }
+
+    template <typename T>
+    double get_weight_decay_multiplier(const T& obj) { return impl::get_weight_decay_multiplier(obj, special_()); }
+
+// ----------------------------------------------------------------------------------------

     namespace impl
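
The public functions always pass special_(), so the member-calling overload wins whenever it compiles, and the general_ fallback returns 1 otherwise. Roughly, with these hypothetical example types (not from dlib):

    struct scaled_layer { double get_learning_rate_multiplier() const { return 0.1; } };
    struct plain_layer  {};

    // dlib::get_learning_rate_multiplier(scaled_layer()) == 0.1  (member found)
    // dlib::get_learning_rate_multiplier(plain_layer())  == 1    (fallback)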
@@ -849,8 +881,9 @@ namespace dlib

         void update_parameters(sstack<solver_type> solvers, double learning_rate)
         {
             DLIB_CASSERT(solvers.size()>=num_computational_layers,"");
-            // Don't try to adjust the parameters if this layer doesn't have any.
-            if (params_grad.size() != 0)
+            // Don't try to adjust the parameters if this layer doesn't have any or the
+            // learning rate is disabled for this layer.
+            if (params_grad.size() != 0 && get_learning_rate_multiplier(details) != 0)
             {
                 const tensor& step = solvers.top()(learning_rate, details, static_cast<const tensor&>(params_grad));
                 tt::add(details.get_layer_params(), details.get_layer_params(), step);
@@ -1200,8 +1233,9 @@ namespace dlib

         void update_parameters(sstack<solver_type> solvers, double learning_rate)
         {
             DLIB_CASSERT(solvers.size()>=num_computational_layers,"");
-            // Don't try to adjust the parameters if this layer doesn't have any.
-            if (params_grad.size() != 0)
+            // Don't try to adjust the parameters if this layer doesn't have any or the
+            // learning rate is disabled for this layer.
+            if (params_grad.size() != 0 && get_learning_rate_multiplier(details) != 0)
             {
                 const tensor& step = solvers.top()(learning_rate, details, static_cast<const tensor&>(params_grad));
                 tt::add(details.get_layer_params(), details.get_layer_params(), step);
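
The new "&& get_learning_rate_multiplier(details) != 0" guard (applied identically in both update_parameters() overloads above) means a layer reporting a multiplier of 0 never reaches the solver at all, so its parameters stay frozen rather than receiving a zero-scaled step. A hypothetical sketch of a layer that exploits this:

    // Illustrative only: a layer detail type whose parameters update_parameters()
    // will now skip entirely instead of invoking the solver on them.
    struct frozen_fc_
    {
        double get_learning_rate_multiplier() const { return 0; }
        // ... the rest of the usual dlib layer interface (setup(), forward(),
        //     backward(), get_layer_params(), ...) would go here ...
    };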
dlib/dnn/core_abstract.h
@@ -67,6 +67,32 @@ namespace dlib

         (except computes it using a numerically accurate method)
     !*/

 // ----------------------------------------------------------------------------------------

+    template <typename T>
+    double get_learning_rate_multiplier(
+        const T& obj
+    );
+    /*!
+        ensures
+            - if (obj has a get_learning_rate_multiplier() member function) then
+                - returns obj.get_learning_rate_multiplier()
+            - else
+                - returns 1
+    !*/
+
+    template <typename T>
+    double get_weight_decay_multiplier(
+        const T& obj
+    );
+    /*!
+        ensures
+            - if (obj has a get_weight_decay_multiplier() member function) then
+                - returns obj.get_weight_decay_multiplier()
+            - else
+                - returns 1
+    !*/
+
+// ----------------------------------------------------------------------------------------

     bool dnn_prefer_fastest_algorithms(
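
Per this spec, opting in takes nothing more than defining the member function. A small sketch checking both branches of the documented contract (the two structs are hypothetical, used only for illustration):

    #include <cassert>
    #include <dlib/dnn.h>

    struct tuned { double get_weight_decay_multiplier() const { return 0.5; } };
    struct plain {};

    int main()
    {
        assert(dlib::get_weight_decay_multiplier(tuned()) == 0.5); // member used
        assert(dlib::get_weight_decay_multiplier(plain()) == 1);   // defaults to 1
    }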