OpenDAS / dlib · Commits

Commit fa812881, authored Sep 28, 2015 by Davis King

    Just removed the _ from sub_net.

Parent: 31757a21
Showing 6 changed files, with 312 additions and 312 deletions (+312 −312):

    dlib/dnn/core.h             +153 −153
    dlib/dnn/core_abstract.h     +79 −79
    dlib/dnn/layers.h            +32 −32
    dlib/dnn/layers_abstract.h   +30 −30
    dlib/dnn/loss.h               +8 −8
    dlib/dnn/loss_abstract.h     +10 −10
dlib/dnn/core.h

    (This diff is collapsed in the page view and not reproduced here.)
dlib/dnn/core_abstract.h
@@ -124,7 +124,7 @@ namespace dlib
     template <
         typename LAYER_DETAILS,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_layer
     {
@@ -133,26 +133,26 @@ namespace dlib
             - Must be a type that implements the EXAMPLE_LAYER_ interface defined in
               layers_abstract.h
-        REQUIREMENTS ON SUB_NET
+        REQUIREMENTS ON SUBNET
             - One of the following must be true:
-                - SUB_NET implements the EXAMPLE_INPUT_LAYER interface defined in
+                - SUBNET implements the EXAMPLE_INPUT_LAYER interface defined in
                   input_abstract.h.
-                - SUB_NET is an add_layer object.
-                - SUB_NET is an add_tag_layer object.
-                - SUB_NET is an add_skip_layer object.
+                - SUBNET is an add_layer object.
+                - SUBNET is an add_tag_layer object.
+                - SUBNET is an add_skip_layer object.

         WHAT THIS OBJECT REPRESENTS
-            Stacks a new layer, defined by LAYER_DETAILS, on top of SUB_NET type.
+            Stacks a new layer, defined by LAYER_DETAILS, on top of SUBNET type.
     !*/

     public:
         typedef LAYER_DETAILS layer_details_type;
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
-        // If SUB_NET is an input layer then num_layers == 1, otherwise it has the
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
+        // If SUBNET is an input layer then num_layers == 1, otherwise it has the
         // definition shown here:
-        const static size_t num_layers = sub_net_type::num_layers + 1;
+        const static size_t num_layers = subnet_type::num_layers + 1;

         add_layer(
         );
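For orientation, the renamed templates still compose by nesting: each alias defined later in this commit (fc, relu, con, multiply) wraps its SUBNET argument in add_layer. A minimal sketch, not part of this diff, where `input` stands for a hypothetical type implementing the EXAMPLE_INPUT_LAYER interface:

    // Sketch only: reading inside-out, input -> fc -> relu -> fc.
    using net_type = fc<relu<fc<input>>>;
    // Per the definitions above, fc<input>::num_layers == 1, so counting
    // outward, net_type::num_layers == 3.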
@@ -172,7 +172,7 @@ namespace dlib
         /*!
             ensures
                 - #layer_details() == layer_details_type(item.layer_details())
-                - #sub_net() == sub_net_type(item.sub_net())
+                - #subnet() == subnet_type(item.subnet())
         !*/

         template <typename ...T>
@@ -183,7 +183,7 @@ namespace dlib
         /*!
             ensures
                 - #layer_details() == layer_details_type(layer_det)
-                - #sub_net() == sub_net_type(args)
+                - #subnet() == subnet_type(args)
         !*/

         template <typename ...T>
@@ -194,7 +194,7 @@ namespace dlib
         /*!
             ensures
                 - #layer_details() == layer_details_type(layer_det)
-                - #sub_net() == sub_net_type(args)
+                - #subnet() == subnet_type(args)
         !*/

         template <typename input_iterator>
@@ -211,7 +211,7 @@ namespace dlib
                 - #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
                 - Invokes data.async_copy_to_device() so that the data begins transferring
                   to the device.
-                - Ultimately this function just calls sub_net().sub_net()...sub_net().to_tensor(ibegin,iend,data).
+                - Ultimately this function just calls subnet().subnet()...subnet().to_tensor(ibegin,iend,data).
         !*/

         template <typename input_iterator>
@@ -247,17 +247,17 @@ namespace dlib
             ensures
                 - Runs x through the network and returns the results.  In particular, this
                   function performs the equivalent of:
-                    sub_net().forward(x);
+                    subnet().forward(x);
                     if (this is the first time forward() has been called) then
-                        layer_details().setup(sub_net());
-                    layer_details().forward(sub_net(), get_output());
+                        layer_details().setup(subnet());
+                    layer_details().forward(subnet(), get_output());
                 - The return value from this function is also available in #get_output().
                 - have_same_dimensions(#get_gradient_input(), #get_output()) == true
                 - All elements of #get_gradient_input() are set to 0.
         !*/
         {
-            sub_network.forward(x);
-            const dimpl::sub_net_wrapper<sub_net_type> wsub(sub_network);
+            subnetwork.forward(x);
+            const dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
             if (!this_layer_setup_called)
             {
                 details.setup(wsub);
@@ -303,18 +303,18 @@ namespace dlib
               to some loss.
         !*/
         {
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub_network);
+            dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
             params_grad.copy_size(details.get_layer_params());
             params_grad = 0;
             details.backward(get_gradient_input(), wsub, static_cast<tensor&>(params_grad));
             // Don't try to adjust the parameters if this layer doesn't have any.
             if (params_grad.size() != 0)
                 solvers.top()(details, static_cast<const tensor&>(params_grad));
-            sub_network.update(x, solvers.pop());
+            subnetwork.update(x, solvers.pop());
         }

-        const sub_net_type& sub_net() const { return sub_network; }
-        sub_net_type& sub_net() { return sub_network; }
+        const subnet_type& subnet() const { return subnetwork; }
+        subnet_type& subnet() { return subnetwork; }
         const layer_details_type& layer_details() const { return details; }
         layer_details_type& layer_details() { return details; }
@@ -332,7 +332,7 @@ namespace dlib
     template <
         typename LOSS_DETAILS,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_loss_layer
     {
@@ -340,26 +340,26 @@ namespace dlib
         REQUIREMENTS ON LOSS_DETAILS
             - Must be a type that implements the EXAMPLE_LOSS_LAYER_ interface defined
               in loss_abstract.h
-            - LOSS_DETAILS::sample_expansion_factor == SUB_NET::sample_expansion_factor
+            - LOSS_DETAILS::sample_expansion_factor == SUBNET::sample_expansion_factor
               i.e. The loss layer and input layer must agree on the sample_expansion_factor.

-        REQUIREMENTS ON SUB_NET
+        REQUIREMENTS ON SUBNET
             - One of the following must be true:
-                - SUB_NET is an add_layer object.
-                - SUB_NET is an add_tag_layer object.
-                - SUB_NET is an add_skip_layer object.
+                - SUBNET is an add_layer object.
+                - SUBNET is an add_tag_layer object.
+                - SUBNET is an add_skip_layer object.

         WHAT THIS OBJECT REPRESENTS
-            Adds a loss layer, defined by LOSS_DETAILS, on top of SUB_NET.
+            Adds a loss layer, defined by LOSS_DETAILS, on top of SUBNET.
     !*/

     public:
         typedef LOSS_DETAILS loss_details_type;
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
         // Note that the loss layer doesn't count as an additional layer.
-        const static size_t num_layers = sub_net_type::num_layers;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        const static size_t num_layers = subnet_type::num_layers;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
         // If LOSS_DETAILS is an unsupervised loss then label_type==no_label_type.
         // Otherwise it is defined as follows:
         typedef typename LOSS_DETAILS::label_type label_type;
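As the comment in this hunk notes, the loss layer does not add to num_layers. A sketch, continuing the hypothetical net_type from the earlier note:

    // Sketch only: wrapping a network in a loss layer keeps num_layers fixed.
    using train_net_type = loss_binary_hinge<fc<relu<fc<input>>>>;
    // train_net_type::num_layers == 3, the same as fc<relu<fc<input>>>.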
@@ -379,7 +379,7 @@ namespace dlib
             const add_loss_layer<T,U>& item
         ) :
             loss(item.loss_details()),
-            sub(item.sub_net())
+            sub(item.subnet())
         {}

         template <typename ...T>
@@ -450,7 +450,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             return loss.compute_loss(temp_tensor, lbegin, wsub);
         }
@@ -470,7 +470,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             double l = loss.compute_loss(temp_tensor, lbegin, wsub);
             sub.update(temp_tensor, solvers);
             return l;
@@ -483,8 +483,8 @@ namespace dlib
             sstack<solver_type,num_layers>& solvers
         );

-        const sub_net_type& sub_net() const { return sub; }
-        sub_net_type& sub_net() { return sub; }
+        const subnet_type& subnet() const { return sub; }
+        subnet_type& subnet() { return sub; }
         const loss_details_type& loss_details() const { return loss; }
         loss_details_type& loss_details() { return loss; }
@@ -509,7 +509,7 @@ namespace dlib
     private:

         loss_details_type loss;
-        sub_net_type sub;
+        subnet_type sub;

         // These two objects don't logically contribute to the state of this object.  They
         // are here to prevent them from being reallocated over and over.
@@ -527,21 +527,21 @@ namespace dlib
     template <
         unsigned long ID,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_tag_layer
     {
         /*!
-            REQUIREMENTS ON SUB_NET
+            REQUIREMENTS ON SUBNET
                 - One of the following must be true:
-                    - SUB_NET implements the EXAMPLE_INPUT_LAYER interface defined in
+                    - SUBNET implements the EXAMPLE_INPUT_LAYER interface defined in
                       input_abstract.h.
-                    - SUB_NET is an add_layer object.
-                    - SUB_NET is an add_tag_layer object.
-                    - SUB_NET is an add_skip_layer object.
+                    - SUBNET is an add_layer object.
+                    - SUBNET is an add_tag_layer object.
+                    - SUBNET is an add_skip_layer object.

             WHAT THIS OBJECT REPRESENTS
-                This object draws its inputs from sub_net() and performs the identity
+                This object draws its inputs from subnet() and performs the identity
                 transform.  This means it is a no-op and its presence does not change
                 the behavior of the network.  It exists solely to be used by add_skip_layer
                 to reference a particular part of a network.
@@ -549,48 +549,48 @@ namespace dlib
     !*/
     };

-    template <typename SUB_NET> using tag1  = add_tag_layer< 1, SUB_NET>;
-    template <typename SUB_NET> using tag2  = add_tag_layer< 2, SUB_NET>;
-    template <typename SUB_NET> using tag3  = add_tag_layer< 3, SUB_NET>;
-    template <typename SUB_NET> using tag4  = add_tag_layer< 4, SUB_NET>;
-    template <typename SUB_NET> using tag5  = add_tag_layer< 5, SUB_NET>;
-    template <typename SUB_NET> using tag6  = add_tag_layer< 6, SUB_NET>;
-    template <typename SUB_NET> using tag7  = add_tag_layer< 7, SUB_NET>;
-    template <typename SUB_NET> using tag8  = add_tag_layer< 8, SUB_NET>;
-    template <typename SUB_NET> using tag9  = add_tag_layer< 9, SUB_NET>;
-    template <typename SUB_NET> using tag10 = add_tag_layer<10, SUB_NET>;
+    template <typename SUBNET> using tag1  = add_tag_layer< 1, SUBNET>;
+    template <typename SUBNET> using tag2  = add_tag_layer< 2, SUBNET>;
+    template <typename SUBNET> using tag3  = add_tag_layer< 3, SUBNET>;
+    template <typename SUBNET> using tag4  = add_tag_layer< 4, SUBNET>;
+    template <typename SUBNET> using tag5  = add_tag_layer< 5, SUBNET>;
+    template <typename SUBNET> using tag6  = add_tag_layer< 6, SUBNET>;
+    template <typename SUBNET> using tag7  = add_tag_layer< 7, SUBNET>;
+    template <typename SUBNET> using tag8  = add_tag_layer< 8, SUBNET>;
+    template <typename SUBNET> using tag9  = add_tag_layer< 9, SUBNET>;
+    template <typename SUBNET> using tag10 = add_tag_layer<10, SUBNET>;
// ----------------------------------------------------------------------------------------
     template <
         template<typename> class TAG_TYPE,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_skip_layer
     {
         /*!
-            REQUIREMENTS ON SUB_NET
+            REQUIREMENTS ON SUBNET
                 - One of the following must be true:
-                    - SUB_NET is an add_layer object.
-                    - SUB_NET is an add_tag_layer object.
-                    - SUB_NET is an add_skip_layer object.
+                    - SUBNET is an add_layer object.
+                    - SUBNET is an add_tag_layer object.
+                    - SUBNET is an add_skip_layer object.

             WHAT THIS OBJECT REPRESENTS
-                This object draws its inputs from layer<TAG_TYPE>(sub_net())
+                This object draws its inputs from layer<TAG_TYPE>(subnet())
                 and performs the identity transform.
         !*/
     };
-    template <typename SUB_NET> using skip1  = add_skip_layer<tag1, SUB_NET>;
-    template <typename SUB_NET> using skip2  = add_skip_layer<tag2, SUB_NET>;
-    template <typename SUB_NET> using skip3  = add_skip_layer<tag3, SUB_NET>;
-    template <typename SUB_NET> using skip4  = add_skip_layer<tag4, SUB_NET>;
-    template <typename SUB_NET> using skip5  = add_skip_layer<tag5, SUB_NET>;
-    template <typename SUB_NET> using skip6  = add_skip_layer<tag6, SUB_NET>;
-    template <typename SUB_NET> using skip7  = add_skip_layer<tag7, SUB_NET>;
-    template <typename SUB_NET> using skip8  = add_skip_layer<tag8, SUB_NET>;
-    template <typename SUB_NET> using skip9  = add_skip_layer<tag9, SUB_NET>;
-    template <typename SUB_NET> using skip10 = add_skip_layer<tag10, SUB_NET>;
+    template <typename SUBNET> using skip1  = add_skip_layer<tag1, SUBNET>;
+    template <typename SUBNET> using skip2  = add_skip_layer<tag2, SUBNET>;
+    template <typename SUBNET> using skip3  = add_skip_layer<tag3, SUBNET>;
+    template <typename SUBNET> using skip4  = add_skip_layer<tag4, SUBNET>;
+    template <typename SUBNET> using skip5  = add_skip_layer<tag5, SUBNET>;
+    template <typename SUBNET> using skip6  = add_skip_layer<tag6, SUBNET>;
+    template <typename SUBNET> using skip7  = add_skip_layer<tag7, SUBNET>;
+    template <typename SUBNET> using skip8  = add_skip_layer<tag8, SUBNET>;
+    template <typename SUBNET> using skip9  = add_skip_layer<tag9, SUBNET>;
+    template <typename SUBNET> using skip10 = add_skip_layer<tag10, SUBNET>;
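These tag and skip aliases exist to express branches. A sketch of the intended usage, under the same hypothetical `input` type (this commit defines the aliases but shows no branched example):

    // Sketch only: tag1 marks the relu's output; the outer fc then reads
    // that tagged output through skip1, bypassing the inner fc.
    using branched = fc<skip1<fc<tag1<relu<input>>>>>;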
// ----------------------------------------------------------------------------------------
@@ -605,16 +605,16 @@ namespace dlib
         requires
             - net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or add_tag_layer.
         ensures
-            - This function chains together i calls to n.sub_net() and returns the
+            - This function chains together i calls to n.subnet() and returns the
               result.  So for example:
                 - if (i == 0)
                     - returns n
                 - else if (i == 1)
-                    - returns n.sub_net()
+                    - returns n.subnet()
                 - else if (i == 2)
-                    - returns n.sub_net().sub_net()
+                    - returns n.subnet().subnet()
                 - else if (i == 3)
-                    - returns n.sub_net().sub_net().sub_net()
+                    - returns n.subnet().subnet().subnet()
                 - else
                     - etc.
         !*/
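A sketch of this layer<> accessor in use (net_type as in the earlier notes):

    net_type net;
    auto& l0 = layer<0>(net);   // net itself, the outermost fc
    auto& l1 = layer<1>(net);   // equivalent to net.subnet()
    auto& l2 = layer<2>(net);   // equivalent to net.subnet().subnet()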
dlib/dnn/layers.h
@@ -23,20 +23,20 @@ namespace dlib
         con_() {}

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             // TODO
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             // TODO
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             // TODO
         }
@@ -49,8 +49,8 @@ namespace dlib
         resizable_tensor params;
     };

-    template <typename SUB_NET>
-    using con = add_layer<con_, SUB_NET>;
+    template <typename SUBNET>
+    using con = add_layer<con_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -71,8 +71,8 @@ namespace dlib
         unsigned long get_num_outputs (
         ) const { return num_outputs; }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
             params.set_size(num_inputs, num_outputs);
@@ -82,16 +82,16 @@ namespace dlib
             randomize_parameters(params, num_inputs+num_outputs, rnd);
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             output.set_size(sub.get_output().num_samples(), num_outputs);
             output = mat(sub.get_output())*mat(params);
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             // d1*W*p1 + d2*W*p2
             // total gradient = [d1*W; d2*W; d3*W; ...] == D*W
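The hunk above shows fc_::forward computing output = mat(sub.get_output())*mat(params), then truncates backward(). The standard matrix-form gradients it must produce, sketched here as a hedged note rather than the verbatim dlib code:

    // With X = mat(sub.get_output()), W = mat(params), G = mat(gradient_input),
    // and forward output X*W:
    //   parameter gradient: params_grad += trans(X)*G
    //   data gradient:      sub.get_gradient_input() += G*trans(W)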
@@ -116,8 +116,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -128,20 +128,20 @@ namespace dlib
         {
         }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             output.copy_size(sub.get_output());
             output = lowerbound(mat(sub.get_output()), 0);
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             const float* grad = gradient_input.host();
             const float* in = sub.get_output().host();
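relu_::backward is truncated above after reading the gradient and input pointers. The element-wise rule it implements is standard; a hedged sketch of the remainder (an assumption, not the verbatim code):

    float* out = sub.get_gradient_input().host();
    for (size_t i = 0; i < sub.get_output().size(); ++i)
        out[i] += (in[i] > 0) ? grad[i] : 0;  // pass gradient only where input > 0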
@@ -163,8 +163,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -176,8 +176,8 @@ namespace dlib
         }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
             params.set_size(1, num_inputs);
@@ -189,8 +189,8 @@ namespace dlib
             randomize_parameters(params, num_inputs+num_outputs, rnd);
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             DLIB_CASSERT(sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == params.size(), "");
             DLIB_CASSERT(sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == num_inputs, "");
@@ -208,8 +208,8 @@ namespace dlib
             }
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             params_grad += sum_rows(pointwise_multiply(mat(sub.get_output()), mat(gradient_input)));
@@ -230,8 +230,8 @@ namespace dlib
         dlib::rand rnd;
     };

-    template <typename SUB_NET>
-    using multiply = add_layer<multiply_, SUB_NET>;
+    template <typename SUBNET>
+    using multiply = add_layer<multiply_, SUBNET>;

 // ----------------------------------------------------------------------------------------
dlib/dnn/layers_abstract.h
@@ -12,7 +12,7 @@ namespace dlib
 // ----------------------------------------------------------------------------------------

-    class SUB_NET
+    class SUBNET
     {
         /*!
             WHAT THIS OBJECT REPRESENTS
@@ -35,8 +35,8 @@ namespace dlib
     public:
         // You aren't allowed to copy subnetworks from inside a layer.
-        SUB_NET(const SUB_NET&) = delete;
-        SUB_NET& operator=(const SUB_NET&) = delete;
+        SUBNET(const SUBNET&) = delete;
+        SUBNET& operator=(const SUBNET&) = delete;

         const tensor& get_output(
         ) const;
@@ -61,21 +61,21 @@ namespace dlib
               get_gradient_input().
         !*/

-        const NEXT_SUB_NET& sub_net(
+        const NEXT_SUBNET& subnet(
         ) const;
         /*!
             ensures
                 - returns the subnetwork of *this network.  With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                   begins with layer2.
         !*/

-        NEXT_SUB_NET& sub_net(
+        NEXT_SUBNET& subnet(
         );
         /*!
             ensures
                 - returns the subnetwork of *this network.  With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                   begins with layer2.
         !*/
     };
@@ -126,45 +126,45 @@ namespace dlib
             allows you to easily convert between related deep neural network types.
         !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
         void setup (
-            const SUB_NET& sub
+            const SUBNET& sub
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
             ensures
                 - performs any necessary initial memory allocations and/or sets parameters
                   to their initial values prior to learning.  Therefore, calling setup
                   destroys any previously learned parameters.
         !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
         void forward(
-            const SUB_NET& sub,
+            const SUBNET& sub,
             resizable_tensor& output
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                 - setup() has been called.
             ensures
                 - Runs the output of the subnetwork through this layer and stores the
                   output into #output.  In particular, forward() can use any of the outputs
-                  in sub (e.g. sub.get_output(), sub.sub_net().get_output(), etc.) to
+                  in sub (e.g. sub.get_output(), sub.subnet().get_output(), etc.) to
                   compute whatever it wants.
                 - #output.num_samples() == sub.get_output().num_samples()
         !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
         void backward(
             const tensor& gradient_input,
-            SUB_NET& sub,
+            SUBNET& sub,
             tensor& params_grad
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                 - setup() has been called.
                 - gradient_input has the same dimensions as the output of forward(sub,output).
                 - have_same_dimensions(sub.get_gradient_input(), sub.get_output()) == true
@@ -183,7 +183,7 @@ namespace dlib
             - for all valid I:
                 - DATA_GRADIENT_I == gradient of f(sub,get_layer_params()) with
                   respect to layer<I>(sub).get_output() (recall that forward() can
-                  draw inputs from the immediate sub layer, sub.sub_net(), or
+                  draw inputs from the immediate sub layer, sub.subnet(), or
                   any earlier layer.  So you must consider the gradients with
                   respect to all inputs drawn from sub)
               Finally, backward() adds these gradients into the output by performing:
@@ -211,8 +211,8 @@ namespace dlib
     // For each layer you define, always define an add_layer template so that layers can be
     // easily composed.  Moreover, the convention is that the layer class ends with an _
     // while the add_layer template has the same name but without the trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUBNET>;

 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
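Putting the convention together, a new layer written against this interface would take roughly the following shape. This is an illustration only: my_layer_/my_layer are hypothetical names and the bodies are placeholders.

    class my_layer_
    {
    public:
        template <typename SUBNET>
        void setup (const SUBNET& sub)
        { /* size params from sub.get_output() */ }

        template <typename SUBNET>
        void forward(const SUBNET& sub, resizable_tensor& output)
        { /* write this layer's output, reading sub.get_output() */ }

        template <typename SUBNET>
        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
        { /* add gradients into sub.get_gradient_input() and params_grad */ }

        const tensor& get_layer_params() const { return params; }
        tensor& get_layer_params() { return params; }

    private:
        resizable_tensor params;
    };

    // Trailing underscore on the class, same name without it for the alias:
    template <typename SUBNET>
    using my_layer = add_layer<my_layer_, SUBNET>;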
@@ -254,9 +254,9 @@ namespace dlib
                 - The rest of the dimensions of T will be 1.
         !*/

-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
         const tensor& get_layer_params() const;
         tensor& get_layer_params();
         /*!
@@ -265,8 +265,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -277,9 +277,9 @@ namespace dlib
         relu_(
         );

-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
         const tensor& get_layer_params() const;
         tensor& get_layer_params();
         /*!
@@ -288,8 +288,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;

 // ----------------------------------------------------------------------------------------
dlib/dnn/loss.h
@@ -43,12 +43,12 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const
         {
             const tensor& output_tensor = sub.get_output();
@@ -83,8 +83,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;

 // ----------------------------------------------------------------------------------------
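For context on loss_binary_hinge_, whose compute_loss body is truncated in the hunk above: it implements the classic binary hinge loss. A hedged sketch of the per-sample math (not the verbatim dlib code):

    // For each sample with label y in {-1,+1} and network output f:
    //   loss += max(0.0, 1.0 - y*f);
    //   d(loss)/df = (y*f < 1) ? -y : 0, accumulated into sub.get_gradient_input()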
@@ -95,11 +95,11 @@ namespace dlib
         const static unsigned int sample_expansion_factor = 1;

         template <
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
-            SUB_NET& sub
+            SUBNET& sub
         ) const
         {
             return 0;
@@ -107,8 +107,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using loss_no_label = add_loss_layer<loss_no_label_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_no_label = add_loss_layer<loss_no_label_, SUBNET>;

 // ----------------------------------------------------------------------------------------
dlib/dnn/loss_abstract.h
@@ -54,7 +54,7 @@ namespace dlib
         ) const;
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                   layers_abstract.h.
                 - sub.get_output().num_samples()%sample_expansion_factor == 0
                 - All outputs in each layer of sub have the same number of samples.  That
@@ -73,16 +73,16 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const;
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                   layers_abstract.h.
                 - input_tensor was given as input to the network sub and the outputs are
                   now visible in layer<i>(sub).get_output(), for all valid i.
@@ -114,8 +114,8 @@ namespace dlib
     // layers can be easily composed.  Moreover, the convention is that the layer class
     // ends with an _ while the add_loss_layer template has the same name but without the
     // trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUBNET>;

 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
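Mirroring the layer convention, a loss written against this interface would follow the same shape. A sketch only; my_loss_/my_loss are hypothetical names, and only the signature shown in this diff is assumed:

    class my_loss_
    {
    public:
        const static unsigned int sample_expansion_factor = 1;
        typedef double label_type;  // hypothetical label type

        template <typename const_label_iterator, typename SUBNET>
        double compute_loss (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const
        {
            // Compare sub.get_output() against the labels at truth, write
            // gradients into sub.get_gradient_input(), return the mean loss.
            return 0;
        }
    };

    template <typename SUBNET>
    using my_loss = add_loss_layer<my_loss_, SUBNET>;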
@@ -151,12 +151,12 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const;
         /*!
             This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
@@ -169,8 +169,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;

 // ----------------------------------------------------------------------------------------