OpenDAS / dlib — commit 1974e68d
authored May 27, 2016 by Fm

Removed friend declaration of dnn_tester from core.h

parent d32bcdfa
Showing 2 changed files with 64 additions and 69 deletions:

    dlib/dnn/core.h     +0   −2
    dlib/test/dnn.cpp   +64  −67
dlib/dnn/core.h  (view file @ 1974e68d)
@@ -648,7 +648,6 @@ namespace dlib
         friend class add_skip_layer;
         template <size_t N, template <typename> class L, typename S>
         friend class repeat;
-        friend class dnn_tester;
 
         // Allow copying networks from one to another as long as their corresponding
         // layers can be constructed from each other.
@@ -1521,7 +1520,6 @@ namespace dlib
         friend class add_skip_layer;
         template <size_t N, template <typename> class L, typename S>
         friend class repeat;
-        friend class dnn_tester;
 
         // You wouldn't put a tag on a layer if you didn't want to access its forward
         // outputs. So this is always true.
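For readers skimming the diff: a friend declaration is what previously let the test suite reach into the network objects' private members. A minimal sketch of the pattern being removed, with hypothetical names rather than dlib's real classes:

    #include <cassert>

    class network
    {
    public:
        float output() const { return out; }   // public accessor, usable by anyone
    private:
        float out = 0;
        friend class network_tester;           // lets network_tester read 'out' directly
    };

    class network_tester
    {
    public:
        static void check(const network& n)
        {
            assert(n.out == n.output());       // legal only because of the friendship
        }
    };

Deleting the two "friend class dnn_tester;" lines means dnn.cpp can no longer do this, so the test below is rewritten against the public interface.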
dlib/test/dnn.cpp  (view file @ 1974e68d)
@@ -11,11 +11,12 @@
 #include "tester.h"
 
-namespace dlib
+namespace
 {
-    using namespace std;
     using namespace test;
+    using namespace dlib;
+    using namespace std;
 
     logger dlog("test.dnn");
@@ -1186,7 +1187,7 @@ namespace dlib
                             r*stride_y+y_offset,
                             window_width,
                             window_height)));
-                        float err = std::abs(image_plane(A,s,k)(r,c)-expected);
+                        float err = abs(image_plane(A,s,k)(r,c)-expected);
                         DLIB_TEST_MSG(err < 1e-5, err << " " << expected << " " << image_plane(A,s,k)(r,c));
                     }
                 }
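The dropped std:: qualification presumably works because the test code now lives in an anonymous namespace with using-directives for both std and dlib (see the first hunk), and dlib's abs overloads only accept matrix expressions, so the unqualified call on a float still resolves to std::abs(float). A minimal sketch of that lookup, an assumption on my part rather than anything stated in the commit:

    #include <cmath>

    namespace   // same shape as the test's new anonymous namespace
    {
        using namespace std;

        float error_of(float got, float expected)
        {
            return abs(got - expected);   // finds std::abs(float) via the using-directive
        }
    }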
@@ -1511,6 +1512,66 @@ namespace dlib
         }
 #endif//DLIB_USE_CUDA
 
+    template <typename SUBNET> using concat_block1 = con<5,1,1,1,1,SUBNET>;
+    template <typename SUBNET> using concat_block2 = con<8,3,3,1,1,SUBNET>;
+    template <typename SUBNET> using concat_block3 = max_pool<3,3,1,1,SUBNET>;
+    template <typename SUBNET> using concat_incept = inception3<concat_block1,concat_block2,concat_block3,SUBNET>;
+
+    void test_concat()
+    {
+        using namespace dlib::tt;
+        print_spinner();
+
+        using net_type = concat_incept<input<matrix<float>>>;
+        resizable_tensor data(10, 1, 111, 222);
+        data = matrix_cast<float>(gaussian_randm(data.num_samples(), data.k()*data.nr()*data.nc(), 1));
+
+        net_type net;
+
+        auto& out = net.forward(data);
+
+        auto& b1o = layer<itag1>(net).get_output();
+        auto& b2o = layer<itag2>(net).get_output();
+        auto& b3o = layer<itag3>(net).get_output();
+
+        resizable_tensor dest(10, 14, 111, 222);
+        copy_tensor(dest, 0, b1o, 0, b1o.k());
+        copy_tensor(dest, b1o.k(), b2o, 0, b2o.k());
+        copy_tensor(dest, b1o.k()+b2o.k(), b3o, 0, b3o.k());
+
+        DLIB_TEST(dest.size() == out.size());
+        int error = memcmp(dest.host(), out.host(), dest.size());
+        DLIB_TEST(error == 0);
+
+        resizable_tensor gr(10, 14, 111, 222);
+        gr = matrix_cast<float>(gaussian_randm(gr.num_samples(), gr.k()*gr.nr()*gr.nc(), 1));
+
+        resizable_tensor params;
+        net.layer_details().backward(gr, net, params);
+
+        auto& b1g = layer<itag1>(net).subnet().get_gradient_input();
+        auto& b2g = layer<itag2>(net).subnet().get_gradient_input();
+        auto& b3g = layer<itag3>(net).subnet().get_gradient_input();
+
+        resizable_tensor g1(10, 5, 111, 222);
+        resizable_tensor g2(10, 8, 111, 222);
+        resizable_tensor g3(10, 1, 111, 222);
+
+        copy_tensor(g1, 0, gr, 0, g1.k());
+        copy_tensor(g2, 0, gr, g1.k(), g2.k());
+        copy_tensor(g3, 0, gr, g1.k()+g2.k(), g3.k());
+
+        DLIB_TEST(g1.size() == b1g.size());
+        error = memcmp(g1.host(), b1g.host(), b1g.size());
+        DLIB_TEST(error == 0);
+        DLIB_TEST(g2.size() == b2g.size());
+        error = memcmp(g2.host(), b2g.host(), b2g.size());
+        DLIB_TEST(error == 0);
+        DLIB_TEST(g3.size() == b3g.size());
+        error = memcmp(g3.host(), b3g.host(), b3g.size());
+        DLIB_TEST(error == 0);
+    }
+
 // ----------------------------------------------------------------------------------------
 
     class dnn_tester : public tester
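What the new test exercises: an inception3 block runs three branches over the same input and concatenates their outputs along the channel (k) dimension. Here b1o, b2o, and b3o carry 5, 8, and 1 channels, so dest is built as a 14-channel tensor by copying each branch at channel offsets 0, b1o.k(), and b1o.k()+b2o.k(), then compared against the network's own output. A rough standalone sketch of channel-offset copying on a flat N×K×R×C buffer, illustrative only and not dlib's actual copy_tensor implementation:

    #include <cstddef>
    #include <vector>

    // Copy 'count' channels from src (src_k channels total, starting at channel
    // src_off) into dest (dest_k channels total, starting at channel dest_off),
    // for every one of 'n' samples. Layout is N x K x R x C; plane = rows*cols.
    void copy_channels(std::vector<float>& dest, std::size_t dest_k, std::size_t dest_off,
                       const std::vector<float>& src, std::size_t src_k, std::size_t src_off,
                       std::size_t count, std::size_t n, std::size_t plane)
    {
        for (std::size_t s = 0; s < n; ++s)
            for (std::size_t k = 0; k < count; ++k)
                for (std::size_t p = 0; p < plane; ++p)
                    dest[(s*dest_k + dest_off + k)*plane + p] =
                        src[(s*src_k + src_off + k)*plane + p];
    }

The backward half of the test runs the same check in reverse: a 14-channel gradient gr is split into 5-, 8-, and 1-channel pieces, which must match what each branch received through get_gradient_input().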
@@ -1522,8 +1583,6 @@ namespace dlib
             "Runs tests on the deep neural network tools.")
         {}
 
-        void test_concat();
-
         void perform_test (
         )
         {
@@ -1579,68 +1638,6 @@ namespace dlib
             test_concat();
         }
     } a;
 
-    template <typename SUBNET> using concat_block1 = con<5,1,1,1,1,SUBNET>;
-    template <typename SUBNET> using concat_block2 = con<8,3,3,1,1,SUBNET>;
-    template <typename SUBNET> using concat_block3 = max_pool<3,3,1,1,SUBNET>;
-    template <typename SUBNET> using concat_incept = inception3<concat_block1,concat_block2,concat_block3,SUBNET>;
-
-    void dnn_tester::test_concat()
-    {
-        using namespace dlib::tt;
-        print_spinner();
-
-        using net_type = concat_incept<input<matrix<float>>>;
-        resizable_tensor data(10, 1, 111, 222);
-        data = matrix_cast<float>(gaussian_randm(data.num_samples(), data.k()*data.nr()*data.nc(), 1));
-
-        net_type net;
-
-        auto& out = net.forward(data);
-
-        auto& b1o = layer<itag1>(net).get_output();
-        auto& b2o = layer<itag2>(net).get_output();
-        auto& b3o = layer<itag3>(net).get_output();
-
-        resizable_tensor dest(10, 14, 111, 222);
-        copy_tensor(dest, 0, b1o, 0, b1o.k());
-        copy_tensor(dest, b1o.k(), b2o, 0, b2o.k());
-        copy_tensor(dest, b1o.k()+b2o.k(), b3o, 0, b3o.k());
-
-        DLIB_TEST(dest.size() == out.size());
-        int error = memcmp(dest.host(), out.host(), dest.size());
-        DLIB_TEST(error == 0);
-
-        resizable_tensor gr(10, 14, 111, 222);
-        gr = matrix_cast<float>(gaussian_randm(gr.num_samples(), gr.k()*gr.nr()*gr.nc(), 1));
-
-        memcpy(net.get_gradient_input(), gr);
-        net.back_propagate_error(data);
-
-        auto& b1g = layer<itag1>(net).subnet().x_grad;
-        auto& b2g = layer<itag2>(net).subnet().x_grad;
-        auto& b3g = layer<itag3>(net).subnet().x_grad;
-
-        resizable_tensor g1(10, 5, 111, 222);
-        resizable_tensor g2(10, 8, 111, 222);
-        resizable_tensor g3(10, 1, 111, 222);
-
-        copy_tensor(g1, 0, gr, 0, g1.k());
-        copy_tensor(g2, 0, gr, g1.k(), g2.k());
-        copy_tensor(g3, 0, gr, g1.k()+g2.k(), g3.k());
-
-        DLIB_TEST(g1.size() == b1g.size());
-        error = memcmp(g1.host(), b1g.host(), b1g.size());
-        DLIB_TEST(error == 0);
-        DLIB_TEST(g2.size() == b2g.size());
-        error = memcmp(g2.host(), b2g.host(), b2g.size());
-        DLIB_TEST(error == 0);
-        DLIB_TEST(g3.size() == b3g.size());
-        error = memcmp(g3.host(), b3g.host(), b3g.size());
-        DLIB_TEST(error == 0);
-    }
 }
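Taken together, the two versions of the test differ only in how they reach the network's internals; the rewrite is what makes the friend declaration unnecessary. Side by side, quoting the calls as they appear in the diff:

    // Old member version (required 'friend class dnn_tester;' in core.h):
    //     memcpy(net.get_gradient_input(), gr);
    //     net.back_propagate_error(data);
    //     auto& b1g = layer<itag1>(net).subnet().x_grad;                 // private member
    //
    // New free-function version (public API only):
    //     resizable_tensor params;
    //     net.layer_details().backward(gr, net, params);                 // drive the concat layer's backward pass directly
    //     auto& b1g = layer<itag1>(net).subnet().get_gradient_input();   // public accessor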