OpenDAS / dlib / Commits

Commit b73dacc1
authored May 22, 2016 by Davis King

Fixing tests

parent 7f77ec65
Changes: showing 1 changed file with 22 additions and 22 deletions.

dlib/test/dnn.cpp (+22, -22) @ b73dacc1
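For background on what the new leading argument controls (standard batch-norm math, not part of the diff): each input is normalized by the batch statistics, and the epsilon keeps the denominator away from zero when a variance is tiny,

$$ y_i = \gamma \, \frac{x_i - \mu}{\sqrt{\sigma^2 + \varepsilon}} + \beta. $$

Every hunk below makes the same mechanical change: the batch-normalization routines now take that epsilon as an explicit first argument, DEFAULT_BATCH_NORM_EPS, so the training, inference, and gradient paths are guaranteed to use the same value.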
@@ -165,13 +165,13 @@ namespace
         resizable_tensor running_means;
         resizable_tensor running_variances;
-        batch_normalize(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+        batch_normalize(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
         const double scale = (src.num_samples())/(src.num_samples()-1.0);
         // Turn back into biased variance estimate because that's how batch_normalize() works, so if we want to match it this is necessary.
         running_variances = mat(running_variances)/scale;
-        batch_normalize_inference(dest2, src, gamma, beta, running_means, running_variances);
+        batch_normalize_inference(DEFAULT_BATCH_NORM_EPS, dest2, src, gamma, beta, running_means, running_variances);
         DLIB_TEST_MSG(max(abs(mat(dest2)-mat(dest))) < 1e-5, max(abs(mat(dest2)-mat(dest))));
-        cpu::batch_normalize_inference(dest3, src, gamma, beta, running_means, running_variances);
+        cpu::batch_normalize_inference(DEFAULT_BATCH_NORM_EPS, dest3, src, gamma, beta, running_means, running_variances);
         DLIB_TEST_MSG(max(abs(mat(dest3)-mat(dest))) < 1e-5, max(abs(mat(dest3)-mat(dest))));
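The scale correction in this hunk exists because the running statistics store the unbiased variance (sum of squares divided by n-1) while batch_normalize() normalizes with the biased one (divided by n); dividing by scale = n/(n-1) converts between them. A minimal standalone sketch of that identity (plain C++, no dlib types):

#include <cassert>
#include <cmath>
#include <vector>

// A minimal sketch (plain C++, no dlib types) of the identity the test relies
// on: the running statistics hold the unbiased variance, batch_normalize()
// normalizes with the biased one, and scale = n/(n-1) converts between them.
int main()
{
    const std::vector<double> x = {1.0, 2.0, 4.0, 7.0};
    const double n = x.size();

    double mean = 0;
    for (double v : x) mean += v;
    mean /= n;

    double ss = 0;
    for (double v : x) ss += (v - mean)*(v - mean);

    const double biased   = ss/n;        // what batch_normalize() divides by
    const double unbiased = ss/(n - 1);  // what the running estimate stores

    const double scale = n/(n - 1);
    // Dividing the unbiased estimate by scale recovers the biased one.
    assert(std::abs(unbiased/scale - biased) < 1e-12);
}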
@@ -179,7 +179,7 @@ namespace
         auto f = [&](float eps) {
             const float old = src.host()[idx];
             src.host()[idx] += eps;
-            batch_normalize(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+            batch_normalize(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
             float result = dot(gradient_input, dest);
             src.host()[idx] = old;
             return result;
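The lambda f above re-runs the forward pass with one element of src nudged by eps and returns the scalar dot(gradient_input, dest), which is exactly the shape a finite-difference check needs. A minimal sketch of how such an f would typically be differentiated numerically (a hypothetical helper; the test file uses its own machinery for this):

#include <functional>

// Central difference around zero: approximates f'(0), i.e. the derivative of
// the scalar test loss dot(gradient_input, dest) with respect to the one
// perturbed element.
double central_difference(const std::function<double(double)>& f,
                          double h = 1e-3)
{
    return (f(h) - f(-h)) / (2*h);
}

The same pattern repeats in the next two hunks for gamma and beta, and again later for the conv variants; only the perturbed tensor changes.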
@@ -191,7 +191,7 @@ namespace
         auto f = [&](float eps) {
             const float old = gamma.host()[idx];
             gamma.host()[idx] += eps;
-            batch_normalize(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+            batch_normalize(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
             float result = dot(gradient_input, dest);
             gamma.host()[idx] = old;
             return result;

@@ -203,7 +203,7 @@ namespace
         auto f = [&](float eps) {
             const float old = beta.host()[idx];
             beta.host()[idx] += eps;
-            batch_normalize(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+            batch_normalize(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
             float result = dot(gradient_input, dest);
             beta.host()[idx] = old;
             return result;

@@ -220,7 +220,7 @@ namespace
         gamma_grad = 8;
         beta_grad = 8;
-        batch_normalize_gradient(gradient_input, means, vars, src, gamma, src_grad, gamma_grad, beta_grad);
+        batch_normalize_gradient(DEFAULT_BATCH_NORM_EPS, gradient_input, means, vars, src, gamma, src_grad, gamma_grad, beta_grad);
         auto grad_error = compare_gradients(src_grad, grad_src);
         dlog << LINFO << "src error: " << grad_error;
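compare_gradients() is a helper defined elsewhere in this test file, so its definition is not visible in this diff; a plausible sketch of such a check (hypothetical stand-in, not dlib's actual helper) is the worst element-wise deviation between the analytic gradient and the finite-difference reference:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for compare_gradients(): the analytic gradient from
// batch_normalize_gradient() should match the numeric one element by element.
double max_gradient_error(const std::vector<double>& analytic,
                          const std::vector<double>& numeric)
{
    double worst = 0;
    for (std::size_t i = 0; i < analytic.size(); ++i)
        worst = std::max(worst, std::abs(analytic[i] - numeric[i]));
    return worst;
}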
@@ -250,14 +250,14 @@ namespace
         resizable_tensor running_means;
         resizable_tensor running_variances;
-        batch_normalize_conv(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+        batch_normalize_conv(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
         const double scale = (src.num_samples()*src.nr()*src.nc())/(src.num_samples()*src.nr()*src.nc()-1.0);
         // Turn back into biased variance estimate because that's how
         // batch_normalize_conv() works, so if we want to match it this is necessary.
         running_variances = mat(running_variances)/scale;
-        batch_normalize_conv_inference(dest2, src, gamma, beta, running_means, running_variances);
+        batch_normalize_conv_inference(DEFAULT_BATCH_NORM_EPS, dest2, src, gamma, beta, running_means, running_variances);
         DLIB_TEST(max(abs(mat(dest2)-mat(dest))) < 1e-5);
-        cpu::batch_normalize_conv_inference(dest3, src, gamma, beta, running_means, running_variances);
+        cpu::batch_normalize_conv_inference(DEFAULT_BATCH_NORM_EPS, dest3, src, gamma, beta, running_means, running_variances);
         DLIB_TEST(max(abs(mat(dest3)-mat(dest))) < 1e-5);
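The conv variant changes only the effective sample count in the bias correction: batch_normalize_conv() pools each channel's statistics over every sample, row, and column, so the factor used above generalizes from $n/(n-1)$ to

$$ \text{scale} = \frac{N \, r \, c}{N \, r \, c - 1}, $$

with $N$ = num_samples(), $r$ = nr(), and $c$ = nc(), exactly as the scale expression in this hunk computes.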
@@ -265,7 +265,7 @@ namespace
         auto f = [&](float eps) {
             const float old = src.host()[idx];
             src.host()[idx] += eps;
-            batch_normalize_conv(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+            batch_normalize_conv(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
             float result = dot(gradient_input, dest);
             src.host()[idx] = old;
             return result;

@@ -277,7 +277,7 @@ namespace
         auto f = [&](float eps) {
             const float old = gamma.host()[idx];
             gamma.host()[idx] += eps;
-            batch_normalize_conv(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+            batch_normalize_conv(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
             float result = dot(gradient_input, dest);
             gamma.host()[idx] = old;
             return result;

@@ -289,7 +289,7 @@ namespace
         auto f = [&](float eps) {
             const float old = beta.host()[idx];
             beta.host()[idx] += eps;
-            batch_normalize_conv(dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
+            batch_normalize_conv(DEFAULT_BATCH_NORM_EPS, dest, means, vars, 1, running_means, running_variances, src, gamma, beta);
             float result = dot(gradient_input, dest);
             beta.host()[idx] = old;
             return result;

@@ -307,7 +307,7 @@ namespace
         gamma_grad = 9;
         beta_grad = 9;
-        batch_normalize_conv_gradient(gradient_input, means, vars, src, gamma, src_grad, gamma_grad, beta_grad);
+        batch_normalize_conv_gradient(DEFAULT_BATCH_NORM_EPS, gradient_input, means, vars, src, gamma, src_grad, gamma_grad, beta_grad);
         auto grad_error = compare_gradients(src_grad, grad_src);

@@ -888,8 +888,8 @@ namespace
         rnd.fill_uniform(src);
-        cpu::batch_normalize(dest, means, invstds, 1, running_means, running_variances, src, gamma, beta);
-        cuda::batch_normalize(dest2, means2, invstds2, 1, running_means2, running_variances2, src, gamma, beta);
+        cpu::batch_normalize(DEFAULT_BATCH_NORM_EPS, dest, means, invstds, 1, running_means, running_variances, src, gamma, beta);
+        cuda::batch_normalize(DEFAULT_BATCH_NORM_EPS, dest2, means2, invstds2, 1, running_means2, running_variances2, src, gamma, beta);
         dlog << LINFO << "dest error: " << max(abs(mat(dest)-mat(dest2)));
         dlog << LINFO << "means error: " << max(abs(mat(means)-mat(means2)));
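This hunk and the ones that follow are parity tests: the same inputs go through the cpu:: and cuda:: implementations, and the worst element-wise disagreement is logged. A minimal sketch of the pattern (hypothetical check function, not dlib's code):

#include <cmath>
#include <cstddef>
#include <vector>

// Run-two-backends-and-compare: outputs from independent CPU and GPU
// implementations of the same routine should agree to floating-point noise.
bool outputs_agree(const std::vector<float>& cpu_out,
                   const std::vector<float>& gpu_out,
                   float tol = 1e-5f)
{
    if (cpu_out.size() != gpu_out.size())
        return false;
    for (std::size_t i = 0; i < cpu_out.size(); ++i)
        if (std::fabs(cpu_out[i] - gpu_out[i]) >= tol)
            return false;
    return true;
}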
@@ -915,8 +915,8 @@ namespace
         rnd.fill_uniform(gradient_input);
-        cpu::batch_normalize_gradient(gradient_input, means, invstds, src, gamma, src_grad, gamma_grad, beta_grad);
-        cuda::batch_normalize_gradient(gradient_input, means, invstds, src, gamma, src_grad2, gamma_grad2, beta_grad2);
+        cpu::batch_normalize_gradient(DEFAULT_BATCH_NORM_EPS, gradient_input, means, invstds, src, gamma, src_grad, gamma_grad, beta_grad);
+        cuda::batch_normalize_gradient(DEFAULT_BATCH_NORM_EPS, gradient_input, means, invstds, src, gamma, src_grad2, gamma_grad2, beta_grad2);
         dlog << LINFO << "src_grad error: " << max(abs(mat(src_grad)-mat(src_grad2)));
         dlog << LINFO << "gamma_grad error: " << max(abs(mat(gamma_grad)-mat(gamma_grad2)));

@@ -942,8 +942,8 @@ namespace
         tt::tensor_rand rnd;
         rnd.fill_uniform(src);
-        cpu::batch_normalize_conv(dest, means, invstds, 1, running_means, running_variances, src, gamma, beta);
-        cuda::batch_normalize_conv(dest2, means2, invstds2, 1, running_means2, running_variances2, src, gamma, beta);
+        cpu::batch_normalize_conv(DEFAULT_BATCH_NORM_EPS, dest, means, invstds, 1, running_means, running_variances, src, gamma, beta);
+        cuda::batch_normalize_conv(DEFAULT_BATCH_NORM_EPS, dest2, means2, invstds2, 1, running_means2, running_variances2, src, gamma, beta);
         dlog << LINFO << "dest error: " << max(abs(mat(dest)-mat(dest2)));
         dlog << LINFO << "means error: " << max(abs(mat(means)-mat(means2)));

@@ -967,8 +967,8 @@ namespace
         rnd.fill_uniform(gradient_input);
-        cpu::batch_normalize_conv_gradient(gradient_input, means, invstds, src, gamma, src_grad, gamma_grad, beta_grad);
-        cuda::batch_normalize_conv_gradient(gradient_input, means, invstds, src, gamma, src_grad2, gamma_grad2, beta_grad2);
+        cpu::batch_normalize_conv_gradient(DEFAULT_BATCH_NORM_EPS, gradient_input, means, invstds, src, gamma, src_grad, gamma_grad, beta_grad);
+        cuda::batch_normalize_conv_gradient(DEFAULT_BATCH_NORM_EPS, gradient_input, means, invstds, src, gamma, src_grad2, gamma_grad2, beta_grad2);
         dlog << LINFO << "src_grad error: " << max(abs(mat(src_grad)-mat(src_grad2)));
         dlog << LINFO << "gamma_grad error: " << max(abs(mat(gamma_grad)-mat(gamma_grad2)));
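Taken together, the hunks pin down one invariant: whatever epsilon the forward pass used, the inference and gradient paths must use the same one. A reference sketch of the inference-mode computation being tested (plain C++ on flat vectors with a hypothetical layout, not dlib's tensor API):

#include <cmath>
#include <cstddef>
#include <vector>

// Reference inference-mode batch norm: normalize with the stored running
// statistics and the same eps that the training-mode forward pass used.
void batch_norm_inference_ref(
    float eps,
    std::vector<float>& dest,
    const std::vector<float>& src,               // row-major, n samples x k channels
    std::size_t k,                               // number of channels
    const std::vector<float>& gamma,             // k
    const std::vector<float>& beta,              // k
    const std::vector<float>& running_means,     // k
    const std::vector<float>& running_variances) // k
{
    dest.resize(src.size());
    for (std::size_t i = 0; i < src.size(); ++i)
    {
        const std::size_t c = i % k;  // channel of element i in an n-by-k layout
        dest[i] = gamma[c]*(src[i] - running_means[c])
                  / std::sqrt(running_variances[c] + eps) + beta[c];
    }
}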