OpenDAS / dlib · Commit 5ae7a473

authored Nov 08, 2015 by Davis King

Added batch normalization tests

parent 6c05ff45
Showing 2 changed files with 226 additions and 5 deletions:

    dlib/test/CMakeLists.txt    +12 −5
    dlib/test/dnn.cpp           +214 −0
dlib/test/CMakeLists.txt (+12 −5)

```diff
@@ -5,6 +5,12 @@
 cmake_minimum_required(VERSION 2.8.4)
 
+# create a variable called target_name and set it to the string "dtest"
+set (target_name dtest)
+
+PROJECT(${target_name})
+
+include(../cmake)
 
 # This variable contains a list of all the tests we are building
 # into the regression test suite.
 set (tests
@@ -144,10 +150,13 @@ set (tests
    vectorstream.cpp
    )
 
-# create a variable called target_name and set it to the string "dtest"
-set (target_name dtest)
+# Tests that require C++11 support
+if (COMPILER_CAN_DO_CPP_11)
+   set(tests ${tests} dnn.cpp)
+endif()
 
-PROJECT(${target_name})
 
 # add all the cpp files we want to compile to this list.  This tells
 # cmake that they are part of our target (which is the executable named dtest)
@@ -159,8 +168,6 @@ if (CMAKE_COMPILER_IS_GNUCXX)
    endif()
 endif()
 
-# Tell cmake to link our target executable to dlib.
-include(../cmake)
 TARGET_LINK_LIBRARIES(${target_name} dlib )
```
dlib/test/dnn.cpp (new file, mode 100644, +214 −0)

```cpp
// Copyright (C) 2015  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.

#include <sstream>
#include <string>
#include <cstdlib>
#include <ctime>
#include <vector>
#include "../dnn.h"

#include "tester.h"

namespace
{
    using namespace test;
    using namespace dlib;
    using namespace dlib::cpu;
    using namespace std;

    logger dlog("test.dnn");

// ----------------------------------------------------------------------------------------
```
```cpp
    // Return the largest absolute difference between the analytic gradient
    // stored in t and the numerical gradient produced by the callable grad.
    template <typename T>
    float compare_gradients (
        const tensor& t,
        T grad
    )
    {
        float max_error = 0;
        auto p = t.host();
        for (size_t i = 0; i < t.size(); ++i)
        {
            max_error = std::max(max_error, std::abs(p[i]-grad(i)));
        }
        return max_error;
    }
```
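The `grad_src`, `grad_gamma`, and `grad_beta` lambdas defined below supply that numerical gradient by symmetric (central) differencing of the scalar probe $E = \langle \texttt{gradient\_input}, \texttt{dest} \rangle$, which has $O(\varepsilon^2)$ truncation error versus $O(\varepsilon)$ for a one-sided difference:

$$\frac{\partial E}{\partial x_i} \;\approx\; \frac{E(x_i + \varepsilon) - E(x_i - \varepsilon)}{2\varepsilon}, \qquad \varepsilon = 0.01.$$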
```cpp
    void test_batch_normalize()
    {
        resizable_tensor src(5,5), gamma(1,5), beta(1,5), dest, means, vars, gradient_input(5,5);
        src = matrix_cast<float>(gaussian_randm(5,5, 0));
        gamma = matrix_cast<float>(gaussian_randm(1,5, 1));
        beta = matrix_cast<float>(gaussian_randm(1,5, 2));
        gradient_input = matrix_cast<float>(gaussian_randm(5,5, 3));

        gamma = 1;
        beta = 0;

        batch_normalize(dest, means, vars, src, gamma, beta);

        // Central-difference estimate of d dot(gradient_input, dest) / d src[idx].
        auto grad_src = [&](long idx)
        {
            auto f = [&](float eps)
            {
                const float old = src.host()[idx];
                src.host()[idx] += eps;
                batch_normalize(dest, means, vars, src, gamma, beta);
                float result = dot(gradient_input, dest);
                src.host()[idx] = old;
                return result;
            };
            const float eps = 0.01;
            return (f(+eps)-f(-eps))/(2*eps);
        };
        auto grad_gamma = [&](long idx)
        {
            auto f = [&](float eps)
            {
                const float old = gamma.host()[idx];
                gamma.host()[idx] += eps;
                batch_normalize(dest, means, vars, src, gamma, beta);
                float result = dot(gradient_input, dest);
                gamma.host()[idx] = old;
                return result;
            };
            const float eps = 0.01;
            return (f(+eps)-f(-eps))/(2*eps);
        };
        auto grad_beta = [&](long idx)
        {
            auto f = [&](float eps)
            {
                const float old = beta.host()[idx];
                beta.host()[idx] += eps;
                batch_normalize(dest, means, vars, src, gamma, beta);
                float result = dot(gradient_input, dest);
                beta.host()[idx] = old;
                return result;
            };
            const float eps = 0.01;
            return (f(+eps)-f(-eps))/(2*eps);
        };

        resizable_tensor src_grad, gamma_grad, beta_grad;
        src_grad.copy_size(src);
        gamma_grad.copy_size(gamma);
        beta_grad.copy_size(beta);
        src_grad = 0;
        gamma_grad = 0;
        beta_grad = 0;

        batch_normalize_gradient(gradient_input, means, vars, src, gamma, src_grad, gamma_grad, beta_grad);

        auto grad_error = compare_gradients(src_grad, grad_src);
        dlog << LINFO << "src error: " << grad_error;
        DLIB_TEST(grad_error < 0.001);

        grad_error = compare_gradients(gamma_grad, grad_gamma);
        dlog << LINFO << "gamma error: " << grad_error;
        DLIB_TEST(grad_error < 0.001);

        grad_error = compare_gradients(beta_grad, grad_beta);
        dlog << LINFO << "beta error: " << grad_error;
        DLIB_TEST(grad_error < 0.001);
    }
```
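For reference, `batch_normalize` standardizes each column $j$ of the $5\times5$ input across the 5 rows of the minibatch and returns the per-column statistics in `means` and `vars`. The general transform is the standard batch-normalization formula (the exact variance-epsilon handling inside dlib is an assumption here):

$$y_{ij} \;=\; \gamma_j\,\frac{x_{ij} - \mu_j}{\sqrt{\sigma_j^2 + \epsilon}} \;+\; \beta_j$$

With `gamma = 1` and `beta = 0` as set above, the output is simply the standardized input.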
```cpp
    void test_batch_normalize_conv()
    {
        resizable_tensor src(5,5,4,4), gamma(1,5), beta(1,5), dest, means, vars, gradient_input(5,5,4,4);
        src = matrix_cast<float>(gaussian_randm(5,5*4*4, 0));
        gamma = matrix_cast<float>(gaussian_randm(1,5, 1));
        beta = matrix_cast<float>(gaussian_randm(1,5, 2));
        gradient_input = matrix_cast<float>(gaussian_randm(5,5*4*4, 3));

        gamma = 1;
        beta = 0;

        batch_normalize_conv(dest, means, vars, src, gamma, beta);

        // Same central-difference probes as above, but against the conv variant.
        auto grad_src = [&](long idx)
        {
            auto f = [&](float eps)
            {
                const float old = src.host()[idx];
                src.host()[idx] += eps;
                batch_normalize_conv(dest, means, vars, src, gamma, beta);
                float result = dot(gradient_input, dest);
                src.host()[idx] = old;
                return result;
            };
            const float eps = 0.01;
            return (f(+eps)-f(-eps))/(2*eps);
        };
        auto grad_gamma = [&](long idx)
        {
            auto f = [&](float eps)
            {
                const float old = gamma.host()[idx];
                gamma.host()[idx] += eps;
                batch_normalize_conv(dest, means, vars, src, gamma, beta);
                float result = dot(gradient_input, dest);
                gamma.host()[idx] = old;
                return result;
            };
            const float eps = 0.01;
            return (f(+eps)-f(-eps))/(2*eps);
        };
        auto grad_beta = [&](long idx)
        {
            auto f = [&](float eps)
            {
                const float old = beta.host()[idx];
                beta.host()[idx] += eps;
                batch_normalize_conv(dest, means, vars, src, gamma, beta);
                float result = dot(gradient_input, dest);
                beta.host()[idx] = old;
                return result;
            };
            const float eps = 0.01;
            return (f(+eps)-f(-eps))/(2*eps);
        };

        resizable_tensor src_grad, gamma_grad, beta_grad;
        src_grad.copy_size(src);
        gamma_grad.copy_size(gamma);
        beta_grad.copy_size(beta);
        src_grad = 0;
        gamma_grad = 0;
        beta_grad = 0;

        batch_normalize_conv_gradient(gradient_input, means, vars, src, gamma, src_grad, gamma_grad, beta_grad);

        auto grad_error = compare_gradients(src_grad, grad_src);
        dlog << LINFO << "src error: " << grad_error;
        DLIB_TEST(grad_error < 0.001);

        grad_error = compare_gradients(gamma_grad, grad_gamma);
        dlog << LINFO << "gamma error: " << grad_error;
        DLIB_TEST(grad_error < 0.001);

        grad_error = compare_gradients(beta_grad, grad_beta);
        dlog << LINFO << "beta error: " << grad_error;
        DLIB_TEST(grad_error < 0.001);
    }
```
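The conv variant pools the batch statistics over samples and spatial positions per channel, which is why `src` grows to $5\times5\times4\times4$ (5 samples, 5 channels, $4\times4$ feature maps) while `gamma` and `beta` stay at one value per channel. A minimal sketch of that pooling in plain C++, independent of dlib (the NCHW indexing here is an assumption inferred from the shapes in the test, not dlib's documented internals):

```cpp
#include <array>
#include <cstddef>

constexpr std::size_t N = 5, K = 5, H = 4, W = 4;  // samples, channels, rows, cols

// mean[k] averages every sample and pixel of channel k (N*H*W = 80 values),
// so there is one mean/variance/gamma/beta per channel -- matching the (1,5) tensors.
void per_channel_means(const std::array<float, N*K*H*W>& src,
                       std::array<float, K>& mean)
{
    for (std::size_t k = 0; k < K; ++k)
    {
        float sum = 0;
        for (std::size_t n = 0; n < N; ++n)
            for (std::size_t p = 0; p < H*W; ++p)
                sum += src[(n*K + k)*H*W + p];  // assumed NCHW layout
        mean[k] = sum / (N*H*W);
    }
}
```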
```cpp
// ----------------------------------------------------------------------------------------

    class dnn_tester : public tester
    {
    public:
        dnn_tester (
        ) :
            tester ("test_dnn",
                "Runs tests on the deep neural network tools.")
        {}

        void perform_test (
        )
        {
            test_batch_normalize();
            test_batch_normalize_conv();
        }
    } a;

}
```
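The trailing `a` is a file-scope instance of `dnn_tester`: dlib's test harness relies on construction-time self-registration, so defining the object inside the anonymous namespace (before `main` runs) is what adds `test_dnn` to the regression suite. A minimal sketch of that pattern with hypothetical names, not dlib's actual `tester` API:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Hypothetical registry; dlib's real tester base class is similar in spirit.
std::map<std::string, std::function<void()>>& test_registry()
{
    static std::map<std::string, std::function<void()>> r;  // constructed on first use
    return r;
}

struct self_registering_test
{
    self_registering_test(const std::string& name, std::function<void()> fn)
    {
        test_registry()[name] = std::move(fn);  // runs during static initialization
    }
};

// Defining a global instance registers the test, exactly as `} a;` does above.
static self_registering_test reg("test_dnn", []
{
    std::cout << "running test_dnn\n";  // stand-in for perform_test()
});

int main()
{
    for (auto& t : test_registry())
        t.second();  // the harness iterates the registered tests like this
}
```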