OpenDAS / dlib
"tests/vscode:/vscode.git/clone" did not exist on "2ab4d0e10f0bfd23c0940539debba205801340b4"
Commit 8062663c, authored Dec 08, 2015 by Davis King

Added cpu version of add() and also added new add_bias_gradient() function.

parent 20d46fc5
Showing 4 changed files with 123 additions and 3 deletions.
dlib/dnn/cpu_dlib.cpp       +71  -0
dlib/dnn/cpu_dlib.h         +12  -0
dlib/dnn/tensor_tools.cpp   +17  -2
dlib/dnn/tensor_tools.h     +23  -1
dlib/dnn/cpu_dlib.cpp
@@ -68,6 +68,77 @@ namespace dlib
        }
    }

    void add (
        float beta,
        tensor& dest,
        float alpha,
        const tensor& src
    )
    {
        DLIB_CASSERT(
              (dest.num_samples()==src.num_samples() || src.num_samples()==1) &&
              (dest.nr()==src.nr() || src.nr()==1) &&
              (dest.nc()==src.nc() || src.nc()==1) &&
              (dest.k()==src.k()   || src.k()==1) &&
              is_same_object(src,dest) == false, "");

        if (beta == 0 && alpha == 0)
        {
            dest = 0;
            return;
        }

        auto d = dest.host();
        auto s = src.host();
        for (long n = 0; n < dest.num_samples(); ++n)
        {
            const auto sn = src.num_samples()==1 ? 0 : n;
            for (long k = 0; k < dest.k(); ++k)
            {
                const auto sk = src.k()==1 ? 0 : k;
                for (long r = 0; r < dest.nr(); ++r)
                {
                    const auto sr = src.nr()==1 ? 0 : r;
                    for (long c = 0; c < dest.nc(); ++c)
                    {
                        const auto sc = src.nc()==1 ? 0 : c;
                        const auto s_idx = ((sn*src.k() + sk)*src.nr() + sr)*src.nc() + sc;
                        *d = beta*(*d) + alpha*s[s_idx];
                        ++d;
                    }
                }
            }
        }
    }
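For illustration only (not part of the commit): the assert above allows each dimension of src to either match dest's or be exactly 1, in which case that dimension is broadcast across dest. A minimal sketch, assuming a build where cpu_dlib.h is usable directly and using dlib's resizable_tensor:

    // Illustrative sketch -- not part of the commit.
    #include <dlib/dnn/cpu_dlib.h>

    int main()
    {
        using namespace dlib;

        // dest: 2 samples, 3 channels, 4x4.  src: one value per channel, so
        // its num_samples(), nr(), and nc() are all 1 and get broadcast.
        resizable_tensor dest(2,3,4,4);
        resizable_tensor src(1,3,1,1);
        dest = 1;
        src = 2;

        // dest = 0.5*dest + 1*src, so every element becomes 0.5*1 + 2 = 2.5.
        cpu::add(0.5, dest, 1, src);
    }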
// ----------------------------------------------------------------------------------------
    void add_bias_gradient (
        tensor& grad,
        const tensor& gradient_input
    )
    {
        DLIB_CASSERT(
              grad.num_samples() == 1 &&
              gradient_input.k() == grad.k() &&
              gradient_input.nr() == grad.nr() &&
              gradient_input.nc() == grad.nc() &&
              gradient_input.size() > 0, "");

        auto out = grad.host();
        auto in = gradient_input.host();

        for (size_t i = 0; i < grad.size(); ++i)
            out[i] = *in++;

        for (long i = 1; i < gradient_input.num_samples(); ++i)
        {
            for (size_t i = 0; i < grad.size(); ++i)
                out[i] += *in++;
        }
    }
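To make the two-loop structure concrete (not part of the commit): the first loop copies sample 0 of gradient_input into grad, and the second accumulates the remaining samples on top, so grad ends up holding the sum of gradient_input over its sample dimension. A small usage sketch:

    // Illustrative sketch -- not part of the commit.
    #include <dlib/dnn/cpu_dlib.h>
    #include <cassert>

    int main()
    {
        using namespace dlib;

        // 3 samples of a 2-channel 1x1 gradient; grad must have num_samples()==1.
        resizable_tensor gradient_input(3,2,1,1);
        resizable_tensor grad(1,2,1,1);
        gradient_input = 1;

        cpu::add_bias_gradient(grad, gradient_input);

        // Each bias element's gradient is the sum across the 3 samples.
        assert(grad.host()[0] == 3);
        assert(grad.host()[1] == 3);
    }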
// -----------------------------------------------------------------------------------
    void affine_transform (
        ...
dlib/dnn/cpu_dlib.h
@@ -26,6 +26,18 @@ namespace dlib
        const tensor& src2
    );

    void add (
        float beta,
        tensor& dest,
        float alpha,
        const tensor& src
    );

    void add_bias_gradient (
        tensor& grad,
        const tensor& gradient_input
    );
// -----------------------------------------------------------------------------------
    void affine_transform (
        ...
dlib/dnn/tensor_tools.cpp
@@ -259,8 +259,7 @@ namespace dlib { namespace tt
 #ifdef DLIB_USE_CUDA
         cuda::add(beta,dest,alpha,src);
 #else
-        // TODO
-        DLIB_CASSERT(false,"");
+        cpu::add(beta,dest,alpha,src);
 #endif
     }
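As a usage note (not from the commit): callers go through the tt:: wrapper rather than naming a backend, and this hunk replaces the CPU-path TODO with the new cpu::add(). A hypothetical caller, where scale_and_accumulate is an illustrative name and not a dlib function:

    // Illustrative sketch -- not part of the commit.
    #include <dlib/dnn/tensor_tools.h>

    void scale_and_accumulate(dlib::tensor& dest, const dlib::tensor& src)
    {
        // Whether this runs the cuda:: or the new cpu:: implementation is
        // decided at compile time by DLIB_USE_CUDA.
        dlib::tt::add(1, dest, 1, src);
    }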
@@ -279,6 +278,22 @@ namespace dlib { namespace tt
 #endif
     }

 // ----------------------------------------------------------------------------------------

+    void add_bias_gradient (
+        tensor& grad,
+        const tensor& gradient_input
+    )
+    {
+#ifdef DLIB_USE_CUDA
+        // TODO
+        DLIB_CASSERT(false,"");
+        //cuda::add_bias_gradient(grad,gradient_input);
+#else
+        cpu::add_bias_gradient(grad,gradient_input);
+#endif
+    }

 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
dlib/dnn/tensor_tools.h
@@ -406,7 +406,29 @@ namespace dlib { namespace tt
             - gradient_input.size() > 0
             - is_same_object(grad,gradient_input) == false
         ensures
-            - let BIAS be a tensor with all dimensions equal to 1 except for k which is >= 1.
+            - let BIAS be a tensor with the same dimensions as grad.
             - let OUT be the output of add(1,OUT,1,BIAS)
             - let f(gradient_input,BIAS) == dot(gradient_input,OUT)
             - Then this function computes the gradient of f() with respect to BIAS and
               assigns it to grad.
     !*/

 // ----------------------------------------------------------------------------------------

+    void add_bias_gradient (
+        tensor& grad,
+        const tensor& gradient_input
+    );
+    /*!
+        requires
+            - grad.num_samples() == 1
+            - gradient_input.k() == grad.k()
+            - gradient_input.nr() == grad.nr()
+            - gradient_input.nc() == grad.nc()
+            - gradient_input.size() > 0
+            - is_same_object(grad,gradient_input) == false
+        ensures
+            - let BIAS be a tensor with the same dimensions as grad.
+            - let OUT be the output of add(1,OUT,1,BIAS)
+            - let f(gradient_input,BIAS) == dot(gradient_input,OUT)
+            - Then this function computes the gradient of f() with respect to BIAS and
...
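A sketch (not part of the commit) that checks the ensures clause numerically, assuming a non-CUDA build so tt::add_bias_gradient forwards to the new cpu:: implementation: since OUT adds the same BIAS to every sample, df/dBIAS is simply gradient_input summed over the sample dimension.

    // Illustrative sketch -- not part of the commit.
    #include <dlib/dnn/tensor_tools.h>
    #include <cassert>

    int main()
    {
        using namespace dlib;

        resizable_tensor gradient_input(4,3,2,2);   // 4 samples, 12 values each
        resizable_tensor grad(1,3,2,2);
        for (size_t i = 0; i < gradient_input.size(); ++i)
            gradient_input.host()[i] = (float)i;

        tt::add_bias_gradient(grad, gradient_input);

        // grad[i] should equal the sum of gradient_input over samples at i.
        const float* in = gradient_input.host();
        const size_t per_sample = grad.size();      // 3*2*2 == 12
        for (size_t i = 0; i < per_sample; ++i)
        {
            float expected = 0;
            for (long n = 0; n < gradient_input.num_samples(); ++n)
                expected += in[n*per_sample + i];
            assert(grad.host()[i] == expected);
        }
    }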