OpenDAS / dlib / Commits

Commit 5039f0ba, authored Nov 13, 2015 by Davis King
parent c1433b3d

Changed the API for functions that can operate in-place to a more appropriate form.
Showing 4 changed files, with 90 additions and 121 deletions.
dlib/dnn/cudnn_dlibapi.cpp   +25 −31
dlib/dnn/cudnn_dlibapi.h     +25 −38
dlib/dnn/tensor_tools.cpp    +15 −14
dlib/dnn/tensor_tools.h      +25 −38
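Taken together, the four files make one coordinated change to the gradient routines softmax_gradient, sigmoid_gradient, relu_gradient, and tanh_gradient (declared in namespace dlib and wrapped in namespace dlib::tt): the softmaxed_data parameter is renamed to dest, the now-redundant src parameter is dropped (each of these activations' derivatives can be computed from the forward output alone), the cuDNN blend factor beta changes from 1 to 0 so the result is assigned to grad rather than added to it, and the contracts explicitly allow in-place use via is_same_object(grad, gradient_input)==true.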
dlib/dnn/cudnn_dlibapi.cpp

...
@@ -639,24 +639,24 @@ namespace dlib
         void softmax_gradient (
             tensor& grad,
-            const tensor& softmaxed_data,
+            const tensor& dest,
             const tensor& gradient_input
         )
         {
-            DLIB_CASSERT(have_same_dimensions(softmaxed_data,gradient_input) == true &&
-                         have_same_dimensions(softmaxed_data,grad) == true , "");
-            if (softmaxed_data.size() == 0)
+            DLIB_CASSERT(have_same_dimensions(dest,gradient_input) == true &&
+                         have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnSoftmaxBackward(context(),
                                   CUDNN_SOFTMAX_ACCURATE,
                                   CUDNN_SOFTMAX_MODE_CHANNEL,
                                   &alpha,
-                                  descriptor(softmaxed_data),
-                                  softmaxed_data.device(),
+                                  descriptor(dest),
+                                  dest.device(),
                                   descriptor(gradient_input),
                                   gradient_input.device(),
                                   &beta,
...
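The beta flip from 1 to 0 above leans on cuDNN's standard output-blending convention, out = alpha*result + beta*out, which every backward call in this file uses. A minimal standalone sketch of that convention (the values are illustrative, not taken from the commit):

```cpp
#include <cstdio>

// cuDNN blends a computed result into the output buffer as
//     out = alpha*result + beta*out.
// beta == 1 accumulates (the old "adds it to grad" behavior);
// beta == 0 overwrites (the new "assigns it to grad" behavior).
float blend(float result, float out, float alpha, float beta)
{
    return alpha*result + beta*out;
}

int main()
{
    float grad = 5.0f; // stale contents of grad
    float g    = 2.0f; // freshly computed gradient value
    std::printf("beta=1 (accumulate): %g\n", blend(g, grad, 1.0f, 1.0f)); // prints 7
    std::printf("beta=0 (assign):     %g\n", blend(g, grad, 1.0f, 0.0f)); // prints 2
    return 0;
}
```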
@@ -691,19 +691,17 @@ namespace dlib
         void sigmoid_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         )
         {
-            DLIB_CASSERT(have_same_dimensions(src,gradient_input) == true &&
-                         have_same_dimensions(src,grad) == true &&
-                         have_same_dimensions(src,dest) == true , "");
-            if (src.size() == 0)
+            DLIB_CASSERT(have_same_dimensions(dest,gradient_input) == true &&
+                         have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnActivationBackward(context(),
                                           CUDNN_ACTIVATION_SIGMOID,
                                           &alpha,
...
@@ -711,8 +709,8 @@ namespace dlib
                                           dest.device(),
                                           descriptor(gradient_input),
                                           gradient_input.device(),
-                                          descriptor(src),
-                                          src.device(),
+                                          descriptor(dest),
+                                          dest.device(),
                                           &beta,
                                           descriptor(grad),
                                           grad.device()));
...
@@ -744,19 +742,17 @@ namespace dlib
         void relu_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         )
         {
-            DLIB_CASSERT(have_same_dimensions(src,gradient_input) == true &&
-                         have_same_dimensions(src,grad) == true &&
-                         have_same_dimensions(src,dest) == true , "");
-            if (src.size() == 0)
+            DLIB_CASSERT(have_same_dimensions(dest,gradient_input) == true &&
+                         have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnActivationBackward(context(),
                                           CUDNN_ACTIVATION_RELU,
                                           &alpha,
...
@@ -764,8 +760,8 @@ namespace dlib
                                           dest.device(),
                                           descriptor(gradient_input),
                                           gradient_input.device(),
-                                          descriptor(src),
-                                          src.device(),
+                                          descriptor(dest),
+                                          dest.device(),
                                           &beta,
                                           descriptor(grad),
                                           grad.device()));
...
@@ -797,19 +793,17 @@ namespace dlib
         void tanh_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         )
         {
-            DLIB_CASSERT(have_same_dimensions(src,gradient_input) == true &&
-                         have_same_dimensions(src,grad) == true &&
-                         have_same_dimensions(src,dest) == true , "");
-            if (src.size() == 0)
+            DLIB_CASSERT(have_same_dimensions(dest,gradient_input) == true &&
+                         have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnActivationBackward(context(),
                                           CUDNN_ACTIVATION_TANH,
                                           &alpha,
...
@@ -817,8 +811,8 @@ namespace dlib
                                           dest.device(),
                                           descriptor(gradient_input),
                                           gradient_input.device(),
-                                          descriptor(src),
-                                          src.device(),
+                                          descriptor(dest),
+                                          dest.device(),
                                           &beta,
                                           descriptor(grad),
                                           grad.device()));
...
dlib/dnn/cudnn_dlibapi.h

...
@@ -346,19 +346,18 @@ namespace dlib
         void softmax_gradient (
             tensor& grad,
-            const tensor& softmaxed_data,
+            const tensor& dest,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(softmaxed_data,gradient_input) == true
-                - have_same_dimensions(softmaxed_data,grad) == true
-                - is_same_object(grad, softmaxed_data)==false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
+                - is_same_object(grad, dest)==false
             ensures
-                - We interpret softmaxed_data as the output of softmax(softmaxed_data,SRC)
-                  for some SRC tensor. Then let f(SRC) == dot(gradient_input,softmaxed_data)
-                  Then this function computes the gradient of f() with respect to SRC and
-                  adds it to grad.
+                - We interpret dest as the output of softmax(dest,SRC) for some SRC tensor.
+                  Then let f(SRC) == dot(gradient_input,dest) Then this function computes
+                  the gradient of f() with respect to SRC and assigns it to grad.
+                - This function supports in-place operation, i.e. having
+                  is_same_object(grad, gradient_input)==true
         !*/
...
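For reference, the quantity this contract describes has a closed form that depends only on the softmaxed output. With s = softmax(x) over a channel and f(x) = dot(g, s), the chain rule gives df/dx_i = s_i*(g_i − dot(g, s)). A dlib-free sketch for a single channel vector (the names here are illustrative, not dlib's):

```cpp
#include <vector>
#include <cstddef>

// One-channel reference for softmax_gradient's "ensures" clause:
//     grad[i] = dest[i] * (g[i] - dot(g, dest)),
// where dest = softmax(SRC) and g is gradient_input. Note that SRC
// itself never appears, which is why the parameter could be renamed
// from softmaxed_data to dest with no loss of information.
std::vector<float> softmax_gradient_ref(
    const std::vector<float>& dest, // softmax outputs for one location
    const std::vector<float>& g     // upstream gradient for the same location
)
{
    float dot = 0;
    for (std::size_t i = 0; i < dest.size(); ++i)
        dot += g[i]*dest[i];
    std::vector<float> grad(dest.size());
    for (std::size_t i = 0; i < dest.size(); ++i)
        grad[i] = dest[i]*(g[i] - dot);
    return grad;
}
```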
@@ -381,22 +380,18 @@ namespace dlib
         void sigmoid_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(src,gradient_input) == true
-                - have_same_dimensions(src,grad) == true
-                - have_same_dimensions(src,dest) == true
-                - dest contains the result of calling sigmoid(dest,src)
-                - is_same_object(grad,src) == false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
+                - is_same_object(grad,dest) == false
             ensures
-                - Recalling that dest is the output of sigmoid(dest,src),
-                  let f(src) == dot(gradient_input,dest)
-                - Then this function computes the gradient of f() with respect to src and
-                  adds it to grad.
+                - Recalling that dest is the output of sigmoid(dest,SRC) for some SRC tensor,
+                  let f(SRC) == dot(gradient_input,dest)
+                - Then this function computes the gradient of f() with respect to SRC and
+                  assigns it to grad.
+                - This function supports in-place operation, i.e. having
+                  is_same_object(grad, gradient_input)==true
         !*/
...
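Dropping src rests on the same observation: for s = sigmoid(x), ds/dx = s*(1 − s), so dest carries everything the backward pass needs. A minimal element-wise sketch of the new contract (not dlib code):

```cpp
#include <vector>
#include <cstddef>

// Element-wise reference for the new sigmoid_gradient contract:
// dest = sigmoid(SRC), so the derivative is dest*(1 - dest), and the
// result is assigned to grad (matching beta == 0 in the .cpp hunks).
void sigmoid_gradient_ref(
    std::vector<float>& grad,
    const std::vector<float>& dest,
    const std::vector<float>& gradient_input)
{
    for (std::size_t i = 0; i < dest.size(); ++i)
        grad[i] = gradient_input[i]*dest[i]*(1 - dest[i]);
}
```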
@@ -419,22 +414,18 @@ namespace dlib
         void relu_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(src,gradient_input) == true
-                - have_same_dimensions(src,grad) == true
-                - have_same_dimensions(src,dest) == true
-                - dest contains the result of calling relu(dest,src)
-                - is_same_object(grad,src) == false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
+                - is_same_object(grad,dest) == false
             ensures
-                - Recalling that dest is the output of relu(dest,src),
-                  let f(src) == dot(gradient_input,dest)
-                - Then this function computes the gradient of f() with respect to src and
-                  adds it to grad.
+                - Recalling that dest is the output of relu(dest,SRC) for some SRC tensor,
+                  let f(SRC) == dot(gradient_input,dest)
+                - Then this function computes the gradient of f() with respect to SRC and
+                  assigns it to grad.
+                - This function supports in-place operation, i.e. having
+                  is_same_object(grad, gradient_input)==true
         !*/
...
@@ -457,22 +448,18 @@ namespace dlib
         void tanh_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(src,gradient_input) == true
-                - have_same_dimensions(src,grad) == true
-                - have_same_dimensions(src,dest) == true
-                - dest contains the result of calling tanh(dest,src)
-                - is_same_object(grad,src) == false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
+                - is_same_object(grad,dest) == false
             ensures
-                - Recalling that dest is the output of tanh(dest,src),
-                  let f(src) == dot(gradient_input,dest)
-                - Then this function computes the gradient of f() with respect to src and
-                  adds it to grad.
+                - Recalling that dest is the output of tanh(dest,SRC) for some SRC tensor,
+                  let f(SRC) == dot(gradient_input,dest)
+                - Then this function computes the gradient of f() with respect to SRC and
+                  assigns it to grad.
+                - This function supports in-place operation, i.e. having
+                  is_same_object(grad, gradient_input)==true
         !*/
...
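relu and tanh fit the same pattern: their derivatives are also functions of the forward output alone, which is what lets all three activation gradients share this src-free signature. A sketch under the same assumptions as the sigmoid example above:

```cpp
#include <vector>
#include <cstddef>

// dest = max(SRC, 0), so the derivative is 1 where dest > 0 and 0 elsewhere.
void relu_gradient_ref(std::vector<float>& grad,
                       const std::vector<float>& dest,
                       const std::vector<float>& gradient_input)
{
    for (std::size_t i = 0; i < dest.size(); ++i)
        grad[i] = dest[i] > 0 ? gradient_input[i] : 0;
}

// dest = tanh(SRC), so the derivative is 1 - dest*dest.
void tanh_gradient_ref(std::vector<float>& grad,
                       const std::vector<float>& dest,
                       const std::vector<float>& gradient_input)
{
    for (std::size_t i = 0; i < dest.size(); ++i)
        grad[i] = gradient_input[i]*(1 - dest[i]*dest[i]);
}
```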
dlib/dnn/tensor_tools.cpp

...
@@ -86,7 +86,7 @@ namespace dlib { namespace tt
     {
         DLIB_CASSERT(have_same_dimensions(dest,src) == true,"");
 #ifdef DLIB_USE_CUDA
-        cuda::multiply(dest, src);
+        //cuda::multiply(dest, src);
 #else
         cpu::multiply(dest, src);
 #endif
...
@@ -103,7 +103,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::affine_transform(dest,src,A,B);
+        //cuda::affine_transform(dest,src,A,B);
 #else
         cpu::affine_transform(dest,src,A,B);
 #endif
...
@@ -119,7 +119,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::affine_transform(dest,src,A,B);
+        //cuda::affine_transform(dest,src,A,B);
 #else
         cpu::affine_transform(dest,src,A,B);
 #endif
...
@@ -137,7 +137,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::batch_normalize(dest,means,vars,src,gamma,beta);
+        //cuda::batch_normalize(dest,means,vars,src,gamma,beta);
 #else
         cpu::batch_normalize(dest,means,vars,src,gamma,beta);
 #endif
...
@@ -157,8 +157,10 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
+        /*
         cuda::batch_normalize_gradient(gradient_input,means,vars,src,gamma,
                                        src_grad,gamma_grad,beta_grad);
+        */
 #else
         cpu::batch_normalize_gradient(gradient_input,means,vars,src,gamma,
                                       src_grad,gamma_grad,beta_grad);
...
@@ -177,7 +179,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::batch_normalize_conv(dest,means,vars,src,gamma,beta);
+        //cuda::batch_normalize_conv(dest,means,vars,src,gamma,beta);
 #else
         cpu::batch_normalize_conv(dest,means,vars,src,gamma,beta);
 #endif
...
@@ -197,8 +199,10 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
+        /*
         cuda::batch_normalize_conv_gradient(gradient_input,means,vars,src,gamma,
                                             src_grad,gamma_grad,beta_grad);
+        */
 #else
         cpu::batch_normalize_conv_gradient(gradient_input,means,vars,src,gamma,
                                            src_grad,gamma_grad,beta_grad);
...
@@ -213,7 +217,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::threshold(data,thresh);
+        //cuda::threshold(data,thresh);
 #else
         cpu::threshold(data,thresh);
 #endif
...
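Note that in the eight hunks above the CUDA branches are commented out while the #else cpu:: paths are left untouched, so building with DLIB_USE_CUDA makes these particular functions no-ops as of this commit; presumably the corresponding cuda:: implementations had not yet been updated to match the revised API.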
@@ -417,12 +421,12 @@ namespace dlib { namespace tt
     void softmax_gradient (
         tensor& grad,
-        const tensor& softmaxed_data,
+        const tensor& dest,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::softmax_gradient(grad, softmaxed_data, gradient_input);
+        cuda::softmax_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
@@ -447,12 +451,11 @@ namespace dlib { namespace tt
     void sigmoid_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::sigmoid_gradient(grad, dest, src, gradient_input);
+        cuda::sigmoid_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
@@ -477,12 +480,11 @@ namespace dlib { namespace tt
     void relu_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::relu_gradient(grad, dest, src, gradient_input);
+        cuda::relu_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
@@ -507,12 +509,11 @@ namespace dlib { namespace tt
     void tanh_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::tanh_gradient(grad, dest, src, gradient_input);
+        cuda::tanh_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
dlib/dnn/tensor_tools.h

...
@@ -519,19 +519,18 @@ namespace dlib { namespace tt
     void softmax_gradient (
         tensor& grad,
-        const tensor& softmaxed_data,
+        const tensor& dest,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(softmaxed_data,gradient_input) == true
-            - have_same_dimensions(softmaxed_data,grad) == true
-            - is_same_object(grad, softmaxed_data)==false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
+            - is_same_object(grad, dest)==false
         ensures
-            - We interpret softmaxed_data as the output of softmax(softmaxed_data,SRC) for
-              some SRC tensor. Then let f(SRC) == dot(gradient_input,softmaxed_data) Then
-              this function computes the gradient of f() with respect to SRC and adds it to
-              grad.
+            - We interpret dest as the output of softmax(dest,SRC) for some SRC tensor.
+              Then let f(SRC) == dot(gradient_input,dest) Then this function computes the
+              gradient of f() with respect to SRC and adds it to grad.
+            - This function supports in-place operation, i.e. having
+              is_same_object(grad, gradient_input)==true
     !*/
...
@@ -554,22 +553,18 @@ namespace dlib { namespace tt
     void sigmoid_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(src,gradient_input) == true
-            - have_same_dimensions(src,grad) == true
-            - have_same_dimensions(src,dest) == true
-            - dest contains the result of calling sigmoid(dest,src)
-            - is_same_object(grad,src) == false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
+            - is_same_object(grad,dest) == false
         ensures
-            - Recalling that dest is the output of sigmoid(dest,src),
-              let f(src) == dot(gradient_input,dest)
-            - Then this function computes the gradient of f() with respect to src and
-              adds it to grad.
+            - Recalling that dest is the output of sigmoid(dest,SRC) for some SRC tensor,
+              let f(SRC) == dot(gradient_input,dest)
+            - Then this function computes the gradient of f() with respect to SRC and
+              assigns it to grad.
+            - This function supports in-place operation, i.e. having
+              is_same_object(grad, gradient_input)==true
     !*/
...
@@ -592,22 +587,18 @@ namespace dlib { namespace tt
     void relu_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(src,gradient_input) == true
-            - have_same_dimensions(src,grad) == true
-            - have_same_dimensions(src,dest) == true
-            - dest contains the result of calling relu(dest,src)
-            - is_same_object(grad,src) == false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
+            - is_same_object(grad,dest) == false
         ensures
-            - Recalling that dest is the output of relu(dest,src),
-              let f(src) == dot(gradient_input,dest)
-            - Then this function computes the gradient of f() with respect to src and
-              adds it to grad.
+            - Recalling that dest is the output of relu(dest,SRC) for some SRC tensor,
+              let f(SRC) == dot(gradient_input,dest)
+            - Then this function computes the gradient of f() with respect to SRC and
+              assigns it to grad.
+            - This function supports in-place operation, i.e. having
+              is_same_object(grad, gradient_input)==true
     !*/
...
@@ -630,22 +621,18 @@ namespace dlib { namespace tt
     void tanh_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(src,gradient_input) == true
-            - have_same_dimensions(src,grad) == true
-            - have_same_dimensions(src,dest) == true
-            - dest contains the result of calling tanh(dest,src)
-            - is_same_object(grad,src) == false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
+            - is_same_object(grad,dest) == false
         ensures
-            - Recalling that dest is the output of tanh(dest,src),
-              let f(src) == dot(gradient_input,dest)
-            - Then this function computes the gradient of f() with respect to src and
-              adds it to grad.
+            - Recalling that dest is the output of tanh(dest,SRC) for some SRC tensor,
+              let f(SRC) == dot(gradient_input,dest)
+            - Then this function computes the gradient of f() with respect to SRC and
+              assigns it to grad.
+            - This function supports in-place operation, i.e. having
+              is_same_object(grad, gradient_input)==true
     !*/
...
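Finally, a runnable illustration of the in-place clause that every contract above now ends with: because each output element is assigned from values read in the same iteration, grad may safely alias gradient_input. This reuses the hypothetical sigmoid_gradient_ref sketch from earlier, with illustrative numbers:

```cpp
#include <cstdio>
#include <vector>
#include <cstddef>

// Same element-wise reference as in the sigmoid sketch above.
void sigmoid_gradient_ref(std::vector<float>& grad,
                          const std::vector<float>& dest,
                          const std::vector<float>& gradient_input)
{
    for (std::size_t i = 0; i < dest.size(); ++i)
        grad[i] = gradient_input[i]*dest[i]*(1 - dest[i]);
}

int main()
{
    std::vector<float> dest = {0.5f, 0.75f};
    std::vector<float> g    = {1.0f, 2.0f};
    // In-place: grad and gradient_input are the same object, which the
    // new contracts explicitly permit (is_same_object(...)==true).
    sigmoid_gradient_ref(g, dest, g);
    std::printf("%g %g\n", g[0], g[1]); // prints 0.25 0.375
    return 0;
}
```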