OpenDAS / dlib · Commits · 10fe74c1

Commit 10fe74c1, authored Nov 30, 2015 by Davis King (parent 2f7898dc)

    Cleaned up the tensor code a bit and also added a tool for making aliased
    tensors.
Showing 2 changed files with 296 additions and 132 deletions:

    dlib/dnn/tensor.h           +203  -121
    dlib/dnn/tensor_abstract.h   +93   -11
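Note (not part of the commit itself): the headline addition here is alias_tensor, which builds lightweight tensor views over an existing tensor's memory. A minimal sketch of the intended usage follows, assuming the dnn headers are reachable through #include <dlib/dnn.h>; the include path, sizes, and values are illustrative only.

    #include <dlib/dnn.h>
    #include <iostream>

    int main()
    {
        using namespace dlib;

        // One block of 48 floats, viewed as a 4x3x2x2 tensor.
        resizable_tensor t(4, 3, 2, 2);

        // Reinterpret the same memory as two smaller tensors: the first sample
        // and the remaining three samples.  No copies are made.
        alias_tensor first(1, 3, 2, 2);
        alias_tensor rest(3, 3, 2, 2);

        auto a = first(t, 0);              // aliases t's elements [0, 12)
        auto b = rest(t, first.size());    // aliases t's elements [12, 48)

        a = 1;  // writes land directly in t's memory
        b = 2;

        std::cout << t.host()[0] << " " << t.host()[t.size()-1] << std::endl;  // prints: 1 2
    }

Both views write straight into t's buffer, so a single allocation can be handed out as several smaller tensors without copying.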
dlib/dnn/tensor.h (view file @ 10fe74c1)

@@ -8,6 +8,7 @@
 #include "../matrix.h"
 #include "cudnn_dlibapi.h"
 #include "gpu_data.h"
+#include <memory>
 
 namespace dlib
 {
@@ -15,25 +16,21 @@ namespace dlib
     class tensor
     {
-        /*!
-            WHAT THIS OBJECT REPRESENTS
-        !*/
     public:
 
         tensor (
         ) :
-            m_n(0), m_k(0), m_nr(0), m_nc(0)
+            m_n(0), m_k(0), m_nr(0), m_nc(0), m_size(0)
         {
         }
 
-        inline virtual ~tensor() = 0;
+        virtual ~tensor() {}
 
         long num_samples() const { return m_n; }
         long k() const { return m_k; }
         long nr() const { return m_nr; }
         long nc() const { return m_nc; }
-        size_t size() const { return data.size(); }
+        size_t size() const { return m_size; }
 
         typedef float* iterator;
         typedef const float* const_iterator;
@@ -44,13 +41,13 @@ namespace dlib
         void async_copy_to_device() const
         {
-            data.async_copy_to_device();
+            data().async_copy_to_device();
         }
 
-        const float* host() const { return data.host(); }
-        float*       host()       { return data.host(); }
-        const float* device() const { return data.device(); }
-        float*       device()       { return data.device(); }
+        virtual const float* host() const = 0;
+        virtual float*       host() = 0;
+        virtual const float* device() const = 0;
+        virtual float*       device() = 0;
 
         tensor& operator= (float val)
         {
@@ -59,15 +56,15 @@ namespace dlib
             // the GPU.  So unless you seem to be actively working with the host side's
             // data then we do this initialization on the device side since this avoids a
             // host to device transfer that would likely immediately follow.
-            if (data.device_ready())
+            if (data().device_ready())
             {
                 cuda::set_tensor(*this, val);
                 return *this;
             }
 #endif
-            auto d = data.host();
-            for (size_t i = 0; i < data.size(); ++i)
-                d[i] = val;
+            for (auto& d : *this)
+                d = val;
 
             return *this;
         }
@@ -77,9 +74,9 @@ namespace dlib
             cuda::scale_tensor(*this, val);
             return *this;
 #else
-            auto d = data.host();
-            for (size_t i = 0; i < data.size(); ++i)
-                d[i] *= val;
+            for (auto& d : *this)
+                d *= val;
 
             return *this;
 #endif
         }
@@ -98,7 +95,7 @@ namespace dlib
             static_assert((is_same_type<float,typename EXP::type>::value == true),
                 "To assign a matrix to a tensor the matrix must contain float values");
-            set_ptrm(data.host(), m_n, m_nr*m_nc*m_k) = item;
+            set_ptrm(host(), m_n, m_nr*m_nc*m_k) = item;
             return *this;
         }
@@ -109,7 +106,7 @@ namespace dlib
                 nr()*nc()*k() == item.nc(),"");
             static_assert((is_same_type<float,typename EXP::type>::value == true),
                 "To assign a matrix to a tensor the matrix must contain float values");
-            set_ptrm(data.host(), m_n, m_nr*m_nc*m_k) += item;
+            set_ptrm(host(), m_n, m_nr*m_nc*m_k) += item;
             return *this;
         }
@@ -120,7 +117,7 @@ namespace dlib
                 nr()*nc()*k() == item.nc(),"");
             static_assert((is_same_type<float,typename EXP::type>::value == true),
                 "To assign a matrix to a tensor the matrix must contain float values");
-            set_ptrm(data.host(), m_n, m_nr*m_nc*m_k) -= item;
+            set_ptrm(host(), m_n, m_nr*m_nc*m_k) -= item;
             return *this;
         }
@@ -134,7 +131,7 @@ namespace dlib
             DLIB_CASSERT(item.size() == nr()*nc()*k(), "");
             static_assert((is_same_type<float,typename EXP::type>::value == true),
                 "To assign a matrix to a tensor the matrix must contain float values");
-            set_ptrm(data.host()+idx*item.size(), item.nr(), item.nc()) = item;
+            set_ptrm(host()+idx*item.size(), item.nr(), item.nc()) = item;
         }
@@ -148,70 +145,30 @@ namespace dlib
             DLIB_CASSERT(item.size() == nr()*nc()*k(), "");
             static_assert((is_same_type<float,typename EXP::type>::value == true),
                 "To assign a matrix to a tensor the matrix must contain float values");
-            set_ptrm(data.host()+idx*item.size(), item.nr(), item.nc()) += item;
+            set_ptrm(host()+idx*item.size(), item.nr(), item.nc()) += item;
         }
 
 #ifdef DLIB_USE_CUDA
-        const cuda::tensor_descriptor& get_cudnn_tensor_descriptor (
-        ) const { return cudnn_descriptor; }
+        virtual const cuda::tensor_descriptor& get_cudnn_tensor_descriptor (
+        ) const;
 #endif
 
     protected:
 
-        tensor& operator= (const tensor& item)
-        {
-            m_n  = item.m_n;
-            m_k  = item.m_k;
-            m_nr = item.m_nr;
-            m_nc = item.m_nc;
-            data.set_size(item.data.size());
-            std::memcpy(data.host(), item.data.host(), data.size()*sizeof(float));
-#ifdef DLIB_USE_CUDA
-            cudnn_descriptor.set_size(m_n,m_k,m_nr,m_nc);
-#endif
-            return *this;
-        }
-
-        tensor(
-            const tensor& item
-        )
-        {
-            *this = item;
-        }
-
-        tensor(
-            tensor&& item
-        ) : tensor()
-        {
-            swap(item);
-        }
-
-        tensor& operator=(tensor&& item)
-        {
-            swap(item);
-            return *this;
-        }
-
-        void swap(tensor& item)
-        {
-            std::swap(m_n,  item.m_n);
-            std::swap(m_k,  item.m_k);
-            std::swap(m_nr, item.m_nr);
-            std::swap(m_nc, item.m_nc);
-            std::swap(data, item.data);
-#ifdef DLIB_USE_CUDA
-            std::swap(cudnn_descriptor, item.cudnn_descriptor);
-#endif
-        }
+        friend class alias_tensor;
+
+        virtual gpu_data& data() = 0;
+        virtual const gpu_data& data() const = 0;
+        virtual size_t get_alias_offset() const { return 0; } // needed by alias_tensor.
 
         long m_n;
         long m_k;
         long m_nr;
         long m_nc;
-        gpu_data data;
-#ifdef DLIB_USE_CUDA
-        cuda::tensor_descriptor cudnn_descriptor;
-#endif
+        long m_size; // always equal to m_n*m_k*m_nr*m_nc
     };
 
-    tensor::~tensor()
-    {
-    }
-
// ----------------------------------------------------------------------------------------
 
     inline const matrix_op<op_pointer_to_mat<float> > mat (
@@ -299,11 +256,29 @@ namespace dlib
             long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
         )
         {
+            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0, "");
             set_size(n_,k_,nr_,nc_);
         }
 
-        resizable_tensor(const resizable_tensor&) = default;
-        resizable_tensor(resizable_tensor&&) = default;
+        resizable_tensor(const resizable_tensor& item)
+        {
+            copy_size(item);
+            std::memcpy(data_instance.host(), item.host(), data_instance.size()*sizeof(float));
+        }
+
+        resizable_tensor(const tensor& item)
+        {
+            copy_size(item);
+            std::memcpy(data_instance.host(), item.host(), data_instance.size()*sizeof(float));
+        }
+
+        resizable_tensor(resizable_tensor&& item) { swap(item); }
+        resizable_tensor& operator=(resizable_tensor&& item) { swap(item); return *this; }
+
+        virtual const float* host() const { return data_instance.host(); }
+        virtual float*       host()       { return data_instance.host(); }
+        virtual const float* device() const { return data_instance.device(); }
+        virtual float*       device()       { return data_instance.device(); }
 
         void clear(
         )
@@ -314,10 +289,6 @@ namespace dlib
         void copy_size (
            const tensor& item
        )
-        /*!
-            ensures
-                - resizes *this so that: have_same_dimensions(#*this, item)==true
-        !*/
         {
             set_size(item.num_samples(), item.k(), item.nr(), item.nc());
         }
@@ -335,60 +306,67 @@ namespace dlib
             return *this;
         }
 
-        template <typename EXP>
-        resizable_tensor& operator+= (const matrix_exp<EXP>& item)
-        {
-            tensor::operator+=(item);
-            return *this;
-        }
+        void set_size(
+            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
+        )
+        {
+            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0, "");
+            m_n = n_;
+            m_k = k_;
+            m_nr = nr_;
+            m_nc = nc_;
+            m_size = n_*k_*nr_*nc_;
+            data_instance.set_size(m_size);
+#ifdef DLIB_USE_CUDA
+            cudnn_descriptor.set_size(m_n,m_k,m_nr,m_nc);
+#endif
+        }
 
-        template <typename EXP>
-        resizable_tensor& operator-= (const matrix_exp<EXP>& item)
-        {
-            tensor::operator-=(item);
-            return *this;
-        }
+        resizable_tensor& operator= (const resizable_tensor& item)
+        {
+            resizable_tensor temp(item);
+            temp.swap(*this);
+            return *this;
+        }
 
-        template <typename EXP>
-        void set_sample (
-            unsigned long idx,
-            const matrix_exp<EXP>& item
-        )
-        {
-            tensor::set_sample(idx, item);
-        }
+        resizable_tensor& operator= (const tensor& item)
+        {
+            resizable_tensor temp(item);
+            temp.swap(*this);
+            return *this;
+        }
 
-        template <typename EXP>
-        void add_to_sample (
-            unsigned long idx,
-            const matrix_exp<EXP>& item
-        )
-        {
-            tensor::add_to_sample(idx, item);
-        }
+        void swap(resizable_tensor& item)
+        {
+            std::swap(m_n,    item.m_n);
+            std::swap(m_k,    item.m_k);
+            std::swap(m_nr,   item.m_nr);
+            std::swap(m_nc,   item.m_nc);
+            std::swap(m_size, item.m_size);
+            std::swap(data_instance, item.data_instance);
+#ifdef DLIB_USE_CUDA
+            std::swap(cudnn_descriptor, item.cudnn_descriptor);
+#endif
+        }
 
-        resizable_tensor& operator= (const resizable_tensor&) = default;
-        resizable_tensor& operator= (resizable_tensor&&) = default;
-
-        resizable_tensor& operator= (const tensor& x)
-        {
-            tensor::operator=(x);
-            return *this;
-        }
-
-        void set_size(
-            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
-        )
-        {
-            m_n = n_;
-            m_k = k_;
-            m_nr = nr_;
-            m_nc = nc_;
-            data.set_size(m_n*m_k*m_nr*m_nc);
 #ifdef DLIB_USE_CUDA
-            cudnn_descriptor.set_size(m_n,m_k,m_nr,m_nc);
+        virtual const cuda::tensor_descriptor& get_cudnn_tensor_descriptor (
+        ) const { return cudnn_descriptor; }
 #endif
-        }
+
+    private:
+
+#ifdef DLIB_USE_CUDA
+        cuda::tensor_descriptor cudnn_descriptor;
+#endif
+
+        gpu_data data_instance;
+        virtual gpu_data& data() { return data_instance; }
+        virtual const gpu_data& data() const { return data_instance; }
     };
 
     inline void serialize(const tensor& item, std::ostream& out)
@@ -399,9 +377,8 @@ namespace dlib
         serialize(item.k(), out);
         serialize(item.nr(), out);
         serialize(item.nc(), out);
-        auto data = item.host();
-        for (size_t i = 0; i < item.size(); ++i)
-            serialize(data[i], out);
+        for (auto& d : item)
+            serialize(d, out);
     }
 
     inline void deserialize(resizable_tensor& item, std::istream& in)
@@ -417,9 +394,8 @@ namespace dlib
         deserialize(nr, in);
         deserialize(nc, in);
         item.set_size(num_samples, k, nr, nc);
-        auto data = item.host();
-        for (size_t i = 0; i < item.size(); ++i)
-            deserialize(data[i], in);
+        for (auto& d : item)
+            deserialize(d, in);
     }
 
// ----------------------------------------------------------------------------------------
@@ -439,6 +415,112 @@ namespace dlib
             return sum;
         }
 
+// ----------------------------------------------------------------------------------------
+
+    class alias_tensor_instance : public tensor
+    {
+        alias_tensor_instance() : data_instance(0), data_offset(0) {}
+    public:
+        friend class alias_tensor;
+
+        alias_tensor_instance& operator= (float val)
+        {
+            tensor::operator=(val);
+            return *this;
+        }
+
+        template <typename EXP>
+        alias_tensor_instance& operator= (const matrix_exp<EXP>& item)
+        {
+            tensor::operator=(item);
+            return *this;
+        }
+
+        virtual const float* host() const { return data_instance->host()+data_offset; }
+        virtual float*       host()       { return data_instance->host()+data_offset; }
+        virtual const float* device() const { return data_instance->device()+data_offset; }
+        virtual float*       device()       { return data_instance->device()+data_offset; }
+
+#ifdef DLIB_USE_CUDA
+        virtual const cuda::tensor_descriptor& get_cudnn_tensor_descriptor (
+        ) const { return *cudnn_descriptor; }
+#endif
+
+    private:
+
+        virtual size_t get_alias_offset() const { return data_offset; }
+
+#ifdef DLIB_USE_CUDA
+        std::shared_ptr<cuda::tensor_descriptor> cudnn_descriptor;
+#endif
+        gpu_data* data_instance;
+        size_t data_offset;
+        virtual gpu_data& data() { return *data_instance; }
+        virtual const gpu_data& data() const { return *data_instance; }
+    };
+
+    class alias_tensor
+    {
+    public:
+
+        alias_tensor() {}
+
+        alias_tensor (
+            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
+        )
+        {
+            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0, "");
+            inst.m_n = n_;
+            inst.m_k = k_;
+            inst.m_nr = nr_;
+            inst.m_nc = nc_;
+            inst.m_size = n_*k_*nr_*nc_;
+        }
+
+        long num_samples() const { return inst.m_n; }
+        long k() const { return inst.m_k; }
+        long nr() const { return inst.m_nr; }
+        long nc() const { return inst.m_nc; }
+        size_t size() const { return inst.m_size; }
+
+        alias_tensor_instance operator() (
+            tensor& t,
+            size_t offset
+        )
+        {
+            DLIB_CASSERT(offset+size() <= t.size(), "");
+
+#ifdef DLIB_USE_CUDA
+            if (!inst.cudnn_descriptor)
+            {
+                inst.cudnn_descriptor = std::make_shared<cuda::tensor_descriptor>();
+                inst.cudnn_descriptor->set_size(inst.m_n, inst.m_k, inst.m_nr, inst.m_nc);
+            }
+#endif
+            inst.data_instance = &t.data();
+            // Note that t might already be an aliasing tensor so we need to take that into
+            // account.
+            inst.data_offset = t.get_alias_offset()+offset;
+            return inst;
+        }
+
+    private:
+        alias_tensor_instance inst;
+    };
+
+// ----------------------------------------------------------------------------------------
 
// ----------------------------------------------------------------------------------------
 
 }
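Note (not from the commit): the get_alias_offset() hook introduced above is what makes aliases of aliases come out right; the comment in alias_tensor::operator() about t possibly already being an aliasing tensor is about accumulating offsets. A small sketch under that assumption, with made-up sizes:

    #include <dlib/dnn.h>

    // Taking a view of a view: offsets accumulate through get_alias_offset(), so the
    // inner view still points at the right spot inside the original buffer.
    void nested_alias_example()
    {
        dlib::resizable_tensor big(10, 4);      // 40 floats in one block
        dlib::alias_tensor middle(4, 4);        // a 16 float window
        dlib::alias_tensor corner(1, 4);        // a 4 float window

        auto m = middle(big, 8);   // m.host() == big.host() + 8
        auto c = corner(m, 4);     // c.host() == big.host() + 12, not offset 4 from scratch
        (void)c;
    }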
dlib/dnn/tensor_abstract.h (view file @ 10fe74c1)

@@ -43,9 +43,7 @@ namespace dlib
     public:
 
-        // A tensor is an abstract type.  Therefore, you use the resizable_tensor object
-        // below to create tensor instances.
-        virtual ~tensor() = 0;
+        virtual ~tensor();
 
         long num_samples(
         ) const;
@@ -109,8 +107,8 @@ namespace dlib
                 - makes a tensor iterable just like the STL containers.
         !*/
 
-        const float* host(
-        ) const;
+        virtual const float* host(
+        ) const = 0;
         /*!
             ensures
                 - returns a pointer to the host memory block of size() contiguous float
@@ -120,8 +118,8 @@ namespace dlib
                   the call to host() blocks.
         !*/
 
-        float* host(
-        );
+        virtual float* host(
+        ) = 0;
         /*!
             ensures
                 - returns a pointer to the host memory block of size() contiguous float
@@ -135,8 +133,8 @@ namespace dlib
                   calling host().
         !*/
 
-        const float* device(
-        ) const;
+        virtual const float* device(
+        ) const = 0;
         /*!
             requires
                 - DLIB_USE_CUDA is #defined
@@ -148,8 +146,8 @@ namespace dlib
                   the call to device() blocks.
         !*/
 
-        float* device(
-        );
+        virtual float* device(
+        ) = 0;
         /*!
             requires
                 - DLIB_USE_CUDA is #defined
@@ -445,6 +443,90 @@ namespace dlib
         !*/
 
+// ----------------------------------------------------------------------------------------
+
+    class alias_tensor_instance : public tensor
+    {
+        /*!
+            WHAT THIS OBJECT REPRESENTS
+                This object is a tensor that aliases another tensor.  That is, it doesn't
+                have its own block of memory but instead simply holds pointers to the
+                memory of another tensor object.
+        !*/
+
+        // You can't default initialize this object.  You can only get instances of it from
+        // alias_tensor::operator().
+        alias_tensor_instance(
+        );
+    };
+
+    class alias_tensor
+    {
+        /*!
+            WHAT THIS OBJECT REPRESENTS
+                This is a tool for creating tensor objects that alias other tensor objects.
+                That is, it allows you to make a tensor that references the memory space of
+                another tensor object rather than owning its own memory.  This allows you
+                to do things like interpret a single tensor in different ways or even as a
+                group of multiple tensors.
+        !*/
+    public:
+
+        alias_tensor (
+        );
+        /*!
+            ensures
+                - #size() == 0
+                - #num_samples() == 0
+                - #k() == 0
+                - #nr() == 0
+                - #nc() == 0
+        !*/
+
+        alias_tensor (
+            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
+        );
+        /*!
+            requires
+                - n_ >= 0
+                - k_ >= 0
+                - nr_ >= 0
+                - nc_ >= 0
+            ensures
+                - #size() == n_*k_*nr_*nc_
+                - #num_samples() == n_
+                - #k() == k_
+                - #nr() == nr_
+                - #nc() == nc_
+        !*/
+
+        long num_samples() const;
+        long k() const;
+        long nr() const;
+        long nc() const;
+        size_t size() const;
+
+        alias_tensor_instance operator() (
+            tensor& t,
+            size_t offset
+        );
+        /*!
+            requires
+                - offset+size() <= t.size()
+            ensures
+                - Return a tensor that simply aliases the elements of t beginning with t's
+                  offset'th element.  Specifically, this function returns an aliasing
+                  tensor T such that:
+                    - T.size() == size()
+                    - T.num_samples() == num_samples()
+                    - T.k() == k()
+                    - T.nr() == nr()
+                    - T.nc() == nc()
+                    - T.host() == t.host()+offset
+                    - T.device() == t.device()+offset
+        !*/
+    };
+
// ----------------------------------------------------------------------------------------
 
 }
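Note (not part of the commit): a short sketch that just exercises the ensures clause documented above, using the two-argument DLIB_CASSERT form seen elsewhere in this diff; the sizes and offset are arbitrary.

    #include <dlib/dnn.h>

    void check_alias_contract()
    {
        using namespace dlib;

        resizable_tensor t(6, 3);          // 18 floats
        alias_tensor a(2, 3);              // a 6 float view
        size_t offset = 3;

        alias_tensor_instance T = a(t, offset);

        // These mirror the documented postconditions of alias_tensor::operator().
        DLIB_CASSERT(T.size() == a.size(), "");
        DLIB_CASSERT(T.num_samples() == a.num_samples(), "");
        DLIB_CASSERT(T.k() == a.k(), "");
        DLIB_CASSERT(T.nr() == a.nr(), "");
        DLIB_CASSERT(T.nc() == a.nc(), "");
        DLIB_CASSERT(T.host() == t.host() + offset, "");
    }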