OpenDAS/dlib · Commit 91ed0085
Authored Nov 26, 2016 by Davis King
Merge commit; parents: d584ae9e, f9e92024

Showing 4 changed files with 216 additions and 1 deletion (+216 -1)
dlib/dnn/loss.h            +107  -0
dlib/dnn/loss_abstract.h    +57  -0
dlib/test/dnn.cpp           +45  -0
dlib/unicode/unicode.h       +7  -1
dlib/dnn/loss.h
@@ -1292,6 +1292,113 @@ namespace dlib

    template <typename SUBNET>
    using loss_metric_hardish = add_loss_layer<loss_metric_hardish_, SUBNET>;

// ----------------------------------------------------------------------------------------
    class loss_mean_squared_
    {
    public:

        typedef float training_label_type;
        typedef float output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const
        {
            DLIB_CASSERT(sub.sample_expansion_factor() == 1);

            const tensor& output_tensor = sub.get_output();

            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());

            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                *iter++ = out_data[i];
            }
        }

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const
        {
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();

            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            DLIB_CASSERT(input_tensor.num_samples() != 0);
            DLIB_CASSERT(input_tensor.num_samples() % sub.sample_expansion_factor() == 0);
            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
                         grad.nc() == 1 &&
                         grad.k() == 1);

            // The loss we output is the average loss over the mini-batch.
            const double scale = 1.0/output_tensor.num_samples();
            double loss = 0;
            float* g = grad.host_write_only();
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                const float y = *truth++;
                const float temp1 = y - out_data[i];
                const float temp2 = scale*temp1;
                loss += 0.5*temp2*temp1;
                g[i] = -temp2;
            }
            return loss;
        }

        friend void serialize(const loss_mean_squared_& , std::ostream& out)
        {
            serialize("loss_mean_squared_", out);
        }

        friend void deserialize(loss_mean_squared_& , std::istream& in)
        {
            std::string version;
            deserialize(version, in);
            if (version != "loss_mean_squared_")
                throw serialization_error("Unexpected version found while deserializing dlib::loss_mean_squared_.");
        }

        friend std::ostream& operator<<(std::ostream& out, const loss_mean_squared_& )
        {
            out << "loss_mean_squared";
            return out;
        }

        friend void to_xml(const loss_mean_squared_& /*item*/, std::ostream& out)
        {
            out << "<loss_mean_squared/>";
        }

    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

// ----------------------------------------------------------------------------------------

}

...
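As the in-code comment notes, the reported loss is the average over the mini-batch. Spelled out (my restatement of the code above, not text from the commit), with N = output_tensor.num_samples(), y_i the i-th truth label, and ŷ_i = out_data[i]:

    L = \frac{1}{2N} \sum_{i=1}^{N} (y_i - \hat{y}_i)^2,
    \qquad
    g_i = \frac{\partial L}{\partial \hat{y}_i} = -\frac{y_i - \hat{y}_i}{N}

so loss += 0.5*temp2*temp1 accumulates (y_i - ŷ_i)^2/(2N), and g[i] = -temp2 is exactly the gradient with respect to the i-th output.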
dlib/dnn/loss_abstract.h
@@ -527,6 +527,63 @@ namespace dlib

// ----------------------------------------------------------------------------------------

    class loss_mean_squared_
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the mean squared
                loss, which is appropriate for regression problems.
        !*/
    public:

        typedef float training_label_type;
        typedef float output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label()
            except it has the additional calling requirements that:
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            and the output label is the predicted continuous variable.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as
            EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient() except it has the
            additional calling requirements that:
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
        !*/
    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

}

#endif // DLIB_DNn_LOSS_ABSTRACT_H_
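A minimal usage sketch (mine, not part of the commit) of a regression net built from this layer; the net shape mirrors the calling requirements above, and the hyperparameter values are illustrative only:

    #include <dlib/dnn.h>
    #include <iostream>
    #include <vector>

    using namespace dlib;

    // A single linear neuron: the fc<1,...> output has nr()==nc()==k()==1,
    // which satisfies the requirements documented above.
    using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;

    int main()
    {
        std::vector<matrix<double>> x;  // samples, each a 1x1 matrix here
        std::vector<float> y;           // training_label_type is float
        for (int i = 0; i < 100; ++i)
        {
            matrix<double> m(1,1);
            m = i;
            x.push_back(m);
            y.push_back(3.0f*i + 7.0f);  // made-up noiseless linear targets
        }

        net_type net;
        dnn_trainer<net_type> trainer(net);  // default sgd solver
        trainer.set_learning_rate(1e-5);
        trainer.set_mini_batch_size(50);
        trainer.set_max_num_epochs(200);
        trainer.train(x, y);

        // operator() runs to_label() internally; output_label_type is float.
        std::cout << "prediction for x[10]: " << net(x[10]) << std::endl;
    }

The new test_simple_linear_regression() test below follows this same pattern and checks that the learned weight and bias recover the true slope and intercept.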
dlib/test/dnn.cpp
@@ -7,6 +7,7 @@

#include <cstdlib>
#include <ctime>
#include <vector>
#include <random>
#include "../dnn.h"
#include "tester.h"

...
@@ -1737,6 +1738,49 @@ namespace

        error = memcmp(g3.host(), b3g.host(), b3g.size());
        DLIB_TEST(error == 0);
    }

// ----------------------------------------------------------------------------------------
    void test_simple_linear_regression()
    {
        ::std::vector<matrix<double>> x(100);
        ::std::vector<float> y(100);
        ::std::default_random_engine generator(16);
        ::std::normal_distribution<float> distribution(0, 5);
        const float true_intercept = 50.0;
        const float true_slope = 10.0;
        for (int ii = 0; ii < 100; ++ii)
        {
            const double val = static_cast<double>(ii);
            matrix<double> tmp(1, 1);
            tmp = val;
            x[ii] = tmp;
            y[ii] = (true_intercept + true_slope*static_cast<float>(val) + distribution(generator));
        }

        using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;
        net_type net;
        layer<1>(net).layer_details().set_bias_learning_rate_multiplier(300);
        sgd defsolver;
        dnn_trainer<net_type> trainer(net, defsolver);
        trainer.set_learning_rate(0.00001);
        trainer.set_mini_batch_size(50);
        trainer.set_max_num_epochs(170);
        trainer.train(x, y);

        const float slope = layer<1>(net).layer_details().get_weights().host()[0];
        const float slope_error = abs(true_slope - slope);
        const float intercept = layer<1>(net).layer_details().get_biases().host()[0];
        const float intercept_error = abs(true_intercept - intercept);
        const float eps_slope = 0.5, eps_intercept = 1.0;

        DLIB_TEST_MSG(slope_error <= eps_slope,
                      "Expected slope = " << true_slope
                      << " Estimated slope = " << slope
                      << " Error limit = " << eps_slope);
        DLIB_TEST_MSG(intercept_error <= eps_intercept,
                      "Expected intercept = " << true_intercept
                      << " Estimated intercept = " << intercept
                      << " Error limit = " << eps_intercept);
    }

// ----------------------------------------------------------------------------------------
    class dnn_tester : public tester

...
@@ -1804,6 +1848,7 @@ namespace

            test_visit_funcions();
            test_copy_tensor_cpu();
            test_concat();
            test_simple_linear_regression();
        }

        void perform_test()

...
dlib/unicode/unicode.h
@@ -289,7 +289,10 @@ namespace dlib

    }

// ----------------------------------------------------------------------------------------

#if defined(__GNUC__) && __GNUC__ >= 6
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmisleading-indentation"
#endif

    template <typename T>
    bool is_combining_char (
        const T ch_

...
@@ -478,6 +481,9 @@ namespace dlib

        if (ch < 0xE0100) return false;
        if (ch < 0xE01F0) return true;

        return false;
    }

#if defined(__GNUC__) && __GNUC__ >= 6
#pragma GCC diagnostic pop
#endif

// ----------------------------------------------------------------------------------------

...
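A small sketch (mine, not from the commit) of what the now pragma-guarded function returns at the boundaries this hunk shows; it only exercises values the visible context pins down, plus plain ASCII:

    #include <dlib/unicode.h>
    #include <iostream>

    int main()
    {
        using dlib::is_combining_char;
        std::cout << std::boolalpha
                  << is_combining_char(0x61ul)    << '\n'  // false: 'a' is not combining
                  << is_combining_char(0xE0100ul) << '\n'  // true: VARIATION SELECTOR-17
                  << is_combining_char(0xE01F0ul) << '\n'; // false: just past the range
    }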