OpenDAS / dlib · Commit af76e826

Commit af76e826, authored Nov 25, 2016 by Dennis Francis
Parent: cd4b62b4

    converted tabs to spaces in the indentation
Showing 2 changed files with 92 additions and 92 deletions:

  dlib/dnn/loss.h    +52 −52
  dlib/test/dnn.cpp  +40 −40

Since the commit only converts tabs to spaces, the old and new sides of every
hunk are identical apart from indentation, so each hunk's code is shown once
below.
dlib/dnn/loss.h
@@ -1305,70 +1305,70 @@ namespace dlib
        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const
        {
            DLIB_CASSERT(sub.sample_expansion_factor() == 1);

            const tensor& output_tensor = sub.get_output();
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());

            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                *iter++ = out_data[i];
            }
        }

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const
        {
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();

            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            DLIB_CASSERT(input_tensor.num_samples() != 0);
            DLIB_CASSERT(input_tensor.num_samples() % sub.sample_expansion_factor() == 0);
            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
                         grad.nc() == 1 &&
                         grad.k() == 1);

            // The loss we output is the average loss over the mini-batch.
            const double scale = 1.0/output_tensor.num_samples();
            double loss = 0;
            float* g = grad.host_write_only();
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                const float y = *truth++;
                const float temp1 = y - out_data[i];
                const float temp2 = scale*temp1;
                loss += 0.5*temp2*temp1;
                g[i] = -temp2;
            }
            return loss;
        }

        friend void serialize(const loss_mean_squared_& , std::ostream& out)
        {
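For reference, the loop in compute_loss_value_and_gradient() implements the
batch-averaged squared-error loss L = (1/N) * sum_i 0.5*(y_i - f_i)^2, whose
gradient with respect to each network output is dL/df_i = -(y_i - f_i)/N.
A minimal standalone sketch of that arithmetic on plain arrays (the label and
output values are invented for illustration, not taken from dlib):

    #include <cstdio>

    int main()
    {
        // Hypothetical labels and network outputs for a 3-sample mini-batch.
        const float truth[]    = {1.0f, 2.0f, 3.0f};
        const float out_data[] = {0.5f, 2.5f, 3.0f};
        const long  N = 3;

        const double scale = 1.0/N;   // average over the mini-batch
        double loss = 0;
        float g[3];
        for (long i = 0; i < N; ++i)
        {
            const float temp1 = truth[i] - out_data[i];  // residual y - f(x)
            const float temp2 = scale*temp1;             // (y - f)/N
            loss += 0.5*temp2*temp1;                     // adds (1/N)*0.5*(y - f)^2
            g[i] = -temp2;                               // dL/df = -(y - f)/N
        }
        std::printf("loss = %f  g = {%f, %f, %f}\n", loss, g[0], g[1], g[2]);
    }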
@@ -1397,7 +1397,7 @@ namespace dlib
    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

// ----------------------------------------------------------------------------------------
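As a usage sketch, the alias wraps loss_mean_squared_ around the top of a
network. The composition below is taken from the regression test in this same
commit; only the alias name regression_net is hypothetical:

    #include <dlib/dnn.h>
    using namespace dlib;

    // A one-output fully connected layer over a matrix<double> input, wrapped
    // in the mean-squared loss. Labels are floats, one per input sample.
    using regression_net = loss_mean_squared<fc<1, input<matrix<double>>>>;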
dlib/test/dnn.cpp
@@ -1743,45 +1743,45 @@ namespace
    void test_simple_linear_regression()
    {
        ::std::vector<matrix<double>> x(100);
        ::std::vector<float> y(100);
        ::std::default_random_engine generator(16);
        ::std::normal_distribution<float> distribution(0, 5);
        const float true_intercept = 50.0;
        const float true_slope = 10.0;
        for (int ii = 0; ii < 100; ++ii)
        {
            const double val = static_cast<double>(ii);
            matrix<double> tmp(1, 1);
            tmp = val;
            x[ii] = tmp;
            y[ii] = (true_intercept + true_slope*static_cast<float>(val) + distribution(generator));
        }

        using net_type = loss_mean_squared<
                            fc<1,
                            input<matrix<double>>
                            >>;
        net_type net;
        layer<1>(net).layer_details().set_bias_learning_rate_multiplier(300);
        sgd defsolver;
        dnn_trainer<net_type> trainer(net, defsolver);
        trainer.set_learning_rate(0.00001);
        trainer.set_mini_batch_size(50);
        trainer.set_max_num_epochs(170);
        trainer.train(x, y);

        const float slope = layer<1>(net).layer_details().get_weights().host()[0];
        const float slope_error = abs(true_slope - slope);
        const float intercept = layer<1>(net).layer_details().get_biases().host()[0];
        const float intercept_error = abs(true_intercept - intercept);
        const float eps_slope = 0.5, eps_intercept = 1.0;

        DLIB_TEST_MSG(slope_error <= eps_slope,
                      "Expected slope = " << true_slope
                      << " Estimated slope = " << slope
                      << " Error limit = " << eps_slope);
        DLIB_TEST_MSG(intercept_error <= eps_intercept,
                      "Expected intercept = " << true_intercept
                      << " Estimated intercept = " << intercept
                      << " Error limit = " << eps_intercept);
    }
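A plausible reading of the test's tuning (an interpretation, not stated in the
commit): the inputs range from 0 to 99, so the gradient with respect to the
slope weight is roughly two orders of magnitude larger than the gradient with
respect to the bias. set_bias_learning_rate_multiplier(300) compensates so the
intercept (true value 50) can converge within the 170-epoch budget, and the
tolerances eps_slope = 0.5 and eps_intercept = 1.0 leave room for the
N(0, 5) noise added to the labels.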
@@ -1852,7 +1852,7 @@ namespace
            test_visit_funcions();
            test_copy_tensor_cpu();
            test_concat();
            test_simple_linear_regression();
        }

        void perform_test()