Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dlib
Commits
d7df21a8
Commit
d7df21a8
authored
Dec 23, 2012
by
Davis King
Browse files
switched examples over to the new mat() method.
parent
a5d30218
Changes
7
Hide whitespace changes
Inline
Side-by-side
Showing
7 changed files
with
15 additions
and
15 deletions
+15
-15
examples/assignment_learning_ex.cpp
examples/assignment_learning_ex.cpp
+2
-2
examples/custom_trainer_ex.cpp
examples/custom_trainer_ex.cpp
+2
-2
examples/krr_classification_ex.cpp
examples/krr_classification_ex.cpp
+2
-2
examples/matrix_expressions_ex.cpp
examples/matrix_expressions_ex.cpp
+1
-1
examples/rank_features_ex.cpp
examples/rank_features_ex.cpp
+2
-2
examples/sequence_labeler_ex.cpp
examples/sequence_labeler_ex.cpp
+4
-4
examples/using_custom_kernels_ex.cpp
examples/using_custom_kernels_ex.cpp
+2
-2
No files found.
examples/assignment_learning_ex.cpp
View file @
d7df21a8
...
...
@@ -177,8 +177,8 @@ int main()
{
// Predict the assignments for the LHS and RHS in samples[i].
std
::
vector
<
long
>
predicted_assignments
=
assigner
(
samples
[
i
]);
cout
<<
"true labels: "
<<
trans
(
vector_to_matrix
(
labels
[
i
]));
cout
<<
"predicted labels: "
<<
trans
(
vector_to_matrix
(
predicted_assignments
))
<<
endl
;
cout
<<
"true labels: "
<<
trans
(
mat
(
labels
[
i
]));
cout
<<
"predicted labels: "
<<
trans
(
mat
(
predicted_assignments
))
<<
endl
;
}
// We can also use this tool to compute the percentage of assignments predicted correctly.
...
...
examples/custom_trainer_ex.cpp
View file @
d7df21a8
...
...
@@ -118,9 +118,9 @@ public:
}
// divide by number of +1 samples
positive_center
/=
sum
(
vector_to_matrix
(
labels
)
==
+
1
);
positive_center
/=
sum
(
mat
(
labels
)
==
+
1
);
// divide by number of -1 samples
negative_center
/=
sum
(
vector_to_matrix
(
labels
)
==
-
1
);
negative_center
/=
sum
(
mat
(
labels
)
==
-
1
);
custom_decision_function
df
;
df
.
positive_center
=
positive_center
;
...
...
examples/krr_classification_ex.cpp
View file @
d7df21a8
...
...
@@ -65,8 +65,8 @@ int main()
}
cout
<<
"samples generated: "
<<
samples
.
size
()
<<
endl
;
cout
<<
" number of +1 samples: "
<<
sum
(
vector_to_matrix
(
labels
)
>
0
)
<<
endl
;
cout
<<
" number of -1 samples: "
<<
sum
(
vector_to_matrix
(
labels
)
<
0
)
<<
endl
;
cout
<<
" number of +1 samples: "
<<
sum
(
mat
(
labels
)
>
0
)
<<
endl
;
cout
<<
" number of -1 samples: "
<<
sum
(
mat
(
labels
)
<
0
)
<<
endl
;
// Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
// This is generally a good idea since it often heads off numerical stability problems and also
...
...
examples/matrix_expressions_ex.cpp
View file @
d7df21a8
...
...
@@ -384,7 +384,7 @@ void custom_matrix_expressions_example(
As an aside, note that dlib contains functions equivalent to the ones we
defined above. They are:
- dlib::trans()
- dlib::vector_to_matrix()
- dlib::mat() (converts things into matrices)
- operator+ (e.g. you can say my_mat + 1)
...
...
examples/rank_features_ex.cpp
View file @
d7df21a8
...
...
@@ -79,8 +79,8 @@ int main()
// Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
// This is generally a good idea since it often heads off numerical stability problems and also
// prevents one large feature from smothering others.
const
sample_type
m
(
mean
(
vector_to_matrix
(
samples
)));
// compute a mean vector
const
sample_type
sd
(
reciprocal
(
sqrt
(
variance
(
vector_to_matrix
(
samples
))))
)
;
// compute a standard deviation vector
const
sample_type
m
(
mean
(
mat
(
samples
)));
// compute a mean vector
const
sample_type
sd
(
reciprocal
(
stddev
(
mat
(
samples
))));
// compute a standard deviation vector
// now normalize each sample
for
(
unsigned
long
i
=
0
;
i
<
samples
.
size
();
++
i
)
samples
[
i
]
=
pointwise_multiply
(
samples
[
i
]
-
m
,
sd
);
...
...
examples/sequence_labeler_ex.cpp
View file @
d7df21a8
...
...
@@ -228,8 +228,8 @@ int main()
// print out some of the randomly sampled sequences
for
(
int
i
=
0
;
i
<
10
;
++
i
)
{
cout
<<
"hidden states: "
<<
trans
(
vector_to_matrix
(
labels
[
i
]));
cout
<<
"observed states: "
<<
trans
(
vector_to_matrix
(
samples
[
i
]));
cout
<<
"hidden states: "
<<
trans
(
mat
(
labels
[
i
]));
cout
<<
"observed states: "
<<
trans
(
mat
(
samples
[
i
]));
cout
<<
"******************************"
<<
endl
;
}
...
...
@@ -251,8 +251,8 @@ int main()
// Test the learned labeler on one of the training samples. In this
// case it will give the correct sequence of labels.
std
::
vector
<
unsigned
long
>
predicted_labels
=
labeler
(
samples
[
0
]);
cout
<<
"true hidden states: "
<<
trans
(
vector_to_matrix
(
labels
[
0
]));
cout
<<
"predicted hidden states: "
<<
trans
(
vector_to_matrix
(
predicted_labels
));
cout
<<
"true hidden states: "
<<
trans
(
mat
(
labels
[
0
]));
cout
<<
"predicted hidden states: "
<<
trans
(
mat
(
predicted_labels
));
...
...
examples/using_custom_kernels_ex.cpp
View file @
d7df21a8
...
...
@@ -160,8 +160,8 @@ int main()
}
}
cout
<<
"samples generated: "
<<
samples
.
size
()
<<
endl
;
cout
<<
" number of +1 samples: "
<<
sum
(
vector_to_matrix
(
labels
)
>
0
)
<<
endl
;
cout
<<
" number of -1 samples: "
<<
sum
(
vector_to_matrix
(
labels
)
<
0
)
<<
endl
;
cout
<<
" number of +1 samples: "
<<
sum
(
mat
(
labels
)
>
0
)
<<
endl
;
cout
<<
" number of -1 samples: "
<<
sum
(
mat
(
labels
)
<
0
)
<<
endl
;
// A valid kernel must always give rise to kernel matrices which are symmetric
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment