dlib, commit cc3bb499
Authored Dec 10, 2014 by Patrick Snape
Python3 friendly printing in examples
Parent: e3aee32f
6 changed files with 28 additions and 28 deletions
python_examples/face_detector.py          +3   -3
python_examples/max_cost_assignment.py    +2   -2
python_examples/sequence_segmenter.py     +2   -2
python_examples/svm_rank.py               +7   -7
python_examples/svm_struct.py             +2   -2
python_examples/train_object_detector.py  +12  -12
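Every change below follows the same mechanical pattern: Python 2's print statement becomes a call to the print() function, the only form Python 3 accepts. A minimal sketch of the pattern; the __future__ import shown here is optional and not part of this commit, but it makes the call form behave identically on Python 2.6+ as well:

from __future__ import print_function  # optional; not used by this commit

f = "some_image.jpg"  # hypothetical value, just for illustration

# Python 2 only (statement form, removed by this commit):
#     print "processing file: ", f
# Works on Python 3, and on Python 2 with the import above:
print("processing file: ", f)

# Caveat: without the __future__ import, Python 2 parses print(a, b) as a print
# statement applied to a tuple and prints ('processing file: ', 'some_image.jpg').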
python_examples/face_detector.py
@@ -35,15 +35,15 @@ detector = dlib.get_frontal_face_detector()
 win = dlib.image_window()

 for f in sys.argv[1:]:
-    print "processing file: ", f
+    print("processing file: ", f)
     img = io.imread(f)
     # The 1 in the second argument indicates that we should upsample the image
     # 1 time. This will make everything bigger and allow us to detect more
     # faces.
     dets = detector(img, 1)
-    print "number of faces detected: ", len(dets)
+    print("number of faces detected: ", len(dets))
     for d in dets:
-        print "  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom()
+        print("  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())

     win.clear_overlay()
     win.set_image(img)
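For reference, a condensed, self-contained sketch of the converted example. It is not the file's exact contents: win.add_overlay() and dlib.hit_enter_to_continue() are existing dlib helpers added here to actually draw and inspect the detections.

import sys

import dlib
from skimage import io

detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

for f in sys.argv[1:]:
    print("processing file: ", f)
    img = io.imread(f)
    # Upsample once so smaller faces become detectable.
    dets = detector(img, 1)
    print("number of faces detected: ", len(dets))
    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)           # draw a box around each detection
    dlib.hit_enter_to_continue()    # pause so the window can be inspected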
python_examples/max_cost_assignment.py
@@ -40,11 +40,11 @@ assignment = dlib.max_cost_assignment(cost)
 # This prints optimal assignments: [2, 0, 1]
 # which indicates that we should assign the person from the first row of the cost matrix to
 # job 2, the middle row person to job 0, and the bottom row person to job 1.
-print "optimal assignments: ", assignment
+print("optimal assignments: ", assignment)

 # This prints optimal cost: 16.0
 # which is correct since our optimal assignment is 6+5+5.
-print "optimal cost: ", dlib.assignment_cost(cost, assignment)
+print("optimal cost: ", dlib.assignment_cost(cost, assignment))
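The cost matrix itself sits above this hunk and is not shown in the diff. For a self-contained illustration, the sketch below uses a hypothetical 3x3 cost matrix chosen to be consistent with the comments above (assignment [2, 0, 1] and cost 6 + 5 + 5 = 16):

import dlib

# Hypothetical costs: cost[i][j] = value of giving job j to person i.
cost = dlib.matrix([[1, 2, 6],
                    [5, 3, 6],
                    [4, 5, 0]])

assignment = dlib.max_cost_assignment(cost)

print("optimal assignments: ", assignment)                       # [2, 0, 1]
print("optimal cost: ", dlib.assignment_cost(cost, assignment))  # 6 + 5 + 5 = 16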
python_examples/sequence_segmenter.py
@@ -176,9 +176,9 @@ else:
 # We can also measure the accuracy of a model relative to some labeled data. This
 # statement prints the precision, recall, and F1-score of the model relative to the data in
 # training_sequences/segments.
-print "Test on training data:", dlib.test_sequence_segmenter(model, training_sequences, segments)
+print("Test on training data:", dlib.test_sequence_segmenter(model, training_sequences, segments))

 # We can also do 5-fold cross-validation and print the resulting precision, recall, and F1-score.
-print "cross validation:", dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5, params)
+print("cross validation:", dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5, params))
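The 5 passed to cross_validate_sequence_segmenter() is the number of folds; dlib performs the splitting internally. Purely as an illustration of what 5-fold cross-validation means (this is not dlib code), a schematic sketch of the train/test partitioning:

def k_fold_splits(num_items, folds=5):
    """Yield (train_indices, test_indices) pairs for simple k-fold cross-validation."""
    indices = list(range(num_items))
    fold_size = max(1, num_items // folds)
    for k in range(folds):
        test = indices[k * fold_size:(k + 1) * fold_size]
        train = [i for i in indices if i not in test]
        yield train, test

# Example: 10 training sequences split into 5 folds; each fold is held out once
# while a segmenter is trained on the remaining 8 sequences and evaluated on it.
for train_idx, test_idx in k_fold_splits(10):
    print("train on:", train_idx, "test on:", test_idx)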
python_examples/svm_rank.py
@@ -53,8 +53,8 @@ rank = trainer.train(data)
 # Now if you call rank on a vector it will output a ranking score. In
 # particular, the ranking score for relevant vectors should be larger than the
 # score for non-relevant vectors.
-print "ranking score for a relevant vector: ", rank(data.relevant[0])
-print "ranking score for a non-relevant vector: ", rank(data.nonrelevant[0])
+print("ranking score for a relevant vector: ", rank(data.relevant[0]))
+print("ranking score for a non-relevant vector: ", rank(data.nonrelevant[0]))
 # The output is the following:
 # ranking score for a relevant vector: 0.5
 # ranking score for a non-relevant vector: -0.5
@@ -65,12 +65,12 @@ print "ranking score for a non-relevant vector: ", rank(data.nonrelevant[0])
 # In this case, the ordering accuracy tells us how often a non-relevant vector
 # was ranked ahead of a relevant vector. In this case, it returns 1 for both
 # metrics, indicating that the rank function outputs a perfect ranking.
-print dlib.test_ranking_function(rank, data)
+print(dlib.test_ranking_function(rank, data))

 # The ranking scores are computed by taking the dot product between a learned
 # weight vector and a data vector. If you want to see the learned weight vector
 # you can display it like so:
-print "weights: \n", rank.weights
+print("weights: \n", rank.weights)
 # In this case the weights are:
 #  0.5
 # -0.5
@@ -112,7 +112,7 @@ rank = trainer.train(queries)
 # splits and returns the overall ranking accuracy based on the held out data.
 # Just like test_ranking_function(), it reports both the ordering accuracy and
 # mean average precision.
-print "cross validation results: ", dlib.cross_validate_ranking_trainer(trainer, queries, 4)
+print("cross validation results: ", dlib.cross_validate_ranking_trainer(trainer, queries, 4))
@@ -141,8 +141,8 @@ data.nonrelevant.append(samp)
 trainer = dlib.svm_rank_trainer_sparse()
 rank = trainer.train(data)
-print "ranking score for a relevant vector: ", rank(data.relevant[0])
-print "ranking score for a non-relevant vector: ", rank(data.nonrelevant[0])
+print("ranking score for a relevant vector: ", rank(data.relevant[0]))
+print("ranking score for a non-relevant vector: ", rank(data.nonrelevant[0]))
 # Just as before, the output is the following:
 # ranking score for a relevant vector: 0.5
 # ranking score for a non-relevant vector: -0.5
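As the comments in this file state, a ranking score is the dot product of rank.weights with the input vector. A quick arithmetic check, assuming the relevant and non-relevant training vectors created earlier in the example are [1, 0] and [0, 1] (an assumption; those lines are not part of this diff):

def dot(a, b):
    return sum(x * y for x, y in zip(a, b))

weights = [0.5, -0.5]   # the learned weights printed by the example
relevant = [1, 0]       # assumed relevant training vector
nonrelevant = [0, 1]    # assumed non-relevant training vector

print(dot(weights, relevant))     # 0.5, matching the relevant ranking score
print(dot(weights, nonrelevant))  # -0.5, matching the non-relevant ranking score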
python_examples/svm_struct.py
@@ -46,9 +46,9 @@ def main():
     # Print the weights and then evaluate predict_label() on each of our training samples.
     # Note that the correct label is predicted for each sample.
-    print weights
+    print(weights)
     for i in range(len(samples)):
-        print "predicted label for sample[{0}]: {1}".format(i, predict_label(weights, samples[i]))
+        print("predicted label for sample[{0}]: {1}".format(i, predict_label(weights, samples[i])))


 def predict_label(weights, sample):
     """Given the 9-dimensional weight vector which defines a 3 class classifier, predict the
python_examples/train_object_detector.py
@@ -24,10 +24,10 @@ from skimage import io
 # the path to this faces folder as a command line argument so we will know
 # where it is.
 if (len(sys.argv) != 2):
-    print "Give the path to the examples/faces directory as the argument to this"
-    print "program. For example, if you are in the python_examples folder then "
-    print "execute this program by running:"
-    print "    ./train_object_detector.py ../examples/faces"
+    print("Give the path to the examples/faces directory as the argument to this")
+    print("program. For example, if you are in the python_examples folder then ")
+    print("execute this program by running:")
+    print("    ./train_object_detector.py ../examples/faces")
     exit()

 faces_folder = sys.argv[1]
@@ -59,18 +59,18 @@ options.be_verbose = True
 # images with boxes. To see how to use it read the tools/imglab/README.txt
 # file. But for this example, we just use the training.xml file included with
 # dlib.
-dlib.train_simple_object_detector(faces_folder + "/training.xml", "detector.svm", options)
+dlib.train_simple_object_detector(faces_folder + "/training.xml", "detector.svm", options)

 # Now that we have a face detector we can test it. The first statement tests
-# it on the training data. It will print the precision, recall, and then
+# it on the training data. It will print(the precision, recall, and then)
 # average precision.
-print "\ntraining accuracy:", dlib.test_simple_object_detector(faces_folder + "/training.xml", "detector.svm")
+print("\ntraining accuracy:", dlib.test_simple_object_detector(faces_folder + "/training.xml", "detector.svm"))

 # However, to get an idea if it really worked without overfitting we need to
 # run it on images it wasn't trained on. The next line does this. Happily, we
 # see that the object detector works perfectly on the testing images.
-print "testing accuracy: ", dlib.test_simple_object_detector(faces_folder + "/testing.xml", "detector.svm")
+print("testing accuracy: ", dlib.test_simple_object_detector(faces_folder + "/testing.xml", "detector.svm"))
@@ -84,15 +84,15 @@ win_det.set_image(detector)
 # Now let's run the detector over the images in the faces folder and display the
 # results.
-print "\nShowing detections on the images in the faces folder..."
+print("\nShowing detections on the images in the faces folder...")
 win = dlib.image_window()
 for f in glob.glob(faces_folder + "/*.jpg"):
-    print "processing file:", f
+    print("processing file:", f)
     img = io.imread(f)
     dets = detector(img)
-    print "number of faces detected:", len(dets)
+    print("number of faces detected:", len(dets))
     for d in dets:
-        print "  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom()
+        print("  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())
     win.clear_overlay()
     win.set_image(img)
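The hunk header above shows win_det.set_image(detector), so by this point the detector written to detector.svm has been loaded back. For reference, a sketch of that load-and-visualize step using dlib's existing API (the file name is the one used in this example; for a simple_object_detector, set_image() renders the learned HOG filter):

import dlib

# Load the detector that train_simple_object_detector() saved to disk.
detector = dlib.simple_object_detector("detector.svm")

# Display the learned detector in its own window.
win_det = dlib.image_window()
win_det.set_image(detector)
dlib.hit_enter_to_continue()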