Commit 114f677d authored by Davis King

Fixing grammar in comments.

parent f9d3da11
@@ -16,7 +16,7 @@ using namespace std;
 int main()
 {
-    // Lets begin this example by using the library to solve a simple
+    // Let's begin this example by using the library to solve a simple
     // linear system.
     //
     // We will find the value of x such that y = M*x where
@@ -32,7 +32,7 @@ int main()
     //   5.9 0.05 1
-    // First lets declare these 3 matrices.
+    // First let's declare these 3 matrices.
     // This declares a matrix that contains doubles and has 3 rows and 1 column.
     // Moreover, it's size is a compile time constant since we put it inside the <>.
     matrix<double,3,1> y;
...
@@ -354,7 +354,7 @@ void custom_matrix_expressions_example(
     cout << x << endl;
-    // Finally, lets use the matrix expressions we defined above.
+    // Finally, let's use the matrix expressions we defined above.
     // prints the transpose of x
     cout << example_trans(x) << endl;
@@ -382,7 +382,7 @@ void custom_matrix_expressions_example(
     vect.push_back(3);
     vect.push_back(5);
-    // Now lets treat our std::vector like a matrix and print some things.
+    // Now let's treat our std::vector like a matrix and print some things.
     cout << example_vector_to_matrix(vect) << endl;
     cout << add_scalar(example_vector_to_matrix(vect), 10) << endl;
...
@@ -44,7 +44,7 @@ int main()
     // their default values.
     mlp::kernel_1a_c net(2,5);
-    // Now lets put some data into our sample and train on it. We do this
+    // Now let's put some data into our sample and train on it. We do this
     // by looping over 41*41 points and labeling them according to their
     // distance from the origin.
     for (int i = 0; i < 1000; ++i)
@@ -65,7 +65,7 @@ int main()
         }
     }
-    // Now we have trained our mlp. Lets see how well it did.
+    // Now we have trained our mlp. Let's see how well it did.
     // Note that if you run this program multiple times you will get different results. This
     // is because the mlp network is randomly initialized.
...
@@ -101,7 +101,7 @@ int main()
     std::vector<sample_type> samples;
     std::vector<double> labels;
-    // Now lets put some data into our samples and labels objects. We do this
+    // Now let's put some data into our samples and labels objects. We do this
     // by looping over a bunch of points and labeling them according to their
     // distance from the origin.
     for (double r = -20; r <= 20; r += 0.8)
...
@@ -92,7 +92,7 @@ int main()
     // still be solved with the rbf_trainer.
     trainer.set_trainer(poly_trainer, 1, 2);
-    // Now lets do 5-fold cross-validation using the one_vs_one_trainer we just setup.
+    // Now let's do 5-fold cross-validation using the one_vs_one_trainer we just setup.
     // As an aside, always shuffle the order of the samples before doing cross validation.
     // For a discussion of why this is a good idea see the svm_ex.cpp example.
     randomize_samples(samples, labels);
...
@@ -203,7 +203,7 @@ int main()
     typedef scan_image_pyramid<pyramid_down<5>, very_simple_feature_extractor> image_scanner_type;
     image_scanner_type scanner;
-    // Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, lets manually
+    // Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, let's manually
     // setup the sliding window box. We use a window with the same shape as the white boxes we
     // are trying to detect.
     const rectangle object_box = compute_box_dimensions(1, // width/height ratio
@@ -272,7 +272,7 @@ int main()
     */
-    // Lets display the output of the detector along with our training images.
+    // Let's display the output of the detector along with our training images.
     image_window win;
     for (unsigned long i = 0; i < images.size(); ++i)
     {
...
@@ -226,7 +226,7 @@ int main()
-    // Lets display the output of the detector along with our training images.
+    // Let's display the output of the detector along with our training images.
     image_window win;
     for (unsigned long i = 0; i < images.size(); ++i)
     {
...
@@ -66,7 +66,7 @@ int main()
     // anomalous (i.e. not on the sinc() curve in our case).
     decision_function<kernel_type> df = trainer.train(samples);
-    // So for example, lets look at the output from some points on the sinc() curve.
+    // So for example, let's look at the output from some points on the sinc() curve.
     cout << "Points that are on the sinc function:\n";
     m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
     m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
...
@@ -201,7 +201,7 @@ int main()
     cout << "rosen solution:\n" << starting_point << endl;
-    // Now lets try doing it again with a different starting point and the version
+    // Now let's try doing it again with a different starting point and the version
     // of find_min() that doesn't require you to supply a derivative function.
     // This version will compute a numerical approximation of the derivative since
     // we didn't supply one to it.
@@ -285,7 +285,7 @@ int main()
-    // Now lets look at using the test_function object with the optimization
+    // Now let's look at using the test_function object with the optimization
     // functions.
     cout << "\nFind the minimum of the test_function" << endl;
@@ -306,7 +306,7 @@ int main()
     // At this point the correct value of (3,5,1,7) should be found and stored in starting_point
     cout << "test_function solution:\n" << starting_point << endl;
-    // Now lets try it again with the conjugate gradient algorithm.
+    // Now let's try it again with the conjugate gradient algorithm.
     starting_point = -4,5,99,3;
     find_min_using_approximate_derivatives(cg_search_strategy(),
                                            objective_delta_stop_strategy(1e-7),
@@ -315,7 +315,7 @@ int main()
-    // Finally, lets try the BOBYQA algorithm. This is a technique specially
+    // Finally, let's try the BOBYQA algorithm. This is a technique specially
     // designed to minimize a function in the absence of derivative information.
     // Generally speaking, it is the method of choice if derivatives are not available.
     starting_point = -4,5,99,3;
...
@@ -296,8 +296,8 @@ int main()
-    // Now lets test out the Shor 9 bit encoding
-    cout << "\n\n\n\nNow lets try playing around with Shor's 9bit error correcting code" << endl;
+    // Now let's test out the Shor 9 bit encoding
+    cout << "\n\n\n\nNow let's try playing around with Shor's 9bit error correcting code" << endl;
     // Reset the quantum register to contain a single bit
     reg.set_num_bits(1);
...
@@ -36,7 +36,7 @@ int main()
-    // Now lets make some vector objects that can hold our samples
+    // Now let's make some vector objects that can hold our samples
     std::vector<sample_type> samples;
     std::vector<double> labels;
...
@@ -47,7 +47,7 @@ int main()
     std::vector<sample_type> samples;
     std::vector<double> labels;
-    // Now lets put some data into our samples and labels objects. We do this
+    // Now let's put some data into our samples and labels objects. We do this
     // by looping over a bunch of points and labeling them according to their
     // distance from the origin.
     for (int r = -20; r <= 20; ++r)
@@ -141,11 +141,11 @@ int main()
     learned_function.normalizer = normalizer; // save normalization information
     learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results
-    // print out the number of relevance vectors in the resulting decision function
+    // Print out the number of relevance vectors in the resulting decision function.
     cout << "\nnumber of relevance vectors in our learned_function is "
          << learned_function.function.basis_vectors.size() << endl;
-    // now lets try this decision_function on some samples we haven't seen before
+    // Now let's try this decision_function on some samples we haven't seen before
     sample_type sample;
     sample(0) = 3.123;
@@ -209,7 +209,7 @@ int main()
     serialize(learned_pfunct,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(learned_pfunct, fin);
...
@@ -95,7 +95,7 @@ int main()
     serialize(test,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(test, fin);
...
@@ -192,7 +192,7 @@ int main()
     sequence_segmenter<feature_extractor> segmenter = trainer.train(samples, segments);
-    // Lets print out all the segments our segmenter detects.
+    // Let's print out all the segments our segmenter detects.
     for (unsigned long i = 0; i < samples.size(); ++i)
     {
         // get all the detected segments in samples[i]
@@ -205,7 +205,7 @@ int main()
     }
-    // Now lets test it on a new sentence and see what it detects.
+    // Now let's test it on a new sentence and see what it detects.
     std::vector<std::string> sentence(split("There once was a man from Nantucket whose name rhymed with Bob Bucket"));
     std::vector<std::pair<unsigned long,unsigned long> > seg = segmenter(sentence);
     for (unsigned long j = 0; j < seg.size(); ++j)
...
@@ -47,7 +47,7 @@ int main()
     std::vector<sample_type> samples;
     std::vector<double> labels;
-    // Now lets put some data into our samples and labels objects. We do this by looping
+    // Now let's put some data into our samples and labels objects. We do this by looping
     // over a bunch of points and labeling them according to their distance from the
     // origin.
     for (int r = -20; r <= 20; ++r)
@@ -149,7 +149,7 @@ int main()
     cout << "\nnumber of support vectors in our learned_function is "
          << learned_function.function.basis_vectors.size() << endl;
-    // now lets try this decision_function on some samples we haven't seen before
+    // Now let's try this decision_function on some samples we haven't seen before.
     sample_type sample;
     sample(0) = 3.123;
@@ -214,7 +214,7 @@ int main()
     serialize(learned_pfunct,fout);
     fout.close();
-    // now lets open that file back up and load the function object it contains
+    // Now let's open that file back up and load the function object it contains.
     ifstream fin("saved_function.dat",ios::binary);
     deserialize(learned_pfunct, fin);
@@ -242,7 +242,7 @@ int main()
     cout << "\ncross validation accuracy with only 10 support vectors: "
          << cross_validate_trainer(reduced2(trainer,10), samples, labels, 3);
-    // Lets print out the original cross validation score too for comparison.
+    // Let's print out the original cross validation score too for comparison.
     cout << "cross validation accuracy with all the original support vectors: "
          << cross_validate_trainer(trainer, samples, labels, 3);
...
@@ -67,7 +67,7 @@ int main()
     center = 20, 20;
-    // Now lets go into a loop and randomly generate 1000 samples.
+    // Now let's go into a loop and randomly generate 1000 samples.
     srand(time(0));
     for (int i = 0; i < 10000; ++i)
     {
@@ -96,7 +96,7 @@ int main()
         }
     }
-    // Now we have trained our SVM. Lets see how well it did.
+    // Now we have trained our SVM. Let's see how well it did.
     // Each of these statements prints out the output of the SVM given a particular sample.
     // The SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
     // if a sample is predicted to be in the -1 class.
@@ -123,7 +123,7 @@ int main()
     // function. To support this the dlib library provides functions for converting an online
     // training object like svm_pegasos into a batch training object.
-    // First lets clear out anything in the trainer object.
+    // First let's clear out anything in the trainer object.
     trainer.clear();
     // Now to begin with, you might want to compute the cross validation score of a trainer object
...
@@ -38,7 +38,7 @@ int main()
     typedef matrix<double,2,1> sample_type;
-    // Now lets make some testing data. To make it really simple, lets
+    // Now let's make some testing data. To make it really simple, let's
     // suppose that vectors with positive values in the first dimension
     // should rank higher than other vectors. So what we do is make
     // examples of relevant (i.e. high ranking) and non-relevant (i.e. low
...
@@ -45,7 +45,7 @@ int main()
     // description of what this parameter does.
     trainer.set_lambda(0.00001);
-    // Lets also use the svm trainer specially optimized for the linear_kernel and
+    // Let's also use the svm trainer specially optimized for the linear_kernel and
     // sparse_linear_kernel.
     svm_c_linear_trainer<kernel_type> linear_trainer;
     // This trainer solves the "C" formulation of the SVM. See the documentation for
@@ -59,7 +59,7 @@ int main()
     sample_type sample;
-    // Now lets go into a loop and randomly generate 10000 samples.
+    // Now let's go into a loop and randomly generate 10000 samples.
     srand(time(0));
     double label = +1;
     for (int i = 0; i < 10000; ++i)
@@ -87,11 +87,11 @@ int main()
         labels.push_back(label);
     }
-    // In addition to the rule we learned with the pegasos trainer lets also use our linear_trainer
-    // to learn a decision rule.
+    // In addition to the rule we learned with the pegasos trainer, let's also use our
+    // linear_trainer to learn a decision rule.
     decision_function<kernel_type> df = linear_trainer.train(samples, labels);
-    // Now we have trained our SVMs. Lets test them out a bit.
+    // Now we have trained our SVMs. Let's test them out a bit.
     // Each of these statements prints the output of the SVMs given a particular sample.
     // Each SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
     // if a sample is predicted to be in the -1 class.
...
@@ -245,7 +245,7 @@ public:
         // are the four virtual functions defined below.
-        // So lets make an empty 9-dimensional PSI vector
+        // So let's make an empty 9-dimensional PSI vector
         feature_vector_type psi(get_num_dimensions());
         psi = 0; // zero initialize it
...
@@ -23,7 +23,7 @@
        cmake --build . --config Release
    Note that you may need to install CMake (www.cmake.org) for this to work.
-    Next, lets assume you have a folder of images called /tmp/images. These images
+    Next, let's assume you have a folder of images called /tmp/images. These images
     should contain examples of the objects you want to learn to detect. You will
     use the imglab tool to label these objects. Do this by typing the following
        ./imglab -c mydataset.xml /tmp/images
...