"vscode:/vscode.git/clone" did not exist on "2664ed2dff9d0e9e45d2349c531b460b31cec215"
Commit 114f677d authored by Davis King

Fixing grammar in comments.

parent f9d3da11
@@ -16,7 +16,7 @@ using namespace std;
 int main()
 {
-// Lets begin this example by using the library to solve a simple
+// Let's begin this example by using the library to solve a simple
 // linear system.
 //
 // We will find the value of x such that y = M*x where
@@ -32,7 +32,7 @@ int main()
 // 5.9 0.05 1
-// First lets declare these 3 matrices.
+// First let's declare these 3 matrices.
 // This declares a matrix that contains doubles and has 3 rows and 1 column.
 // Moreover, it's size is a compile time constant since we put it inside the <>.
 matrix<double,3,1> y;
......
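For context, the example these hunks come from solves the linear system y = M*x with dlib's matrix class. A minimal standalone sketch of that step; apart from the `5.9 0.05 1` row quoted in the hunk, the numeric values below are illustrative, not taken from the file:

```cpp
#include <dlib/matrix.h>
#include <iostream>
using namespace dlib;

int main()
{
    matrix<double,3,3> M;   // sizes are compile-time constants
    matrix<double,3,1> y;

    // dlib matrices support comma-separated assignment
    M = 54.2,  7.4, 12.1,
         1.0,  2.0,  3.0,
         5.9, 0.05,  1.0;
    y = 3.5, 1.2, 7.8;

    // solve y = M*x for x by inverting M
    matrix<double,3,1> x = inv(M)*y;
    std::cout << x << std::endl;

    // sanity check: M*x should reproduce y
    std::cout << M*x << std::endl;
    return 0;
}
```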
@@ -354,7 +354,7 @@ void custom_matrix_expressions_example(
 cout << x << endl;
-// Finally, lets use the matrix expressions we defined above.
+// Finally, let's use the matrix expressions we defined above.
 // prints the transpose of x
 cout << example_trans(x) << endl;
@@ -382,7 +382,7 @@ void custom_matrix_expressions_example(
 vect.push_back(3);
 vect.push_back(5);
-// Now lets treat our std::vector like a matrix and print some things.
+// Now let's treat our std::vector like a matrix and print some things.
 cout << example_vector_to_matrix(vect) << endl;
 cout << add_scalar(example_vector_to_matrix(vect), 10) << endl;
......
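The hunk above exercises a user-defined matrix expression from the example. dlib also ships a built-in vector_to_matrix() that does the same wrapping; a minimal sketch assuming only that built-in:

```cpp
#include <dlib/matrix.h>
#include <iostream>
#include <vector>
using namespace dlib;

int main()
{
    std::vector<int> vect;
    vect.push_back(3);
    vect.push_back(5);

    // vector_to_matrix() makes the std::vector usable wherever a dlib
    // matrix expression is expected, without copying the data.
    std::cout << vector_to_matrix(vect) << std::endl;
    std::cout << sum(vector_to_matrix(vect)) << std::endl;
    return 0;
}
```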
@@ -44,7 +44,7 @@ int main()
 // their default values.
 mlp::kernel_1a_c net(2,5);
-// Now lets put some data into our sample and train on it. We do this
+// Now let's put some data into our sample and train on it. We do this
 // by looping over 41*41 points and labeling them according to their
 // distance from the origin.
 for (int i = 0; i < 1000; ++i)
@@ -65,7 +65,7 @@ int main()
 }
 }
-// Now we have trained our mlp. Lets see how well it did.
+// Now we have trained our mlp. Let's see how well it did.
 // Note that if you run this program multiple times you will get different results. This
 // is because the mlp network is randomly initialized.
......
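For context, these two mlp hunks train a small multilayer perceptron on grid points labeled by their distance from the origin. A condensed sketch of that pattern; the distance threshold of 10 and the exact loop nesting are assumptions inferred from the comments, not quoted from the file:

```cpp
#include <dlib/mlp.h>
#include <cmath>
#include <iostream>
using namespace dlib;

int main()
{
    // network with 2 inputs and 5 hidden nodes; other parameters default
    mlp::kernel_1a_c net(2,5);

    matrix<double,2,1> sample;
    // repeatedly sweep the 41*41 grid, labeling each point by whether
    // it falls within distance 10 of the origin (threshold assumed)
    for (int i = 0; i < 1000; ++i)
    {
        for (int r = -20; r <= 20; ++r)
        {
            for (int c = -20; c <= 20; ++c)
            {
                sample(0) = r;
                sample(1) = c;
                if (std::sqrt((double)r*r + c*c) <= 10)
                    net.train(sample, 1);
                else
                    net.train(sample, 0);
            }
        }
    }

    // query the trained network; output should be near 1 inside the circle
    sample(0) = 3.0;
    sample(1) = 4.0;
    std::cout << net(sample) << std::endl;
    return 0;
}
```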
@@ -101,7 +101,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this
+// Now let's put some data into our samples and labels objects. We do this
 // by looping over a bunch of points and labeling them according to their
 // distance from the origin.
 for (double r = -20; r <= 20; r += 0.8)
......
@@ -92,7 +92,7 @@ int main()
 // still be solved with the rbf_trainer.
 trainer.set_trainer(poly_trainer, 1, 2);
-// Now lets do 5-fold cross-validation using the one_vs_one_trainer we just setup.
+// Now let's do 5-fold cross-validation using the one_vs_one_trainer we just setup.
 // As an aside, always shuffle the order of the samples before doing cross validation.
 // For a discussion of why this is a good idea see the svm_ex.cpp example.
 randomize_samples(samples, labels);
......
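For context, this hunk runs 5-fold cross-validation on a one_vs_one_trainer. A self-contained sketch of the same workflow on made-up three-class data; the cluster centers, the krr_trainer choice, and the kernel gamma are all invented for illustration:

```cpp
#include <dlib/svm_threaded.h>
#include <dlib/rand.h>
#include <iostream>
using namespace dlib;
using namespace std;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef one_vs_one_trainer<any_trainer<sample_type> > ovo_trainer;
    typedef radial_basis_kernel<sample_type> rbf_kernel;

    // generate three toy Gaussian clusters, one per class
    std::vector<sample_type> samples;
    std::vector<double> labels;
    dlib::rand rnd;
    const double cx[] = {0, 10, -10};
    const double cy[] = {0, 10,  10};
    for (int c = 0; c < 3; ++c)
    {
        for (int i = 0; i < 100; ++i)
        {
            sample_type s;
            s = cx[c] + rnd.get_random_gaussian(),
                cy[c] + rnd.get_random_gaussian();
            samples.push_back(s);
            labels.push_back(c+1);
        }
    }

    // use one kernel ridge regression trainer for every pairwise problem
    ovo_trainer trainer;
    krr_trainer<rbf_kernel> rbf_trainer;
    rbf_trainer.set_kernel(rbf_kernel(0.1));
    trainer.set_trainer(rbf_trainer);

    // shuffle before cross-validating, then print the confusion matrix
    randomize_samples(samples, labels);
    cout << cross_validate_multiclass_trainer(trainer, samples, labels, 5) << endl;
    return 0;
}
```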
@@ -203,7 +203,7 @@ int main()
 typedef scan_image_pyramid<pyramid_down<5>, very_simple_feature_extractor> image_scanner_type;
 image_scanner_type scanner;
-// Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, lets manually
+// Instead of using setup_grid_detection_templates() like in object_detector_ex.cpp, let's manually
 // setup the sliding window box. We use a window with the same shape as the white boxes we
 // are trying to detect.
 const rectangle object_box = compute_box_dimensions(1, // width/height ratio
@@ -272,7 +272,7 @@ int main()
 */
-// Lets display the output of the detector along with our training images.
+// Let's display the output of the detector along with our training images.
 image_window win;
 for (unsigned long i = 0; i < images.size(); ++i)
 {
......
@@ -226,7 +226,7 @@ int main()
-// Lets display the output of the detector along with our training images.
+// Let's display the output of the detector along with our training images.
 image_window win;
 for (unsigned long i = 0; i < images.size(); ++i)
 {
......
@@ -66,7 +66,7 @@ int main()
 // anomalous (i.e. not on the sinc() curve in our case).
 decision_function<kernel_type> df = trainer.train(samples);
-// So for example, lets look at the output from some points on the sinc() curve.
+// So for example, let's look at the output from some points on the sinc() curve.
 cout << "Points that are on the sinc function:\n";
 m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
 m(0) = -1.5; m(1) = sinc(m(0)); cout << " " << df(m) << endl;
......
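For context, this hunk is from the one-class SVM example: train on points from the sinc() curve, then check that on-curve points score higher than off-curve ones. A hedged sketch of that workflow; nu, the kernel gamma, and the sampling range are invented values:

```cpp
#include <dlib/svm.h>
#include <cmath>
#include <iostream>
using namespace dlib;
using namespace std;

double sinc(double x)
{
    if (x == 0)
        return 1;
    return sin(x)/x;
}

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    // training set: points lying exactly on the sinc() curve
    std::vector<sample_type> samples;
    sample_type m;
    for (double x = -15; x <= 8; x += 0.3)
    {
        m(0) = x;
        m(1) = sinc(x);
        samples.push_back(m);
    }

    svm_one_class_trainer<kernel_type> trainer;
    trainer.set_nu(0.001);
    trainer.set_kernel(kernel_type(0.1));
    decision_function<kernel_type> df = trainer.train(samples);

    // a point on the curve should score higher...
    m(0) = -1.5; m(1) = sinc(m(0));
    cout << "on the curve:  " << df(m) << endl;
    // ...than the same point pushed off the curve
    m(0) = -1.5; m(1) = sinc(m(0)) + 4;
    cout << "off the curve: " << df(m) << endl;
    return 0;
}
```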
@@ -201,7 +201,7 @@ int main()
 cout << "rosen solution:\n" << starting_point << endl;
-// Now lets try doing it again with a different starting point and the version
+// Now let's try doing it again with a different starting point and the version
 // of find_min() that doesn't require you to supply a derivative function.
 // This version will compute a numerical approximation of the derivative since
 // we didn't supply one to it.
@@ -285,7 +285,7 @@ int main()
-// Now lets look at using the test_function object with the optimization
+// Now let's look at using the test_function object with the optimization
 // functions.
 cout << "\nFind the minimum of the test_function" << endl;
@@ -306,7 +306,7 @@ int main()
 // At this point the correct value of (3,5,1,7) should be found and stored in starting_point
 cout << "test_function solution:\n" << starting_point << endl;
-// Now lets try it again with the conjugate gradient algorithm.
+// Now let's try it again with the conjugate gradient algorithm.
 starting_point = -4,5,99,3;
 find_min_using_approximate_derivatives(cg_search_strategy(),
 objective_delta_stop_strategy(1e-7),
@@ -315,7 +315,7 @@ int main()
-// Finally, lets try the BOBYQA algorithm. This is a technique specially
+// Finally, let's try the BOBYQA algorithm. This is a technique specially
 // designed to minimize a function in the absence of derivative information.
 // Generally speaking, it is the method of choice if derivatives are not available.
 starting_point = -4,5,99,3;
......
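For context, these optimization hunks step through find_min_using_approximate_derivatives() with the BFGS and conjugate gradient search strategies, and finally find_min_bobyqa(). A condensed sketch of the three calls on a stand-in quadratic objective; the objective, bounds, and BOBYQA parameters below are illustrative, not quoted from the example's own test_function:

```cpp
#include <dlib/optimization.h>
#include <cmath>
#include <iostream>
using namespace dlib;
using namespace std;

typedef matrix<double,0,1> column_vector;

// stand-in objective with its minimum at (3,5,1,7); the real example
// uses its own rosen/test_function objects instead
double quadratic(const column_vector& m)
{
    return pow(m(0)-3,2) + pow(m(1)-5,2) + pow(m(2)-1,2) + pow(m(3)-7,2);
}

int main()
{
    column_vector starting_point(4);

    // BFGS with numerically approximated derivatives (-1 is a lower
    // bound on the objective value)
    starting_point = -4,5,99,3;
    find_min_using_approximate_derivatives(bfgs_search_strategy(),
                                           objective_delta_stop_strategy(1e-7),
                                           quadratic, starting_point, -1);
    cout << starting_point << endl;

    // the same search with the conjugate gradient strategy
    starting_point = -4,5,99,3;
    find_min_using_approximate_derivatives(cg_search_strategy(),
                                           objective_delta_stop_strategy(1e-7),
                                           quadratic, starting_point, -1);
    cout << starting_point << endl;

    // BOBYQA: derivative-free; needs bound constraints and a number of
    // interpolation points (9 here, for a 4-variable problem)
    starting_point = -4,5,99,3;
    find_min_bobyqa(quadratic, starting_point, 9,
                    uniform_matrix<double>(4,1,-1e100), // lower bounds
                    uniform_matrix<double>(4,1, 1e100), // upper bounds
                    10,    // initial trust region radius
                    1e-6,  // stopping trust region radius
                    100);  // max objective evaluations
    cout << starting_point << endl;
    return 0;
}
```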
@@ -296,8 +296,8 @@ int main()
-// Now lets test out the Shor 9 bit encoding
-cout << "\n\n\n\nNow lets try playing around with Shor's 9bit error correcting code" << endl;
+// Now let's test out the Shor 9 bit encoding
+cout << "\n\n\n\nNow let's try playing around with Shor's 9bit error correcting code" << endl;
 // Reset the quantum register to contain a single bit
 reg.set_num_bits(1);
......
@@ -36,7 +36,7 @@ int main()
-// Now lets make some vector objects that can hold our samples
+// Now let's make some vector objects that can hold our samples
 std::vector<sample_type> samples;
 std::vector<double> labels;
......
@@ -47,7 +47,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this
+// Now let's put some data into our samples and labels objects. We do this
 // by looping over a bunch of points and labeling them according to their
 // distance from the origin.
 for (int r = -20; r <= 20; ++r)
@@ -141,11 +141,11 @@ int main()
 learned_function.normalizer = normalizer; // save normalization information
 learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results
-// print out the number of relevance vectors in the resulting decision function
+// Print out the number of relevance vectors in the resulting decision function.
 cout << "\nnumber of relevance vectors in our learned_function is "
 << learned_function.function.basis_vectors.size() << endl;
-// now lets try this decision_function on some samples we haven't seen before
+// Now let's try this decision_function on some samples we haven't seen before
 sample_type sample;
 sample(0) = 3.123;
@@ -209,7 +209,7 @@ int main()
 serialize(learned_pfunct,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(learned_pfunct, fin);
......
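For context, this hunk (and the matching ones below) shows dlib's save/load pattern for learned functions. A minimal sketch of the round trip, assuming the example's normalized probabilistic function type:

```cpp
#include <dlib/svm.h>
#include <fstream>
using namespace dlib;
using namespace std;

typedef matrix<double,2,1> sample_type;
typedef radial_basis_kernel<sample_type> kernel_type;
typedef probabilistic_decision_function<kernel_type> prob_funct;
typedef normalized_function<prob_funct> pfunct_type;

// Save a learned function to disk, then load it back. serialize() and
// deserialize() are dlib's standard free functions for this.
void save_and_reload(pfunct_type& learned_pfunct)
{
    ofstream fout("saved_function.dat", ios::binary);
    serialize(learned_pfunct, fout);
    fout.close();

    ifstream fin("saved_function.dat", ios::binary);
    deserialize(learned_pfunct, fin);
}
```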
@@ -95,7 +95,7 @@ int main()
 serialize(test,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(test, fin);
......
@@ -192,7 +192,7 @@ int main()
 sequence_segmenter<feature_extractor> segmenter = trainer.train(samples, segments);
-// Lets print out all the segments our segmenter detects.
+// Let's print out all the segments our segmenter detects.
 for (unsigned long i = 0; i < samples.size(); ++i)
 {
 // get all the detected segments in samples[i]
@@ -205,7 +205,7 @@ int main()
 }
-// Now lets test it on a new sentence and see what it detects.
+// Now let's test it on a new sentence and see what it detects.
 std::vector<std::string> sentence(split("There once was a man from Nantucket whose name rhymed with Bob Bucket"));
 std::vector<std::pair<unsigned long,unsigned long> > seg = segmenter(sentence);
 for (unsigned long j = 0; j < seg.size(); ++j)
......
@@ -47,7 +47,7 @@ int main()
 std::vector<sample_type> samples;
 std::vector<double> labels;
-// Now lets put some data into our samples and labels objects. We do this by looping
+// Now let's put some data into our samples and labels objects. We do this by looping
 // over a bunch of points and labeling them according to their distance from the
 // origin.
 for (int r = -20; r <= 20; ++r)
@@ -149,7 +149,7 @@ int main()
 cout << "\nnumber of support vectors in our learned_function is "
 << learned_function.function.basis_vectors.size() << endl;
-// now lets try this decision_function on some samples we haven't seen before
+// Now let's try this decision_function on some samples we haven't seen before.
 sample_type sample;
 sample(0) = 3.123;
@@ -214,7 +214,7 @@ int main()
 serialize(learned_pfunct,fout);
 fout.close();
-// now lets open that file back up and load the function object it contains
+// Now let's open that file back up and load the function object it contains.
 ifstream fin("saved_function.dat",ios::binary);
 deserialize(learned_pfunct, fin);
@@ -242,7 +242,7 @@ int main()
 cout << "\ncross validation accuracy with only 10 support vectors: "
 << cross_validate_trainer(reduced2(trainer,10), samples, labels, 3);
-// Lets print out the original cross validation score too for comparison.
+// Let's print out the original cross validation score too for comparison.
 cout << "cross validation accuracy with all the original support vectors: "
 << cross_validate_trainer(trainer, samples, labels, 3);
......
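For context, this hunk compares the cross-validation accuracy of a model reduced to 10 support vectors against the full model. A self-contained sketch of that comparison; the kernel gamma and nu values are placeholders, not taken from the example:

```cpp
#include <dlib/svm.h>
#include <cmath>
#include <iostream>
using namespace dlib;
using namespace std;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    // label grid points by whether they fall within distance 10 of the origin
    std::vector<sample_type> samples;
    std::vector<double> labels;
    for (int r = -20; r <= 20; ++r)
    {
        for (int c = -20; c <= 20; ++c)
        {
            sample_type samp;
            samp(0) = r;
            samp(1) = c;
            samples.push_back(samp);
            labels.push_back(sqrt((double)r*r + c*c) <= 10 ? +1 : -1);
        }
    }

    svm_nu_trainer<kernel_type> trainer;
    trainer.set_kernel(kernel_type(0.15625)); // placeholder gamma
    trainer.set_nu(0.15625);                  // placeholder nu

    randomize_samples(samples, labels);
    // full model vs. the same trainer reduced to 10 basis vectors
    cout << "full:    " << cross_validate_trainer(trainer, samples, labels, 3);
    cout << "reduced: " << cross_validate_trainer(reduced2(trainer,10), samples, labels, 3);
    return 0;
}
```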
@@ -67,7 +67,7 @@ int main()
 center = 20, 20;
-// Now lets go into a loop and randomly generate 1000 samples.
+// Now let's go into a loop and randomly generate 1000 samples.
 srand(time(0));
 for (int i = 0; i < 10000; ++i)
 {
@@ -96,7 +96,7 @@ int main()
 }
 }
-// Now we have trained our SVM. Lets see how well it did.
+// Now we have trained our SVM. Let's see how well it did.
 // Each of these statements prints out the output of the SVM given a particular sample.
 // The SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
 // if a sample is predicted to be in the -1 class.
@@ -123,7 +123,7 @@ int main()
 // function. To support this the dlib library provides functions for converting an online
 // training object like svm_pegasos into a batch training object.
-// First lets clear out anything in the trainer object.
+// First let's clear out anything in the trainer object.
 trainer.clear();
 // Now to begin with, you might want to compute the cross validation score of a trainer object
......
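For context, this is where the example converts the online svm_pegasos trainer into a batch trainer so tools like cross_validate_trainer() can drive it. A hedged sketch of that conversion; the lambda, kernel width, and the 0.1 minimum learning rate below are illustrative:

```cpp
#include <dlib/svm.h>
#include <iostream>
using namespace dlib;
using namespace std;

typedef matrix<double,2,1> sample_type;
typedef radial_basis_kernel<sample_type> kernel_type;

// Wrap the online pegasos trainer in dlib's batch_cached() adapter so it
// presents the normal batch trainer interface (and caches kernel products).
void cv_pegasos(const std::vector<sample_type>& samples,
                const std::vector<double>& labels)
{
    svm_pegasos<kernel_type> trainer;
    trainer.set_lambda(0.00001);
    trainer.set_kernel(kernel_type(0.5));

    // 0.1 is the minimum learning rate used as the stopping condition
    cout << cross_validate_trainer(batch_cached(trainer, 0.1),
                                   samples, labels, 4);
}
```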
@@ -38,7 +38,7 @@ int main()
 typedef matrix<double,2,1> sample_type;
-// Now lets make some testing data. To make it really simple, lets
+// Now let's make some testing data. To make it really simple, let's
 // suppose that vectors with positive values in the first dimension
 // should rank higher than other vectors. So what we do is make
 // examples of relevant (i.e. high ranking) and non-relevant (i.e. low
......
@@ -45,7 +45,7 @@ int main()
 // description of what this parameter does.
 trainer.set_lambda(0.00001);
-// Lets also use the svm trainer specially optimized for the linear_kernel and
+// Let's also use the svm trainer specially optimized for the linear_kernel and
 // sparse_linear_kernel.
 svm_c_linear_trainer<kernel_type> linear_trainer;
 // This trainer solves the "C" formulation of the SVM. See the documentation for
@@ -59,7 +59,7 @@ int main()
 sample_type sample;
-// Now lets go into a loop and randomly generate 10000 samples.
+// Now let's go into a loop and randomly generate 10000 samples.
 srand(time(0));
 double label = +1;
 for (int i = 0; i < 10000; ++i)
@@ -87,11 +87,11 @@ int main()
 labels.push_back(label);
 }
-// In addition to the rule we learned with the pegasos trainer lets also use our linear_trainer
-// to learn a decision rule.
+// In addition to the rule we learned with the pegasos trainer, let's also use our
+// linear_trainer to learn a decision rule.
 decision_function<kernel_type> df = linear_trainer.train(samples, labels);
-// Now we have trained our SVMs. Lets test them out a bit.
+// Now we have trained our SVMs. Let's test them out a bit.
 // Each of these statements prints the output of the SVMs given a particular sample.
 // Each SVM outputs a number > 0 if a sample is predicted to be in the +1 class and < 0
 // if a sample is predicted to be in the -1 class.
......
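For context, this hunk trains the linear-kernel-specialized svm_c_linear_trainer on the convention that a positive first coordinate means the +1 class. A self-contained sketch of that half of the example; the data generator and the C value are invented:

```cpp
#include <dlib/svm.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
using namespace dlib;
using namespace std;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef linear_kernel<sample_type> kernel_type;

    // toy rule: positive first coordinate => +1 class
    std::vector<sample_type> samples;
    std::vector<double> labels;
    srand(time(0));
    for (int i = 0; i < 10000; ++i)
    {
        sample_type s;
        s(0) = (rand()%21) - 10;
        s(1) = (rand()%21) - 10;
        if (s(0) == 0)
            continue; // skip ambiguous points on the boundary
        samples.push_back(s);
        labels.push_back(s(0) > 0 ? +1 : -1);
    }

    // solves the "C" formulation of the SVM, specialized for linear kernels
    svm_c_linear_trainer<kernel_type> linear_trainer;
    linear_trainer.set_c(10);

    decision_function<kernel_type> df = linear_trainer.train(samples, labels);

    sample_type m;
    m = 4, -2;   // should print a number > 0 (+1 class)
    cout << df(m) << endl;
    m = -4, 2;   // should print a number < 0 (-1 class)
    cout << df(m) << endl;
    return 0;
}
```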
@@ -245,7 +245,7 @@ public:
 // are the four virtual functions defined below.
-// So lets make an empty 9-dimensional PSI vector
+// So let's make an empty 9-dimensional PSI vector
 feature_vector_type psi(get_num_dimensions());
 psi = 0; // zero initialize it
......
@@ -23,7 +23,7 @@
 cmake --build . --config Release
 Note that you may need to install CMake (www.cmake.org) for this to work.
-Next, lets assume you have a folder of images called /tmp/images. These images
+Next, let's assume you have a folder of images called /tmp/images. These images
 should contain examples of the objects you want to learn to detect. You will
 use the imglab tool to label these objects. Do this by typing the following
 ./imglab -c mydataset.xml /tmp/images
......