Commit 114f677d authored by Davis King's avatar Davis King
Browse files

Fixing grammar in comments.

parent f9d3da11
...@@ -161,7 +161,7 @@ int main() ...@@ -161,7 +161,7 @@ int main()
// We have now finished setting up our bayesian network. So lets compute some // We have now finished setting up our bayesian network. So let's compute some
// probability values. The first thing we will do is compute the prior probability // probability values. The first thing we will do is compute the prior probability
// of each node in the network. To do this we will use the join tree algorithm which // of each node in the network. To do this we will use the join tree algorithm which
// is an algorithm for performing exact inference in a bayesian network. // is an algorithm for performing exact inference in a bayesian network.
...@@ -198,7 +198,7 @@ int main() ...@@ -198,7 +198,7 @@ int main()
cout << "\n\n\n"; cout << "\n\n\n";
// Now to make things more interesting lets say that we have discovered that the C // Now to make things more interesting let's say that we have discovered that the C
// node really has a value of 1. That is to say, we now have evidence that // node really has a value of 1. That is to say, we now have evidence that
// C is 1. We can represent this in the network using the following two function // C is 1. We can represent this in the network using the following two function
// calls. // calls.
......
...@@ -44,7 +44,7 @@ int main(int argc, char** argv) ...@@ -44,7 +44,7 @@ int main(int argc, char** argv)
cout << "Number of nodes in the network: " << bn.number_of_nodes() << endl; cout << "Number of nodes in the network: " << bn.number_of_nodes() << endl;
// Lets compute some probability values using the loaded network using the join tree (aka. Junction // Let's compute some probability values using the loaded network using the join tree (aka. Junction
// Tree) algorithm. // Tree) algorithm.
// First we need to create an undirected graph which contains set objects at each node and // First we need to create an undirected graph which contains set objects at each node and
......
...@@ -413,7 +413,7 @@ initialize_node_cpt_if_necessary ( ...@@ -413,7 +413,7 @@ initialize_node_cpt_if_necessary (
{ {
node_type& node = graph_drawer.graph_node(index); node_type& node = graph_drawer.graph_node(index);
// if the cpt for this node isn't properly filled out then lets clear it out // if the cpt for this node isn't properly filled out then let's clear it out
// and populate it with some reasonable default values // and populate it with some reasonable default values
if (node_cpt_filled_out(graph_drawer.graph(), index) == false) if (node_cpt_filled_out(graph_drawer.graph(), index) == false)
{ {
......
...@@ -103,7 +103,7 @@ void run_example_1( ...@@ -103,7 +103,7 @@ void run_example_1(
// Now lets put some things into the out pipe // Now let's put some things into the out pipe
int value = 1; int value = 1;
out.enqueue(value); out.enqueue(value);
...@@ -308,7 +308,7 @@ void run_example_4( ...@@ -308,7 +308,7 @@ void run_example_4(
bridge_status bs; bridge_status bs;
// Once a connection is established it will generate a status message from each bridge. // Once a connection is established it will generate a status message from each bridge.
// Lets get those and print them. // Let's get those and print them.
b1_status.dequeue(bs); b1_status.dequeue(bs);
cout << "bridge 1 status: is_connected: " << boolalpha << bs.is_connected << endl; cout << "bridge 1 status: is_connected: " << boolalpha << bs.is_connected << endl;
cout << "bridge 1 status: foreign_ip: " << bs.foreign_ip << endl; cout << "bridge 1 status: foreign_ip: " << bs.foreign_ip << endl;
......
...@@ -75,7 +75,7 @@ int main() ...@@ -75,7 +75,7 @@ int main()
// Use our recursive function to print everything in the config file. // Use our recursive function to print everything in the config file.
print_config_reader_contents(cr); print_config_reader_contents(cr);
// Now lets access some of the fields of the config file directly. You // Now let's access some of the fields of the config file directly. You
// use [] for accessing key values and .block() for accessing sub-blocks. // use [] for accessing key values and .block() for accessing sub-blocks.
// Print out the string value assigned to key1 in the config file // Print out the string value assigned to key1 in the config file
......
...@@ -174,7 +174,7 @@ int main() ...@@ -174,7 +174,7 @@ int main()
trainer.set_trainer(rbf_trainer, "upper_left", "lower_right"); trainer.set_trainer(rbf_trainer, "upper_left", "lower_right");
// Now lets do 5-fold cross-validation using the one_vs_one_trainer we just set up. // Now let's do 5-fold cross-validation using the one_vs_one_trainer we just set up.
// As an aside, always shuffle the order of the samples before doing cross validation. // As an aside, always shuffle the order of the samples before doing cross validation.
// For a discussion of why this is a good idea see the svm_ex.cpp example. // For a discussion of why this is a good idea see the svm_ex.cpp example.
randomize_samples(samples, labels); randomize_samples(samples, labels);
...@@ -201,7 +201,7 @@ int main() ...@@ -201,7 +201,7 @@ int main()
*/ */
// Finally, lets save our multiclass decision rule to disk. Remember that we have // Finally, let's save our multiclass decision rule to disk. Remember that we have
// to specify the types of binary decision function used inside the one_vs_one_decision_function. // to specify the types of binary decision function used inside the one_vs_one_decision_function.
one_vs_one_decision_function<ovo_trainer, one_vs_one_decision_function<ovo_trainer,
custom_decision_function, // This is the output of the simple_custom_trainer custom_decision_function, // This is the output of the simple_custom_trainer
......
...@@ -76,7 +76,7 @@ using namespace dlib; ...@@ -76,7 +76,7 @@ using namespace dlib;
// ---------------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------------
// First lets make a typedef for the kind of samples we will be using. // First let's make a typedef for the kind of samples we will be using.
typedef matrix<double, 0, 1> sample_type; typedef matrix<double, 0, 1> sample_type;
// We will be using the radial_basis_kernel in this example program. // We will be using the radial_basis_kernel in this example program.
...@@ -213,7 +213,7 @@ void test_empirical_kernel_map ( ...@@ -213,7 +213,7 @@ void test_empirical_kernel_map (
// Now lets do something more interesting. The following loop finds the centroids // Now let's do something more interesting. The following loop finds the centroids
// of the two classes of data. // of the two classes of data.
sample_type class1_center; sample_type class1_center;
sample_type class2_center; sample_type class2_center;
...@@ -254,7 +254,7 @@ void test_empirical_kernel_map ( ...@@ -254,7 +254,7 @@ void test_empirical_kernel_map (
// Next, note that classifying a point based on its distance between two other // Next, note that classifying a point based on its distance between two other
// points is the same thing as using the plane that lies between those two points // points is the same thing as using the plane that lies between those two points
// as a decision boundary. So lets compute that decision plane and use it to classify // as a decision boundary. So let's compute that decision plane and use it to classify
// all the points. // all the points.
sample_type plane_normal_vector = class1_center - class2_center; sample_type plane_normal_vector = class1_center - class2_center;
...@@ -291,7 +291,7 @@ void test_empirical_kernel_map ( ...@@ -291,7 +291,7 @@ void test_empirical_kernel_map (
{ {
double side = dec_funct(samples[i]); double side = dec_funct(samples[i]);
// And lets just check that the dec_funct really does compute the same thing as the previous equation. // And let's just check that the dec_funct really does compute the same thing as the previous equation.
double side_alternate_equation = dot(plane_normal_vector, projected_samples[i]) - bias; double side_alternate_equation = dot(plane_normal_vector, projected_samples[i]) - bias;
if (abs(side-side_alternate_equation) > 1e-14) if (abs(side-side_alternate_equation) > 1e-14)
cout << "dec_funct error: " << abs(side-side_alternate_equation) << endl; cout << "dec_funct error: " << abs(side-side_alternate_equation) << endl;
......
...@@ -55,7 +55,7 @@ int main(int argc, char** argv) ...@@ -55,7 +55,7 @@ int main(int argc, char** argv)
cout << "hog image has " << hog.nr() << " rows and " << hog.nc() << " columns." << endl; cout << "hog image has " << hog.nr() << " rows and " << hog.nc() << " columns." << endl;
// Lets see what the image and FHOG features look like. // Let's see what the image and FHOG features look like.
image_window win(img); image_window win(img);
image_window winhog(draw_fhog(hog)); image_window winhog(draw_fhog(hog));
......
...@@ -161,7 +161,7 @@ int main(int argc, char** argv) ...@@ -161,7 +161,7 @@ int main(int argc, char** argv)
// a face. // a face.
image_window hogwin(draw_fhog(detector), "Learned fHOG detector"); image_window hogwin(draw_fhog(detector), "Learned fHOG detector");
// Now for the really fun part. Lets display the testing images on the screen and // Now for the really fun part. Let's display the testing images on the screen and
// show the output of the face detector overlaid on each image. You will see that // show the output of the face detector overlaid on each image. You will see that
// it finds all the faces without false alarming on any non-faces. // it finds all the faces without false alarming on any non-faces.
image_window win; image_window win;
...@@ -191,7 +191,7 @@ int main(int argc, char** argv) ...@@ -191,7 +191,7 @@ int main(int argc, char** argv)
// Now lets talk about some optional features of this training tool as well as some // Now let's talk about some optional features of this training tool as well as some
// important points you should understand. // important points you should understand.
// //
// The first thing that should be pointed out is that, since this is a sliding // The first thing that should be pointed out is that, since this is a sliding
......
...@@ -194,13 +194,13 @@ int main() ...@@ -194,13 +194,13 @@ int main()
// indicate that all nodes were correctly classified. // indicate that all nodes were correctly classified.
cout << "3-fold cross-validation: " << cross_validate_graph_labeling_trainer(trainer, samples, labels, 3) << endl; cout << "3-fold cross-validation: " << cross_validate_graph_labeling_trainer(trainer, samples, labels, 3) << endl;
// Since the trainer is working well, lets have it make a graph_labeler // Since the trainer is working well, let's have it make a graph_labeler
// based on the training data. // based on the training data.
graph_labeler<vector_type> labeler = trainer.train(samples, labels); graph_labeler<vector_type> labeler = trainer.train(samples, labels);
/* /*
Lets try the graph_labeler on a new test graph. In particular, lets Let's try the graph_labeler on a new test graph. In particular, let's
use one with 5 nodes as shown below: use one with 5 nodes as shown below:
(0 F)-----(1 T) (0 F)-----(1 T)
......
...@@ -114,7 +114,7 @@ public: ...@@ -114,7 +114,7 @@ public:
b.set_pos(10,60); b.set_pos(10,60);
b.set_name("button"); b.set_name("button");
// lets put the label 5 pixels below the button // let's put the label 5 pixels below the button
c.set_pos(b.left(),b.bottom()+5); c.set_pos(b.left(),b.bottom()+5);
...@@ -137,7 +137,7 @@ public: ...@@ -137,7 +137,7 @@ public:
// functions or lambda functions. // functions or lambda functions.
// Lets also make a simple menu bar. // Let's also make a simple menu bar.
// First we say how many menus we want in our menu bar. In this example we only want 1. // First we say how many menus we want in our menu bar. In this example we only want 1.
mbar.set_number_of_menus(1); mbar.set_number_of_menus(1);
// Now we set the name of our menu. The 'M' means that the M in Menu will be underlined // Now we set the name of our menu. The 'M' means that the M in Menu will be underlined
...@@ -147,12 +147,12 @@ public: ...@@ -147,12 +147,12 @@ public:
// Now we add some items to the menu. Note that items in a menu are listed in the // Now we add some items to the menu. Note that items in a menu are listed in the
// order in which they were added. // order in which they were added.
// First lets make a menu item that does the same thing as our button does when it is clicked. // First let's make a menu item that does the same thing as our button does when it is clicked.
// Again, the 'C' means the C in Click is underlined in the menu. // Again, the 'C' means the C in Click is underlined in the menu.
mbar.menu(0).add_menu_item(menu_item_text("Click Button!",*this,&win::on_button_clicked,'C')); mbar.menu(0).add_menu_item(menu_item_text("Click Button!",*this,&win::on_button_clicked,'C'));
// lets add a separator (i.e. a horizontal separating line) to the menu // let's add a separator (i.e. a horizontal separating line) to the menu
mbar.menu(0).add_menu_item(menu_item_separator()); mbar.menu(0).add_menu_item(menu_item_separator());
// Now lets make a menu item that calls show_about when the user selects it. // Now let's make a menu item that calls show_about when the user selects it.
mbar.menu(0).add_menu_item(menu_item_text("About",*this,&win::show_about,'A')); mbar.menu(0).add_menu_item(menu_item_text("About",*this,&win::show_about,'A'));
......
...@@ -46,7 +46,7 @@ int main(int argc, char** argv) ...@@ -46,7 +46,7 @@ int main(int argc, char** argv)
load_image(img, argv[1]); load_image(img, argv[1]);
// Now lets use some image functions. First lets blur the image a little. // Now let's use some image functions. First let's blur the image a little.
array2d<unsigned char> blurred_img; array2d<unsigned char> blurred_img;
gaussian_blur(img, blurred_img); gaussian_blur(img, blurred_img);
...@@ -58,7 +58,7 @@ int main(int argc, char** argv) ...@@ -58,7 +58,7 @@ int main(int argc, char** argv)
// now we do the non-maximum edge suppression step so that our edges are nice and thin // now we do the non-maximum edge suppression step so that our edges are nice and thin
suppress_non_maximum_edges(horz_gradient, vert_gradient, edge_image); suppress_non_maximum_edges(horz_gradient, vert_gradient, edge_image);
// Now we would like to see what our images look like. So lets use a // Now we would like to see what our images look like. So let's use a
// window to display them on the screen. (Note that you can zoom into // window to display them on the screen. (Note that you can zoom into
// the window by holding CTRL and scrolling the mouse wheel) // the window by holding CTRL and scrolling the mouse wheel)
image_window my_window(edge_image, "Normal Edge Image"); image_window my_window(edge_image, "Normal Edge Image");
......
...@@ -28,7 +28,7 @@ int main() ...@@ -28,7 +28,7 @@ int main()
iosockstream stream("www.google.com:80"); iosockstream stream("www.google.com:80");
// At this point, we can use stream the same way we would use any other // At this point, we can use stream the same way we would use any other
// C++ iostream object. So to test it out, lets make an HTTP GET request // C++ iostream object. So to test it out, let's make an HTTP GET request
// for the main Google page. // for the main Google page.
stream << "GET / HTTP/1.0\r\n\r\n"; stream << "GET / HTTP/1.0\r\n\r\n";
......
...@@ -66,7 +66,7 @@ int main() ...@@ -66,7 +66,7 @@ int main()
running_stats<double> rs; running_stats<double> rs;
// Now lets output the distance from the centroid to some points that are from the sinc function. // Now let's output the distance from the centroid to some points that are from the sinc function.
// These numbers should all be similar. We will also calculate the statistics of these numbers // These numbers should all be similar. We will also calculate the statistics of these numbers
// by accumulating them into the running_stats object called rs. This will let us easily // by accumulating them into the running_stats object called rs. This will let us easily
// find the mean and standard deviation of the distances for use below. // find the mean and standard deviation of the distances for use below.
...@@ -80,7 +80,7 @@ int main() ...@@ -80,7 +80,7 @@ int main()
m(0) = -0.5; m(1) = sinc(m(0)); cout << " " << test(m) << endl; rs.add(test(m)); m(0) = -0.5; m(1) = sinc(m(0)); cout << " " << test(m) << endl; rs.add(test(m));
cout << endl; cout << endl;
// Lets output the distance from the centroid to some points that are NOT from the sinc function. // Let's output the distance from the centroid to some points that are NOT from the sinc function.
// These numbers should all be significantly bigger than the previous set of numbers. We will also // These numbers should all be significantly bigger than the previous set of numbers. We will also
// use the rs.scale() function to find out how many standard deviations they are away from the // use the rs.scale() function to find out how many standard deviations they are away from the
// mean of the test points from the sinc function. So in this case our criterion for "significantly bigger" // mean of the test points from the sinc function. So in this case our criterion for "significantly bigger"
......
...@@ -82,7 +82,7 @@ int main() ...@@ -82,7 +82,7 @@ int main()
serialize(test,fout); serialize(test,fout);
fout.close(); fout.close();
// now lets open that file back up and load the krls object it contains // Now let's open that file back up and load the krls object it contains.
ifstream fin("saved_krls_object.dat",ios::binary); ifstream fin("saved_krls_object.dat",ios::binary);
deserialize(test, fin); deserialize(test, fin);
......
...@@ -63,7 +63,7 @@ int main() ...@@ -63,7 +63,7 @@ int main()
dlib::rand rnd; dlib::rand rnd;
// Now lets loop over a big range of values from the sinc() function. Each time // Now let's loop over a big range of values from the sinc() function. Each time
// adding some random noise to the data we send to the krls object for training. // adding some random noise to the data we send to the krls object for training.
sample_type m; sample_type m;
double mse_noise = 0; double mse_noise = 0;
......
...@@ -43,7 +43,7 @@ int main() ...@@ -43,7 +43,7 @@ int main()
std::vector<sample_type> samples; std::vector<sample_type> samples;
std::vector<double> labels; std::vector<double> labels;
// Now lets put some data into our samples and labels objects. We do this // Now let's put some data into our samples and labels objects. We do this
// by looping over a bunch of points and labeling them according to their // by looping over a bunch of points and labeling them according to their
// distance from the origin. // distance from the origin.
for (double r = -20; r <= 20; r += 0.4) for (double r = -20; r <= 20; r += 0.4)
...@@ -129,7 +129,7 @@ int main() ...@@ -129,7 +129,7 @@ int main()
cout << "\nnumber of basis vectors in our learned_function is " cout << "\nnumber of basis vectors in our learned_function is "
<< learned_function.function.basis_vectors.size() << endl; << learned_function.function.basis_vectors.size() << endl;
// Now lets try this decision_function on some samples we haven't seen before. // Now let's try this decision_function on some samples we haven't seen before.
// The decision function will return values >= 0 for samples it predicts // The decision function will return values >= 0 for samples it predicts
// are in the +1 class and numbers < 0 for samples it predicts to be in the -1 class. // are in the +1 class and numbers < 0 for samples it predicts to be in the -1 class.
sample_type sample; sample_type sample;
...@@ -200,7 +200,7 @@ int main() ...@@ -200,7 +200,7 @@ int main()
serialize(learned_pfunct,fout); serialize(learned_pfunct,fout);
fout.close(); fout.close();
// now lets open that file back up and load the function object it contains // Now let's open that file back up and load the function object it contains.
ifstream fin("saved_function.dat",ios::binary); ifstream fin("saved_function.dat",ios::binary);
deserialize(learned_pfunct, fin); deserialize(learned_pfunct, fin);
......
...@@ -98,7 +98,7 @@ int main() ...@@ -98,7 +98,7 @@ int main()
serialize(test,fout); serialize(test,fout);
fout.close(); fout.close();
// now lets open that file back up and load the function object it contains // Now let's open that file back up and load the function object it contains.
ifstream fin("saved_function.dat",ios::binary); ifstream fin("saved_function.dat",ios::binary);
deserialize(test, fin); deserialize(test, fin);
......
...@@ -95,7 +95,7 @@ int main() ...@@ -95,7 +95,7 @@ int main()
cout << "params: " << trans(params) << endl; cout << "params: " << trans(params) << endl;
// Now lets generate a bunch of input/output pairs according to our model. // Now let's generate a bunch of input/output pairs according to our model.
std::vector<std::pair<input_vector, double> > data_samples; std::vector<std::pair<input_vector, double> > data_samples;
input_vector input; input_vector input;
for (int i = 0; i < 1000; ++i) for (int i = 0; i < 1000; ++i)
...@@ -107,7 +107,7 @@ int main() ...@@ -107,7 +107,7 @@ int main()
data_samples.push_back(make_pair(input, output)); data_samples.push_back(make_pair(input, output));
} }
// Before we do anything, lets make sure that our derivative function defined above matches // Before we do anything, let's make sure that our derivative function defined above matches
// the approximate derivative computed using central differences (via derivative()). // the approximate derivative computed using central differences (via derivative()).
// If this value is big then it means we probably typed the derivative function incorrectly. // If this value is big then it means we probably typed the derivative function incorrectly.
cout << "derivative error: " << length(residual_derivative(data_samples[0], params) - cout << "derivative error: " << length(residual_derivative(data_samples[0], params) -
...@@ -117,7 +117,7 @@ int main() ...@@ -117,7 +117,7 @@ int main()
// Now lets use the solve_least_squares_lm() routine to figure out what the // Now let's use the solve_least_squares_lm() routine to figure out what the
// parameters are based on just the data_samples. // parameters are based on just the data_samples.
parameter_vector x; parameter_vector x;
x = 1; x = 1;
......
...@@ -98,7 +98,7 @@ using namespace dlib; ...@@ -98,7 +98,7 @@ using namespace dlib;
// ---------------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------------
// First lets make a typedef for the kind of samples we will be using. // First let's make a typedef for the kind of samples we will be using.
typedef matrix<double, 0, 1> sample_type; typedef matrix<double, 0, 1> sample_type;
// We will be using the radial_basis_kernel in this example program. // We will be using the radial_basis_kernel in this example program.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment