// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
/*

    This is an example illustrating the use of the relevance vector machine
    utilities from the dlib C++ Library.  

    This example creates a simple set of data to train on and then shows
    you how to use the cross validation and rvm training functions
    to find a good decision function that can classify examples in our
    data set.


    The data used in this example will be 2 dimensional data and will
    come from a distribution where points with a distance less than or
    equal to 10 from the origin are labeled +1 and all other points are
    labeled as -1.
        
*/


#include <fstream>
#include <iostream>

#include "dlib/svm.h"

using namespace std;
using namespace dlib;


int main()
{
    // The rvm functions use column vectors to contain a lot of the data on which they they 
    // operate. So the first thing we do here is declare a convenient typedef.  

    // This typedef declares a matrix with 2 rows and 1 column.  It will be the
    // object that contains each of our 2 dimensional samples.   (Note that if you wanted 
    // more than 2 features in this vector you can simply change the 2 to something else.
    // Or if you don't know how many features you want until runtime then you can put a 0
    // here and use the matrix.set_size() member function)
    typedef matrix<double, 2, 1> sample_type;

    // This is a typedef for the type of kernel we are going to use in this example.
    // In this case I have selected the radial basis kernel that can operate on our
    // 2D sample_type objects
    typedef radial_basis_kernel<sample_type> kernel_type;


    // Now we make objects to contain our samples and their respective labels.
    std::vector<sample_type> samples;
    std::vector<double> labels;

    // Now lets put some data into our samples and labels objects.  We do this
    // by looping over a bunch of points and labeling them according to their
    // distance from the origin.
    for (int r = -20; r <= 20; ++r)
    {
        for (int c = -20; c <= 20; ++c)
        {
            sample_type samp;
            samp(0) = r;
            samp(1) = c;
            samples.push_back(samp);

            // if this point is less than 10 from the origin
            if (sqrt((double)r*r + c*c) <= 10)
                labels.push_back(+1);
            else
                labels.push_back(-1);

        }
    }


    // Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
    // This is generally a good idea since it often heads off numerical stability problems and also 
    // prevents one large feature from smothering others.  Doing this doesn't matter much in this example
    // so I'm just doing this here so you can see an easy way to accomplish this with 
    // the library.  
77
78
79
    vector_normalizer<sample_type> normalizer;
    // let the normalizer learn the mean and standard deviation of the samples
    normalizer.train(samples);
Davis King's avatar
Davis King committed
80
81
    // now normalize each sample
    for (unsigned long i = 0; i < samples.size(); ++i)
82
83
        samples[i] = normalizer(samples[i]); 

Davis King's avatar
Davis King committed
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127



    // Now that we have some data we want to train on it.  However, there is a parameter to the 
    // training.  This is the gamma parameter of the RBF kernel.  Our choice for this parameter will 
    // influence how good the resulting decision function is.  To test how good a particular choice of
    // kernel parameters are we can use the cross_validate_trainer() function to perform n-fold cross
    // validation on our training data.  However, there is a problem with the way we have sampled 
    // our distribution.  The problem is that there is a definite ordering to the samples.  
    // That is, the first half of the samples look like they are from a different distribution 
    // than the second half do.  This would screw up the cross validation process but we can 
    // fix it by randomizing the order of the samples with the following function call.
    randomize_samples(samples, labels);


    // here we make an instance of the rvm_trainer object that uses our kernel type.
    rvm_trainer<kernel_type> trainer;

    // Now we loop over some different gamma values to see how good they are.  Note
    // that this is just a simple brute force way to try out a few possible parameter 
    // choices.  You may want to investigate more sophisticated strategies for determining 
    // good parameter choices.
    cout << "doing cross validation" << endl;
    for (double gamma = 0.00001; gamma <= 1; gamma += 0.1)
    {
        // tell the trainer the parameters we want to use
        trainer.set_kernel(kernel_type(gamma));

        cout << "gamma: " << gamma;
        // Print out the cross validation accuracy for 3-fold cross validation using the current gamma.  
        // cross_validate_trainer() returns a row vector.  The first element of the vector is the fraction
        // of +1 training examples correctly classified and the second number is the fraction of -1 training 
        // examples correctly classified.
        cout << "     cross validation accuracy: " << cross_validate_trainer(trainer, samples, labels, 3);
    }


    // From looking at the output of the above loop it turns out that a good value for 
    // gamma for this problem is 0.1.  So that is what we will use.

    // Now we train on the full set of data and obtain the resulting decision function.  We use the
    // value of 0.1 for gamma.  The decision function will return values >= 0 for samples it predicts
    // are in the +1 class and numbers < 0 for samples it predicts to be in the -1 class.
    trainer.set_kernel(kernel_type(0.1));
128
129
130
131
132
133
134
135
136
    typedef decision_function<kernel_type> dec_funct_type;
    typedef normalized_function<dec_funct_type> funct_type;


    // Here we are making an instance of the normalized_function object.  This object provides a convenient 
    // way to store the vector normalization information along with the decision function we are
    // going to learn.  
    funct_type learned_function;
    learned_function.normalizer = normalizer;  // save normalization information
Davis King's avatar
Davis King committed
137
    learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results
Davis King's avatar
Davis King committed
138
139

    // print out the number of support vectors in the resulting decision function
140
141
    cout << "\nnumber of support vectors in our learned_function is " 
         << learned_function.function.support_vectors.nr() << endl;
Davis King's avatar
Davis King committed
142
143
144
145
146
147

    // now lets try this decision_function on some samples we haven't seen before 
    sample_type sample;

    sample(0) = 3.123;
    sample(1) = 2;
148
    cout << "This sample should be >= 0 and it is classified as a " << learned_function(sample) << endl;
Davis King's avatar
Davis King committed
149
150
151

    sample(0) = 3.123;
    sample(1) = 9.3545;
152
    cout << "This sample should be >= 0 and it is classified as a " << learned_function(sample) << endl;
Davis King's avatar
Davis King committed
153
154
155

    sample(0) = 13.123;
    sample(1) = 9.3545;
156
    cout << "This sample should be < 0 and it is classified as a " << learned_function(sample) << endl;
Davis King's avatar
Davis King committed
157
158
159

    sample(0) = 13.123;
    sample(1) = 0;
160
    cout << "This sample should be < 0 and it is classified as a " << learned_function(sample) << endl;
Davis King's avatar
Davis King committed
161
162
163
164
165


    // We can also train a decision function that reports a well conditioned probability 
    // instead of just a number > 0 for the +1 class and < 0 for the -1 class.  An example 
    // of doing that follows:
166
167
168
169
170
171
    typedef probabilistic_decision_function<kernel_type> probabilistic_funct_type;  
    typedef normalized_function<probabilistic_funct_type> pfunct_type;

    pfunct_type learned_pfunct; 
    learned_pfunct.normalizer = normalizer;
    learned_pfunct.function = train_probabilistic_decision_function(trainer, samples, labels, 3);
Davis King's avatar
Davis King committed
172
173
174
175
    // Now we have a function that returns the probability that a given sample is of the +1 class.  

    // print out the number of support vectors in the resulting decision function.  
    // (it should be the same as in the one above)
176
177
    cout << "\nnumber of support vectors in our learned_pfunct is " 
         << learned_pfunct.function.decision_funct.support_vectors.nr() << endl;
Davis King's avatar
Davis King committed
178
179
180

    sample(0) = 3.123;
    sample(1) = 2;
181
    cout << "This +1 example should have high probability.  It's probability is: " << learned_pfunct(sample) << endl;
Davis King's avatar
Davis King committed
182
183
184

    sample(0) = 3.123;
    sample(1) = 9.3545;
185
    cout << "This +1 example should have high probability.  It's probability is: " << learned_pfunct(sample) << endl;
Davis King's avatar
Davis King committed
186
187
188

    sample(0) = 13.123;
    sample(1) = 9.3545;
189
    cout << "This -1 example should have low probability.  It's probability is: " << learned_pfunct(sample) << endl;
Davis King's avatar
Davis King committed
190
191
192

    sample(0) = 13.123;
    sample(1) = 0;
193
    cout << "This -1 example should have low probability.  It's probability is: " << learned_pfunct(sample) << endl;
Davis King's avatar
Davis King committed
194
195


196
197
198
199
200
201
202
203
204
205
206
207

    // Another thing that is worth knowing is that just about everything in dlib is serializable.
    // So for example, you can save the learned_pfunct object to disk and recall it later like so:
    ofstream fout("saved_function.dat",ios::binary);
    serialize(learned_pfunct,fout);
    fout.close();

    // now lets open that file back up and load the function object it contains
    ifstream fin("saved_function.dat",ios::binary);
    deserialize(learned_pfunct, fin);


Davis King's avatar
Davis King committed
208
209
}