// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
/*

    This is an example that shows some reasonable ways you can perform
    model selection with the dlib C++ Library.  

    It will create a simple set of data and then show you how to use 
    the cross validation and optimization routines to determine good model 
    parameters for the purpose of training an svm to classify the sample data.

    The data used in this example will be 2 dimensional data and will
    come from a distribution where points with a distance less than 10
    from the origin are labeled +1 and all other points are labeled
    as -1.
        

    As an aside, you should probably read the svm_ex.cpp and matrix_ex.cpp example 
    programs before you read this one.
*/


#include <iostream>
#include <iomanip>   // for setw(), used below when printing cross validation results
#include <dlib/svm.h>


using namespace std;
using namespace dlib;

// The svm functions use column vectors to contain a lot of the data on which they 
// operate. So the first thing we do here is declare a convenient typedef.  

// This typedef declares a matrix with 2 rows and 1 column.  It will be the
// object that contains each of our 2 dimensional samples.   
typedef matrix<double, 2, 1> sample_type;

// This is a typedef for the type of kernel we are going to use in this example.
// In this case I have selected the radial basis kernel that can operate on our
// 2D sample_type objects
typedef radial_basis_kernel<sample_type> kernel_type;

44
class cross_validation_objective
{
45
46
47
48
49
50
51
52
53
    /*!
        WHAT THIS OBJECT REPRESENTS
            This object is a simple function object that takes a set of model
            parameters and returns a number indicating how "good" they are.  It
            does this by performing 10 fold cross validation on our dataset
            and reporting the accuracy.

            See below in main() for how this object gets used. 
    !*/
54
55
56
57
58
59
60
61
62
63
64
public:

    cross_validation_objective (
        const std::vector<sample_type>& samples_,
        const std::vector<double>& labels_
    ) : samples(samples_), labels(labels_) {}

    double operator() (
        const matrix<double>& params
    ) const
    {
65
66
67
        // Pull out the two SVM model parameters.  Note that, in this case,
        // I have setup the parameter search to operate in log scale so we have
        // to remember to call exp() to put the parameters back into a normal scale.
68
69
70
        const double gamma = exp(params(0));
        const double nu    = exp(params(1));

71
        // Make an SVM trainer and tell it what the parameters are supposed to be.
72
73
74
75
        svm_nu_trainer<kernel_type> trainer;
        trainer.set_kernel(kernel_type(gamma));
        trainer.set_nu(nu);

76
        // Finally, perform 10-fold cross validation and then print and return the results.
77
78
        matrix<double> result = cross_validate_trainer(trainer, samples, labels, 10);
        cout << "gamma: " << setw(11) << gamma << "  nu: " << setw(11) << nu <<  "  cross validation accuracy: " << result;
79

80
81
82
83
84
85
        // Here I'm returning the harmonic mean between the accuracies of each class.
        // However, you could do something else.  For example, you might care a lot more
        // about correctly predicting the +1 class, so you could penalize results that
        // didn't obtain a high accuracy on that class.  You might do this by using 
        // something like a weighted version of the F1-score (see http://en.wikipedia.org/wiki/F1_score).     
        return 2*prod(result)/sum(result);
86
87
88
89
90
91
92
    }

    const std::vector<sample_type>& samples;
    const std::vector<double>& labels;

};

93

94
95
int main()
{
96
    try
97
98
    {

99
100
101
        // Now we make objects to contain our samples and their respective labels.
        std::vector<sample_type> samples;
        std::vector<double> labels;
102

Davis King's avatar
Davis King committed
103
        // Now let's put some data into our samples and labels objects.  We do this
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
        // by looping over a bunch of points and labeling them according to their
        // distance from the origin.
        for (double r = -20; r <= 20; r += 0.8)
        {
            for (double c = -20; c <= 20; c += 0.8)
            {
                sample_type samp;
                samp(0) = r;
                samp(1) = c;
                samples.push_back(samp);

                // if this point is less than 10 from the origin
                if (sqrt(r*r + c*c) <= 10)
                    labels.push_back(+1);
                else
                    labels.push_back(-1);

            }
122
123
        }

124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
        cout << "Generated " << samples.size() << " points" << endl;


        // Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
        // This is generally a good idea since it often heads off numerical stability problems and also 
        // prevents one large feature from smothering others.  Doing this doesn't matter much in this example
        // so I'm just doing this here so you can see an easy way to accomplish this with 
        // the library.  
        vector_normalizer<sample_type> normalizer;
        // let the normalizer learn the mean and standard deviation of the samples
        normalizer.train(samples);
        // now normalize each sample
        for (unsigned long i = 0; i < samples.size(); ++i)
            samples[i] = normalizer(samples[i]); 


        // Now that we have some data we want to train on it.  However, there are two parameters to the 
        // training.  These are the nu and gamma parameters.  Our choice for these parameters will 
        // influence how good the resulting decision function is.  To test how good a particular choice 
Davis King's avatar
Davis King committed
143
        // of these parameters is we can use the cross_validate_trainer() function to perform n-fold cross
144
145
146
        // validation on our training data.  However, there is a problem with the way we have sampled 
        // our distribution above.  The problem is that there is a definite ordering to the samples.  
        // That is, the first half of the samples look like they are from a different distribution 
147
        // than the second half.  This would screw up the cross validation process but we can 
148
149
150
151
152
        // fix it by randomizing the order of the samples with the following function call.
        randomize_samples(samples, labels);


        // The nu parameter has a maximum value that is dependent on the ratio of the +1 to -1 
153
154
155
156
157
        // labels in the training data.  This function finds that value.  The 0.999 is here because
        // the maximum allowable nu is strictly less than the value returned by maximum_nu().  So
        // rather than dealing with that below we can just back away from it a little bit here and then
        // not worry about it.
        const double max_nu = 0.999*maximum_nu(labels);
158
159
160
161



        // The first kind of model selection we will do is a simple grid search.  That is, below we just
162
        // generate a fixed grid of points (each point represents one possible setting of the model parameters)
163
164
165
166
167
168
        // and test each via cross validation.

        // This code generates a 4x4 grid of logarithmically spaced points.  The result is a matrix
        // with 2 rows and 16 columns where each column represents one of our points. 
        matrix<double> params = cartesian_product(logspace(log10(5.0), log10(1e-5), 4),  // gamma parameter
                                                  logspace(log10(max_nu), log10(1e-5), 4) // nu parameter
Davis King's avatar
Davis King committed
169
                                                  );
170
171
172
173
174
175
176
177
178
        // As an aside, if you wanted to do a grid search over points of dimensionality more than two
        // you would just nest calls to cartesian_product().  You can also use linspace() to generate 
        // linearly spaced points if that is more appropriate for the parameters you are working with.   


        // Next we loop over all the points we generated and check how good each is.
        cout << "Doing a grid search" << endl;
        matrix<double> best_result(2,1);
        best_result = 0;
179
        double best_gamma = 0.1, best_nu;
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
        for (long col = 0; col < params.nc(); ++col)
        {
            // pull out the current set of model parameters
            const double gamma = params(0, col);
            const double nu    = params(1, col);

            // setup a training object using our current parameters
            svm_nu_trainer<kernel_type> trainer;
            trainer.set_kernel(kernel_type(gamma));
            trainer.set_nu(nu);

            // Finally, do 10 fold cross validation and then check if the results are the best we have seen so far.
            matrix<double> result = cross_validate_trainer(trainer, samples, labels, 10);
            cout << "gamma: " << setw(11) << gamma << "  nu: " << setw(11) << nu <<  "  cross validation accuracy: " << result;

            // save the best results
            if (sum(result) > sum(best_result))
            {
                best_result = result;
                best_gamma = gamma;
                best_nu = nu;
            }
        }
203

204
205
        cout << "\n best result of grid search: " << sum(best_result) << endl;
        cout << " best gamma: " << best_gamma << "   best nu: " << best_nu << endl;
206
207
208



209
210
        // Grid search is a very simple brute force method.  Below we try out the BOBYQA algorithm.
        // It is a routine that performs optimization of a function in the absence of derivatives.  
211

212
        cout << "\n\n Try the BOBYQA algorithm" << endl;
213

214
215
        // We need to supply a starting point for the optimization.  Here we are using the best
        // result of the grid search.  Generally, you want to try and give a reasonable starting
Davis King's avatar
Davis King committed
216
        // point due to the possibility of the optimization getting stuck in a local maxima.  
217
218
        params.set_size(2,1);
        params = best_gamma, // initial gamma
219
                 best_nu;    // initial nu
220

221
222
        // We also need to supply lower and upper bounds for the search.  
        matrix<double> lower_bound(2,1), upper_bound(2,1);
223
224
        lower_bound = 1e-7,   // smallest allowed gamma
                      1e-7;   // smallest allowed nu
225
        upper_bound = 100,    // largest allowed gamma
226
                      max_nu; // largest allowed nu
227
228


229
230
231
232
233
234
        // For the gamma and nu SVM parameters it is generally a good idea to search
        // in log space.  So I'm just converting them into log space here before
        // we start the optimization.
        params = log(params);
        lower_bound = log(lower_bound);
        upper_bound = log(upper_bound);
235

236
237
238
239
240
241
242
243
244
245
246
247
        // Finally, ask BOBYQA to look for the best set of parameters.  Note that we are using the
        // cross validation function object defined at the top of the file.
        double best_score = find_max_bobyqa(
            cross_validation_objective(samples, labels), // Function to maximize
            params,                                      // starting point
            params.size()*2 + 1,                         // See BOBYQA docs, generally size*2+1 is a good setting for this
            lower_bound,                                 // lower bound 
            upper_bound,                                 // upper bound
            min(upper_bound-lower_bound)/10,             // search radius
            0.01,                                        // desired accuracy
            100                                          // max number of allowable calls to cross_validation_objective()
            );
248

249
250
        // Don't forget to convert back from log scale to normal scale
        params = exp(params);
251

252
253
        cout << " best result of BOBYQA: " << best_score << endl;
        cout << " best gamma: " << params(0) << "   best nu: " << params(1) << endl;
254

255
        // Also note that the find_max_bobyqa() function only works for optimization problems
256
        // with 2 variables or more.  If you only have a single variable then you should use
257
        // the find_max_single_variable() function.
258

259
260
261
262
263
    }
    catch (exception& e)
    {
        cout << e.what() << endl;
    }
264
265
}