// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
/*

    This is an example illustrating the use of the rank_features() function 
    from the dlib C++ Library.  

    This example creates a simple set of data and then shows
    you how to use the rank_features() function to find a good 
    set of features (where "good" means the feature set will probably
    work well with a classification algorithm).

    The data used in this example will be 4 dimensional data and will
    come from a distribution where points with a distance of 15 or less
    from the origin are labeled +1 and all other points are labeled
    as -1.  Note that this data is conceptually 2 dimensional but we
    will add two extra features for the purpose of showing what
    the rank_features() function does.
*/


#include <iostream>
#include <dlib/svm.h>
#include <dlib/rand.h>
#include <vector>

using namespace std;
using namespace dlib;


int main()
{

    // This first typedef declares a matrix with 4 rows and 1 column.  It will be the
    // object that contains each of our 4 dimensional samples.  
    typedef matrix<double, 4, 1> sample_type;



    // Now let's make some vector objects that can hold our samples and their labels.
    std::vector<sample_type> samples;
    std::vector<double> labels;

    dlib::rand rnd;

    for (int x = -30; x <= 30; ++x)
    {
        for (int y = -30; y <= 30; ++y)
        {
            sample_type samp;

            // the first two features are just the (x,y) position of our points and so
            // we expect them to be good features since our two classes here are points
            // close to the origin and points far away from the origin.
            samp(0) = x;
            samp(1) = y;

            // This is a worthless feature since it is just random noise.  It should
            // be indicated as worthless by the rank_features() function below.
            samp(2) = rnd.get_random_double();

            // This is a version of the y feature that is corrupted by random noise.  It
            // should be ranked as less useful than features 0 and 1, but more useful
            // than the above feature.
            samp(3) = y*0.2 + (rnd.get_random_double()-0.5)*10;

            // add this sample into our vector of samples.
            samples.push_back(samp);

            // if this point is within a distance of 15 from the origin then label it as a +1 class point.  
            // otherwise it is a -1 class point
            if (sqrt((double)x*x + y*y) <= 15)
                labels.push_back(+1);
            else
                labels.push_back(-1);
        }
    }


    // Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
    // This is generally a good idea since it often heads off numerical stability problems and also 
    // prevents one large feature from smothering others.
    const sample_type m(mean(mat(samples)));  // compute a mean vector
    const sample_type sd(reciprocal(stddev(mat(samples)))); // compute the reciprocal of the standard deviation vector
    // now normalize each sample
    for (unsigned long i = 0; i < samples.size(); ++i)
        samples[i] = pointwise_multiply(samples[i] - m, sd); 
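
    // As an aside, dlib also provides a vector_normalizer object that performs this same
    // mean/standard deviation normalization.  A minimal sketch (not used here since we just
    // normalized the samples by hand above):
    //    vector_normalizer<sample_type> normalizer;
    //    normalizer.train(samples);
    //    for (unsigned long i = 0; i < samples.size(); ++i)
    //        samples[i] = normalizer(samples[i]);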

    // Randomizing the order of the samples is another thing that is often a good idea since
    // some machine learning algorithms are sensitive to the order in which samples are presented.
    // However, in our case it doesn't really matter.  It's just here to show you how to do it.
    randomize_samples(samples,labels);



    // This is a typedef for the type of kernel we are going to use in this example.
    // In this case I have selected the radial basis kernel that can operate on our
    // 4D sample_type objects.  In general, I would suggest using the same kernel for
    // classification and feature ranking. 
    typedef radial_basis_kernel<sample_type> kernel_type;

    // The radial_basis_kernel has a parameter called gamma that we need to set.  Generally,
    // you should try the same gamma that you are using for training.  But if you don't
    // have a particular gamma in mind then you can use the following function to
    // find a reasonable default gamma for your data.  Another reasonable way to pick a gamma
    // is to use 1.0/compute_mean_squared_distance(randomly_subsample(samples, 2000)), i.e. the
    // reciprocal of the mean squared distance between 2000 randomly selected samples.  This
    // often works quite well.
    const double gamma = verbose_find_gamma_with_big_centroid_gap(samples, labels);

    // Next we declare an instance of the kcentroid object.  It is used by rank_features() 
    // to represent the centroids of the two classes.  The kcentroid has 3 parameters 
    // you need to set.  The first argument to the constructor is the kernel we wish to 
    // use.  The second is a parameter that determines the numerical accuracy with which 
    // the object will perform part of the ranking algorithm.  Generally, smaller values 
    // give better results but cause the algorithm to attempt to use more dictionary vectors 
    // (and thus run slower and use more memory).  The third argument, however, is the 
    // maximum number of dictionary vectors a kcentroid is allowed to use.  So you can use
    // it to put an upper limit on the runtime complexity.  
    kcentroid<kernel_type> kc(kernel_type(gamma), 0.001, 25);

    // And finally we get to the feature ranking.  Here we call rank_features() with the kcentroid
    // we just made and the samples and labels we made above.  It will rank all of the features.
    cout << rank_features(kc, samples, labels) << endl;
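
    // If you want to use the ranking programmatically rather than just printing it, you can
    // store the returned matrix.  A minimal sketch (assuming the two column layout shown in
    // the output below):
    //    matrix<double> ranking = rank_features(kc, samples, labels);
    //    const long best_feature = static_cast<long>(ranking(0,0));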

    // The output is:
    /*
        0 0.749265 
        1        1 
        3 0.933378 
        2 0.825179 
    */

    // The first column is a list of the features in order of decreasing goodness.  So the rank_features() function
    // is telling us that the samples[i](0) and samples[i](1) (i.e. the x and y) features are the best two.  Then
    // after that the next best feature is the samples[i](3) (i.e. the y corrupted by noise) and finally the worst
    // feature is the one that is just random noise.  So in this case rank_features() did exactly what we would
    // intuitively expect.


    // The second column of the matrix is a number that indicates how much the features up to that point
    // contribute to the separation of the two classes.  So bigger numbers are better since they
    // indicate a larger separation.  The max value is always 1.  In the case below we see that the bad
    // features actually make the class separation go down.

    // So to break it down a little more.
    //    0 0.749265   <-- class separation of feature 0 all by itself
    //    1        1   <-- class separation of feature 0 and 1
    //    3 0.933378   <-- class separation of feature 0, 1, and 3
    //    2 0.825179   <-- class separation of feature 0, 1, 3, and 2
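
    // If you wanted to act on this ranking you could, for example, build new 2 dimensional
    // samples out of the two best features.  A minimal sketch (again, not used further in
    // this example):
    //    typedef matrix<double,2,1> reduced_sample_type;
    //    std::vector<reduced_sample_type> reduced_samples;
    //    for (unsigned long i = 0; i < samples.size(); ++i)
    //        reduced_samples.push_back(rowm(samples[i], range(0,1)));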

}