// Copyright (C) 2014  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.

#include <dlib/python.h>
#include <dlib/matrix.h>
#include <boost/python/args.hpp>
#include <dlib/geometry.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include "indexing.h"
#include "simple_object_detector.h"
#include "simple_object_detector_py.h"
#include "conversion.h"

using namespace dlib;
using namespace std;
using namespace boost::python;

// ----------------------------------------------------------------------------------------

string print_simple_test_results(const simple_test_results& r)
{
    std::ostringstream sout;
    sout << "precision: "<<r.precision << ", recall: "<< r.recall << ", average precision: " << r.average_precision;
    return sout.str();
}

// ----------------------------------------------------------------------------------------

long left(const rectangle& r) { return r.left(); }
long top(const rectangle& r) { return r.top(); }
long right(const rectangle& r) { return r.right(); }
long bottom(const rectangle& r) { return r.bottom(); }
long width(const rectangle& r) { return r.width(); }
long height(const rectangle& r) { return r.height(); }

string print_rectangle_str(const rectangle& r)
{
    std::ostringstream sout;
    sout << r;
    return sout.str();
}

string print_rectangle_repr(const rectangle& r)
{
    std::ostringstream sout;
    sout << "rectangle(" << r.left() << "," << r.top() << "," << r.right() << "," << r.bottom() << ")";
    return sout.str();
}

// ----------------------------------------------------------------------------------------

inline simple_object_detector_py train_simple_object_detector_on_images_py (
    const boost::python::list& pyimages,
    const boost::python::list& pyboxes,
    const simple_object_detector_training_options& options
)
{
    const unsigned long num_images = len(pyimages);
    if (num_images != len(pyboxes))
        throw dlib::error("The length of the boxes list must match the length of the images list.");

    // We never have any ignore boxes for this version of the API.
    std::vector<std::vector<rectangle> > ignore(num_images), boxes(num_images);
    dlib::array<array2d<rgb_pixel> > images(num_images);
    images_and_nested_params_to_dlib(pyimages, pyboxes, images, boxes);

    return train_simple_object_detector_on_images("", images, boxes, ignore, options);
}

inline simple_test_results test_simple_object_detector_with_images_py (
        const boost::python::list& pyimages,
        const boost::python::list& pyboxes,
        simple_object_detector& detector,
        const unsigned int upsampling_amount
)
{
    const unsigned long num_images = len(pyimages);
    if (num_images != len(pyboxes))
        throw dlib::error("The length of the boxes list must match the length of the images list.");

    // We never have any ignore boxes for this version of the API.
    std::vector<std::vector<rectangle> > ignore(num_images), boxes(num_images);
    dlib::array<array2d<rgb_pixel> > images(num_images);
    images_and_nested_params_to_dlib(pyimages, pyboxes, images, boxes);

    return test_simple_object_detector_with_images(images, upsampling_amount, boxes, ignore, detector);
}

// ----------------------------------------------------------------------------------------

inline simple_test_results test_simple_object_detector_py_with_images_py (
        const boost::python::list& pyimages,
        const boost::python::list& pyboxes,
        simple_object_detector_py& detector,
        const int upsampling_amount
)
{
    // Allow users to pass an upsampling amount; otherwise use the one cached on the
    // detector object.  Any value less than 0 means the cached value is used.
    unsigned int final_upsampling_amount = 0;
    if (upsampling_amount >= 0)
        final_upsampling_amount = upsampling_amount;
    else
        final_upsampling_amount = detector.upsampling_amount;

    return test_simple_object_detector_with_images_py(pyimages, pyboxes, detector.detector, final_upsampling_amount);
}

// ----------------------------------------------------------------------------------------

inline void find_candidate_object_locations_py (
    object pyimage,
    boost::python::list& pyboxes,
    boost::python::tuple pykvals,
    unsigned long min_size,
    unsigned long max_merging_iterations
)
{
    // Copy the data into dlib based objects
    array2d<rgb_pixel> image;
    if (is_gray_python_image(pyimage))
        assign_image(image, numpy_gray_image(pyimage));
    else if (is_rgb_python_image(pyimage))
        assign_image(image, numpy_rgb_image(pyimage));
    else
        throw dlib::error("Unsupported image type, must be 8bit gray or RGB image.");

    if (boost::python::len(pykvals) != 3)
        throw dlib::error("kvals must be a tuple with three elements for start, end, num.");

    double start = extract<double>(pykvals[0]);
    double end   = extract<double>(pykvals[1]);
    long num     = extract<long>(pykvals[2]);
    matrix_range_exp<double> kvals = linspace(start, end, num);

    std::vector<rectangle> rects;
    const long count = len(pyboxes);
    // Copy any rectangles already in the input pyboxes into rects so that they are
    // properly deduplicated in the resulting output.
    for (long i = 0; i < count; ++i)
        rects.push_back(extract<rectangle>(pyboxes[i]));
    // Find candidate objects
    find_candidate_object_locations(image, rects, kvals, min_size, max_merging_iterations);

    // Collect boxes containing candidate objects
    std::vector<rectangle>::iterator iter;
    for (iter = rects.begin(); iter != rects.end(); ++iter)
        pyboxes.append(*iter);
}

// ----------------------------------------------------------------------------------------

void bind_object_detection()
{
    using boost::python::arg;

    class_<simple_object_detector_training_options>("simple_object_detector_training_options", 
        "This object is a container for the options to the train_simple_object_detector() routine.")
        .add_property("be_verbose", &simple_object_detector_training_options::be_verbose, 
                                    &simple_object_detector_training_options::be_verbose,
"If true, train_simple_object_detector() will print out a lot of information to the screen while training.")
        .add_property("add_left_right_image_flips", &simple_object_detector_training_options::add_left_right_image_flips, 
                                                    &simple_object_detector_training_options::add_left_right_image_flips,
"if true, train_simple_object_detector() will assume the objects are \n\
left/right symmetric and add in left right flips of the training \n\
images.  This doubles the size of the training dataset.")
        .add_property("detection_window_size", &simple_object_detector_training_options::detection_window_size,
                                               &simple_object_detector_training_options::detection_window_size,
                                               "The sliding window used will have about this many pixels inside it.")
        .add_property("C", &simple_object_detector_training_options::C,
                           &simple_object_detector_training_options::C,
"C is the usual SVM C regularization parameter.  So it is passed to \n\
structural_object_detection_trainer::set_c().  Larger values of C \n\
will encourage the trainer to fit the data better but might lead to \n\
overfitting.  Therefore, you must determine the proper setting of \n\
this parameter experimentally.")
        .add_property("epsilon", &simple_object_detector_training_options::epsilon,
                                 &simple_object_detector_training_options::epsilon,
"epsilon is the stopping epsilon.  Smaller values make the trainer's \n\
solver more accurate but might take longer to train.")
        .add_property("num_threads", &simple_object_detector_training_options::num_threads,
                                     &simple_object_detector_training_options::num_threads,
"train_simple_object_detector() will use this many threads of \n\
execution.  Set this to the number of CPU cores on your machine to \n\
obtain the fastest training speed.");

    class_<simple_test_results>("simple_test_results")
        .add_property("precision", &simple_test_results::precision)
        .add_property("recall", &simple_test_results::recall)
        .add_property("average_precision", &simple_test_results::average_precision)
        .def("__str__", &::print_simple_test_results);
    {
    typedef rectangle type;
    class_<type>("rectangle", "This object represents a rectangular area of an image.")
        .def(init<long,long,long,long>( (arg("left"),arg("top"),arg("right"),arg("bottom")) ))
        .def("left",   &::left)
        .def("top",    &::top)
        .def("right",  &::right)
        .def("bottom", &::bottom)
        .def("width",  &::width)
        .def("height", &::height)
        .def("__str__", &::print_rectangle_str)
        .def("__repr__", &::print_rectangle_repr)
        .def_pickle(serialize_pickle<type>());
    }
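    // Illustrative Python-side usage of the rectangle binding above (a minimal sketch;
    // the coordinates are arbitrary example values):
    //
    //   import dlib
    //   r = dlib.rectangle(left=10, top=20, right=110, bottom=220)
    //   print(r.width(), r.height())   # -> 101 201 (dlib rectangles use inclusive bounds)
    //   print(r)                       # printed via the __str__ binding above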

    // Note: in this Python API, kvals is a (start, end, num) tuple which is converted with
    // linspace() into the matrix of k values that the C++ find_candidate_object_locations()
    // expects.  See dlib/image_transforms/segment_image_abstract.h for more details.
    def("find_candidate_object_locations", find_candidate_object_locations_py,
            (arg("image"), arg("rects"), arg("kvals")=boost::python::make_tuple(50, 200, 3),
             arg("min_size")=20, arg("max_merging_iterations")=50),
"Returns found candidate objects\n\
requires\n\
    - image == an image object which is a numpy ndarray\n\
    - len(kvals) == 3\n\
    - kvals should be a tuple that specifies the range of k values to use.  In\n\
      particular, it should take the form (start, end, num) where num > 0. \n\
ensures\n\
    - This function takes an input image and generates a set of candidate\n\
      rectangles which are expected to bound any objects in the image.  It does\n\
      this by running a version of the segment_image() routine on the image and\n\
      then reports rectangles containing each of the segments as well as rectangles\n\
      containing unions of adjacent segments.  The basic idea is described in the\n\
      paper: \n\
          Segmentation as Selective Search for Object Recognition by Koen E. A. van de Sande, et al.\n\
      Note that this function deviates from what is described in the paper slightly. \n\
      See the code for details.\n\
    - The basic segmentation is performed kvals[2] times, each time with the k parameter\n\
      (see segment_image() and the Felzenszwalb paper for details on k) set to a different\n\
      value from the range of numbers linearly spaced from kvals[0] to kvals[1].\n\
    - When doing the basic segmentations prior to any box merging, we discard all\n\
      rectangles that have an area < min_size.  Therefore, all outputs and\n\
      subsequent merged rectangles are built out of rectangles that contain at\n\
      least min_size pixels.  Note that setting min_size to a smaller value than\n\
      you might otherwise be interested in using can be useful since it allows a\n\
      larger number of possible merged boxes to be created.\n\
    - There are max_merging_iterations rounds of neighboring blob merging.\n\
      Therefore, this parameter has some effect on the number of output rectangles\n\
      you get, with larger values of the parameter giving more output rectangles.\n\
    - This function appends the output rectangles into #rects.  This means that any\n\
      rectangles in rects before this function was called will still be in there\n\
      after it terminates.  Note further that #rects will not contain any duplicate\n\
      rectangles.  That is, for all valid i and j where i != j it will be true\n\
      that:\n\
        - #rects[i] != rects[j]");

    def("get_frontal_face_detector", get_frontal_face_detector,
        "Returns the default face detector");

    def("train_simple_object_detector", train_simple_object_detector,
        (arg("dataset_filename"), arg("detector_output_filename"), arg("options")),
"requires \n\
    - options.C > 0 \n\
ensures \n\
    - Uses the structural_object_detection_trainer to train a \n\
      simple_object_detector based on the labeled images in the XML file \n\
      dataset_filename.  This function assumes the file dataset_filename is in the \n\
      XML format produced by dlib's save_image_dataset_metadata() routine. \n\
    - This function will apply a reasonable set of default parameters and \n\
      preprocessing techniques to the training procedure for simple_object_detector \n\
      objects.  So the point of this function is to provide you with a very easy \n\
      way to train a basic object detector.   \n\
    - The trained object detector is serialized to the file detector_output_filename.");
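    // Illustrative Python-side usage of the binding above (a minimal sketch; the file names
    // are hypothetical and the XML must be in the save_image_dataset_metadata() format):
    //
    //   import dlib
    //   options = dlib.simple_object_detector_training_options()
    //   options.C = 5
    //   options.num_threads = 4
    //   options.be_verbose = True
    //   dlib.train_simple_object_detector("training.xml", "detector.svm", options)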

    def("train_simple_object_detector", train_simple_object_detector_on_images_py,
        (arg("images"), arg("boxes"), arg("options")),
"requires \n\
    - options.C > 0 \n\
    - len(images) == len(boxes) \n\
    - images should be a list of numpy matrices that represent images, either RGB or grayscale. \n\
    - boxes should be a list of lists of dlib.rectangle objects. \n\
ensures \n\
    - Uses the structural_object_detection_trainer to train a \n\
      simple_object_detector based on the labeled images and bounding boxes.  \n\
    - This function will apply a reasonable set of default parameters and \n\
      preprocessing techniques to the training procedure for simple_object_detector \n\
      objects.  So the point of this function is to provide you with a very easy \n\
      way to train a basic object detector.   \n\
    - The trained object detector is returned.");
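    // Illustrative Python-side usage of the overload above (a minimal sketch; images and
    // boxes are assumed to already be prepared by the caller):
    //
    //   import dlib
    //   # images: a list of numpy arrays (RGB or grayscale)
    //   # boxes:  one list of dlib.rectangle objects per image
    //   options = dlib.simple_object_detector_training_options()
    //   options.add_left_right_image_flips = True
    //   options.C = 5
    //   detector = dlib.train_simple_object_detector(images, boxes, options)
    //   detector.save("detector.svm")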

    def("test_simple_object_detector", test_simple_object_detector,
            // Please see test_simple_object_detector for the reason upsampling_amount is -1
            (arg("dataset_filename"), arg("detector_filename"), arg("upsampling_amount")=-1),
            "requires \n\
                - Optionally, you can pass the number of times to upsample the testing images (upsampling_amount >= 0). \n\
             ensures \n\
                - Loads an image dataset from dataset_filename.  We assume dataset_filename is \n\
                  a file using the XML format written by save_image_dataset_metadata(). \n\
                - Loads a simple_object_detector from the file detector_filename.  This means \n\
                  detector_filename should be a file produced by the train_simple_object_detector()  \n\
                  routine. \n\
                - This function tests the detector against the dataset and returns the \n\
                  precision, recall, and average precision of the detector.  In fact, the \n\
                  return value of this function is identical to that of dlib's \n\
                  test_object_detection_function() routine.  Therefore, see the documentation \n\
                  for test_object_detection_function() for a detailed definition of these \n\
                  metrics. "
        );

    def("test_simple_object_detector", test_simple_object_detector_with_images_py,
            (arg("images"), arg("boxes"), arg("detector"), arg("upsampling_amount")=0),
            "requires \n\
               - len(images) == len(boxes) \n\
               - images should be a list of numpy matrices that represent images, either RGB or grayscale. \n\
               - boxes should be a list of lists of dlib.rectangle objects. \n\
               - Optionally, you can pass the number of times to upsample the testing images (upsampling_amount >= 0). \n\
             ensures \n\
               - This function tests the given detector against the images and boxes and \n\
                 returns the precision, recall, and average precision of the detector.  In \n\
                 fact, the return value of this function is identical to that of dlib's \n\
                 test_object_detection_function() routine.  Therefore, see the documentation \n\
                 for test_object_detection_function() for a detailed definition of these \n\
                 metrics. "
    );

    def("test_simple_object_detector", test_simple_object_detector_py_with_images_py,
            // Please see test_simple_object_detector_py_with_images_py for the reason upsampling_amount is -1
            (arg("images"), arg("boxes"), arg("detector"), arg("upsampling_amount")=-1),
            "requires \n\
               - len(images) == len(boxes) \n\
               - images should be a list of numpy matrices that represent images, either RGB or grayscale. \n\
               - boxes should be a list of lists of dlib.rectangle objects. \n\
             ensures \n\
               - This function tests the given detector against the images and boxes and \n\
                 returns the precision, recall, and average precision of the detector.  In \n\
                 fact, the return value of this function is identical to that of dlib's \n\
                 test_object_detection_function() routine.  Therefore, see the documentation \n\
                 for test_object_detection_function() for a detailed definition of these \n\
                 metrics. \n\
               - If upsampling_amount is not given (i.e. it is < 0) then the upsampling \n\
                 amount the detector was trained with is used. "
    );
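    // Illustrative Python-side usage of the testing overloads above (a minimal sketch;
    // the file names are hypothetical):
    //
    //   import dlib
    //   # Test against an XML dataset file:
    //   print(dlib.test_simple_object_detector("testing.xml", "detector.svm"))
    //   # Or test a detector object against in-memory images and boxes:
    //   detector = dlib.simple_object_detector("detector.svm")
    //   print(dlib.test_simple_object_detector(images, boxes, detector))
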
    {
    typedef simple_object_detector type;
    class_<type>("fhog_object_detector",
        "This object represents a sliding window histogram-of-oriented-gradients based object detector.")
        .def("__init__", make_constructor(&load_object_from_file<type>),  
"Loads an object detector from a file that contains the output of the \n\
train_simple_object_detector() routine or a serialized C++ object of type\n\
object_detector<scan_fhog_pyramid<pyramid_down<6>>>.")
        .def("__call__", run_detector_with_upscale2, (arg("image"), arg("upsample_num_times")=0),
"requires \n\
    - image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
      image. \n\
    - upsample_num_times >= 0 \n\
ensures \n\
    - This function runs the object detector on the input image and returns \n\
      a list of detections.   \n\
    - Upsamples the image upsample_num_times before running the basic \n\
      detector.  If you don't know how many times you want to upsample then \n\
      don't provide a value for upsample_num_times and an appropriate \n\
      default will be used.")
        .def("run", run_rect_detector, (arg("image"), arg("upsample_num_times")),
"requires \n\
    - image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
      image. \n\
    - upsample_num_times >= 0 \n\
ensures \n\
    - This function runs the object detector on the input image and returns \n\
      a tuple of (list of detections, list of scores, list of weight_indices).   \n\
    - Upsamples the image upsample_num_times before running the basic \n\
      detector.  If you don't know how many times you want to upsample then \n\
      don't provide a value for upsample_num_times and an appropriate \n\
      default will be used.")
        .def("save", save_simple_object_detector, (arg("detector_output_filename")), "Save a simple_object_detector to the provided path.")
        .def_pickle(serialize_pickle<type>());
    }
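    // Illustrative Python-side usage of the class above (a minimal sketch; the .svm file
    // name is hypothetical and scikit-image is just one way to load an image):
    //
    //   import dlib
    //   from skimage import io
    //   detector = dlib.fhog_object_detector("detector.svm")
    //   img = io.imread("image.jpg")
    //   dets, scores, idx = detector.run(img, upsample_num_times=1)
    //   for d, s in zip(dets, scores):
    //       print(d, s)
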
    {
    typedef simple_object_detector_py type;
    class_<type>("simple_object_detector",
        "This object represents a sliding window histogram-of-oriented-gradients based object detector.")
        .def("__init__", make_constructor(&load_object_from_file<type>),
"Loads a simple_object_detector from a file that contains the output of the \n\
train_simple_object_detector() routine.")
        .def("__call__", &type::run_detector1, (arg("image"), arg("upsample_num_times")),
"requires \n\
    - image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
      image. \n\
    - upsample_num_times >= 0 \n\
ensures \n\
    - This function runs the object detector on the input image and returns \n\
      a list of detections.   \n\
    - Upsamples the image upsample_num_times before running the basic \n\
      detector.  If you don't know how many times you want to upsample then \n\
      don't provide a value for upsample_num_times and an appropriate \n\
      default will be used.")
        .def("__call__", &type::run_detector2, (arg("image")),
"requires \n\
    - image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
      image. \n\
ensures \n\
    - This function runs the object detector on the input image and returns \n\
      a list of detections.")
        .def("save", save_simple_object_detector_py, (arg("detector_output_filename")), "Save a simple_object_detector to the provided path.")
        .def_pickle(serialize_pickle<type>());
    }
    {
    typedef std::vector<rectangle> type;
    class_<type>("rectangles", "An array of rectangle objects.")
        .def(vector_indexing_suite<type>())
        .def("clear", &type::clear)
        .def("resize", resize<type>)
        .def_pickle(serialize_pickle<type>());
    }
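    // Illustrative Python-side usage of the container above (a minimal sketch):
    //
    //   import dlib
    //   rects = dlib.rectangles()
    //   rects.append(dlib.rectangle(0, 0, 10, 10))
    //   print(len(rects), rects[0])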
}

// ----------------------------------------------------------------------------------------