Commit 077a3b60 authored by Mischan Toosarani-Hausberger, committed by Davis E. King
Browse files

Replace boost::python with pybind11 (#1040)

* Replace boost::python with pybind11

* Replace add_python_module with pybind11_add_module

* Fix clang error on type-dependent expression
parent c68bb4e7
......@@ -3,7 +3,6 @@
#include <dlib/python.h>
#include <dlib/matrix.h>
#include <boost/python/args.hpp>
#include <dlib/geometry.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include "simple_object_detector.h"
......@@ -12,7 +11,8 @@
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
// ----------------------------------------------------------------------------------------
......@@ -26,13 +26,13 @@ string print_simple_test_results(const simple_test_results& r)
// ----------------------------------------------------------------------------------------
inline simple_object_detector_py train_simple_object_detector_on_images_py (
const boost::python::list& pyimages,
const boost::python::list& pyboxes,
const py::list& pyimages,
const py::list& pyboxes,
const simple_object_detector_training_options& options
)
{
const unsigned long num_images = len(pyimages);
if (num_images != len(pyboxes))
const unsigned long num_images = py::len(pyimages);
if (num_images != py::len(pyboxes))
throw dlib::error("The length of the boxes list must match the length of the images list.");
// We never have any ignore boxes for this version of the API.
......@@ -44,14 +44,14 @@ inline simple_object_detector_py train_simple_object_detector_on_images_py (
}
inline simple_test_results test_simple_object_detector_with_images_py (
const boost::python::list& pyimages,
const boost::python::list& pyboxes,
const py::list& pyimages,
const py::list& pyboxes,
simple_object_detector& detector,
const unsigned int upsampling_amount
)
{
const unsigned long num_images = len(pyimages);
if (num_images != len(pyboxes))
const unsigned long num_images = py::len(pyimages);
if (num_images != py::len(pyboxes))
throw dlib::error("The length of the boxes list must match the length of the images list.");
// We never have any ignore boxes for this version of the API.
......@@ -65,8 +65,8 @@ inline simple_test_results test_simple_object_detector_with_images_py (
// ----------------------------------------------------------------------------------------
inline simple_test_results test_simple_object_detector_py_with_images_py (
const boost::python::list& pyimages,
const boost::python::list& pyboxes,
const py::list& pyimages,
const py::list& pyboxes,
simple_object_detector_py& detector,
const int upsampling_amount
)
......@@ -85,9 +85,9 @@ inline simple_test_results test_simple_object_detector_py_with_images_py (
// ----------------------------------------------------------------------------------------
inline void find_candidate_object_locations_py (
object pyimage,
boost::python::list& pyboxes,
boost::python::tuple pykvals,
py::object pyimage,
py::list& pyboxes,
py::tuple pykvals,
unsigned long min_size,
unsigned long max_merging_iterations
)
......@@ -101,20 +101,20 @@ inline void find_candidate_object_locations_py (
else
throw dlib::error("Unsupported image type, must be 8bit gray or RGB image.");
if (boost::python::len(pykvals) != 3)
if (py::len(pykvals) != 3)
throw dlib::error("kvals must be a tuple with three elements for start, end, num.");
double start = extract<double>(pykvals[0]);
double end = extract<double>(pykvals[1]);
long num = extract<long>(pykvals[2]);
double start = pykvals[0].cast<double>();
double end = pykvals[1].cast<double>();
long num = pykvals[2].cast<long>();
matrix_range_exp<double> kvals = linspace(start, end, num);
std::vector<rectangle> rects;
const long count = len(pyboxes);
const long count = py::len(pyboxes);
// Copy any rectangles in the input pyboxes into rects so that any rectangles will be
// properly deduped in the resulting output.
for (long i = 0; i < count; ++i)
rects.push_back(extract<rectangle>(pyboxes[i]));
rects.push_back(pyboxes[i].cast<rectangle>());
// Find candidate objects
find_candidate_object_locations(image, rects, kvals, min_size, max_merging_iterations);
......@@ -126,42 +126,35 @@ inline void find_candidate_object_locations_py (
// ----------------------------------------------------------------------------------------
void bind_object_detection()
void bind_object_detection(py::module& m)
{
using boost::python::arg;
{
typedef simple_object_detector_training_options type;
class_<type>("simple_object_detector_training_options",
py::class_<type>(m, "simple_object_detector_training_options",
"This object is a container for the options to the train_simple_object_detector() routine.")
.add_property("be_verbose", &type::be_verbose,
&type::be_verbose,
.def(py::init())
.def_readwrite("be_verbose", &type::be_verbose,
"If true, train_simple_object_detector() will print out a lot of information to the screen while training.")
.add_property("add_left_right_image_flips", &type::add_left_right_image_flips,
&type::add_left_right_image_flips,
.def_readwrite("add_left_right_image_flips", &type::add_left_right_image_flips,
"if true, train_simple_object_detector() will assume the objects are \n\
left/right symmetric and add in left right flips of the training \n\
images. This doubles the size of the training dataset.")
.add_property("detection_window_size", &type::detection_window_size,
&type::detection_window_size,
.def_readwrite("detection_window_size", &type::detection_window_size,
"The sliding window used will have about this many pixels inside it.")
.add_property("C", &type::C,
&type::C,
.def_readwrite("C", &type::C,
"C is the usual SVM C regularization parameter. So it is passed to \n\
structural_object_detection_trainer::set_c(). Larger values of C \n\
will encourage the trainer to fit the data better but might lead to \n\
overfitting. Therefore, you must determine the proper setting of \n\
this parameter experimentally.")
.add_property("epsilon", &type::epsilon,
&type::epsilon,
.def_readwrite("epsilon", &type::epsilon,
"epsilon is the stopping epsilon. Smaller values make the trainer's \n\
solver more accurate but might take longer to train.")
.add_property("num_threads", &type::num_threads,
&type::num_threads,
.def_readwrite("num_threads", &type::num_threads,
"train_simple_object_detector() will use this many threads of \n\
execution. Set this to the number of CPU cores on your machine to \n\
obtain the fastest training speed.")
.add_property("upsample_limit", &type::upsample_limit,
&type::upsample_limit,
.def_readwrite("upsample_limit", &type::upsample_limit,
"train_simple_object_detector() will upsample images if needed \n\
no more than upsample_limit times. Value 0 will forbid trainer to \n\
upsample any images. If trainer is unable to fit all boxes with \n\
......@@ -171,18 +164,16 @@ Values higher than 2 (default) are not recommended.");
}
{
typedef simple_test_results type;
class_<type>("simple_test_results")
.add_property("precision", &type::precision)
.add_property("recall", &type::recall)
.add_property("average_precision", &type::average_precision)
py::class_<type>(m, "simple_test_results")
.def_readwrite("precision", &type::precision)
.def_readwrite("recall", &type::recall)
.def_readwrite("average_precision", &type::average_precision)
.def("__str__", &::print_simple_test_results);
}
// Here, kvals is actually the result of linspace(start, end, num) and it is different from kvals used
// in find_candidate_object_locations(). See dlib/image_transforms/segment_image_abstract.h for more details.
def("find_candidate_object_locations", find_candidate_object_locations_py,
(arg("image"), arg("rects"), arg("kvals")=boost::python::make_tuple(50, 200, 3),
arg("min_size")=20, arg("max_merging_iterations")=50),
m.def("find_candidate_object_locations", find_candidate_object_locations_py, py::arg("image"), py::arg("rects"), py::arg("kvals")=py::make_tuple(50, 200, 3), py::arg("min_size")=20, py::arg("max_merging_iterations")=50,
"Returns found candidate objects\n\
requires\n\
- image == an image object which is a numpy ndarray\n\
......@@ -218,11 +209,11 @@ ensures\n\
that:\n\
- #rects[i] != rects[j]");
def("get_frontal_face_detector", get_frontal_face_detector,
m.def("get_frontal_face_detector", get_frontal_face_detector,
"Returns the default face detector");
def("train_simple_object_detector", train_simple_object_detector,
(arg("dataset_filename"), arg("detector_output_filename"), arg("options")),
m.def("train_simple_object_detector", train_simple_object_detector,
py::arg("dataset_filename"), py::arg("detector_output_filename"), py::arg("options"),
"requires \n\
- options.C > 0 \n\
ensures \n\
......@@ -236,8 +227,8 @@ ensures \n\
way to train a basic object detector. \n\
- The trained object detector is serialized to the file detector_output_filename.");
def("train_simple_object_detector", train_simple_object_detector_on_images_py,
(arg("images"), arg("boxes"), arg("options")),
m.def("train_simple_object_detector", train_simple_object_detector_on_images_py,
py::arg("images"), py::arg("boxes"), py::arg("options"),
"requires \n\
- options.C > 0 \n\
- len(images) == len(boxes) \n\
......@@ -252,9 +243,9 @@ ensures \n\
way to train a basic object detector. \n\
- The trained object detector is returned.");
def("test_simple_object_detector", test_simple_object_detector,
m.def("test_simple_object_detector", test_simple_object_detector,
// Please see test_simple_object_detector for the reason upsampling_amount is -1
(arg("dataset_filename"), arg("detector_filename"), arg("upsampling_amount")=-1),
py::arg("dataset_filename"), py::arg("detector_filename"), py::arg("upsampling_amount")=-1,
"requires \n\
- Optionally, take the number of times to upsample the testing images (upsampling_amount >= 0). \n\
ensures \n\
......@@ -271,8 +262,8 @@ ensures \n\
metrics. "
);
def("test_simple_object_detector", test_simple_object_detector_with_images_py,
(arg("images"), arg("boxes"), arg("detector"), arg("upsampling_amount")=0),
m.def("test_simple_object_detector", test_simple_object_detector_with_images_py,
py::arg("images"), py::arg("boxes"), py::arg("detector"), py::arg("upsampling_amount")=0,
"requires \n\
- len(images) == len(boxes) \n\
- images should be a list of numpy matrices that represent images, either RGB or grayscale. \n\
......@@ -290,9 +281,9 @@ ensures \n\
metrics. "
);
def("test_simple_object_detector", test_simple_object_detector_py_with_images_py,
m.def("test_simple_object_detector", test_simple_object_detector_py_with_images_py,
// Please see test_simple_object_detector_py_with_images_py for the reason upsampling_amount is -1
(arg("images"), arg("boxes"), arg("detector"), arg("upsampling_amount")=-1),
py::arg("images"), py::arg("boxes"), py::arg("detector"), py::arg("upsampling_amount")=-1,
"requires \n\
- len(images) == len(boxes) \n\
- images should be a list of numpy matrices that represent images, either RGB or grayscale. \n\
......@@ -310,13 +301,13 @@ ensures \n\
);
{
typedef simple_object_detector type;
class_<type>("fhog_object_detector",
py::class_<type, std::shared_ptr<type>>(m, "fhog_object_detector",
"This object represents a sliding window histogram-of-oriented-gradients based object detector.")
.def("__init__", make_constructor(&load_object_from_file<type>),
.def(py::init(&load_object_from_file<type>),
"Loads an object detector from a file that contains the output of the \n\
train_simple_object_detector() routine or a serialized C++ object of type\n\
object_detector<scan_fhog_pyramid<pyramid_down<6>>>.")
.def("__call__", run_detector_with_upscale2, (arg("image"), arg("upsample_num_times")=0),
.def("__call__", run_detector_with_upscale2, py::arg("image"), py::arg("upsample_num_times")=0,
"requires \n\
- image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
image. \n\
......@@ -326,7 +317,7 @@ ensures \n\
a list of detections. \n\
- Upsamples the image upsample_num_times before running the basic \n\
detector.")
.def("run", run_rect_detector, (arg("image"), arg("upsample_num_times")=0, arg("adjust_threshold")=0.0),
.def("run", run_rect_detector, py::arg("image"), py::arg("upsample_num_times")=0, py::arg("adjust_threshold")=0.0,
"requires \n\
- image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
image. \n\
......@@ -336,7 +327,7 @@ ensures \n\
a tuple of (list of detections, list of scores, list of weight_indices). \n\
- Upsamples the image upsample_num_times before running the basic \n\
detector.")
.def("run_multiple", run_multiple_rect_detectors,(arg("detectors"), arg("image"), arg("upsample_num_times")=0, arg("adjust_threshold")=0.0),
.def_static("run_multiple", run_multiple_rect_detectors, py::arg("detectors"), py::arg("image"), py::arg("upsample_num_times")=0, py::arg("adjust_threshold")=0.0,
"requires \n\
- detectors is a list of detectors. \n\
- image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
......@@ -347,18 +338,17 @@ ensures \n\
a tuple of (list of detections, list of scores, list of weight_indices). \n\
- Upsamples the image upsample_num_times before running the basic \n\
detector.")
.staticmethod("run_multiple")
.def("save", save_simple_object_detector, (arg("detector_output_filename")), "Save a simple_object_detector to the provided path.")
.def_pickle(serialize_pickle<type>());
.def("save", save_simple_object_detector, py::arg("detector_output_filename"), "Save a simple_object_detector to the provided path.")
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
typedef simple_object_detector_py type;
class_<type>("simple_object_detector",
py::class_<type, std::shared_ptr<type>>(m, "simple_object_detector",
"This object represents a sliding window histogram-of-oriented-gradients based object detector.")
.def("__init__", make_constructor(&load_object_from_file<type>),
.def(py::init(&load_object_from_file<type>),
"Loads a simple_object_detector from a file that contains the output of the \n\
train_simple_object_detector() routine.")
.def("__call__", &type::run_detector1, (arg("image"), arg("upsample_num_times"), arg("adjust_threshold")=0.0),
.def("__call__", &type::run_detector1, py::arg("image"), py::arg("upsample_num_times"),
"requires \n\
- image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
image. \n\
......@@ -370,15 +360,15 @@ ensures \n\
detector. If you don't know how many times you want to upsample then \n\
don't provide a value for upsample_num_times and an appropriate \n\
default will be used.")
.def("__call__", &type::run_detector2, (arg("image")),
.def("__call__", &type::run_detector2, py::arg("image"),
"requires \n\
- image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
image. \n\
ensures \n\
- This function runs the object detector on the input image and returns \n\
a list of detections.")
.def("save", save_simple_object_detector_py, (arg("detector_output_filename")), "Save a simple_object_detector to the provided path.")
.def_pickle(serialize_pickle<type>());
.def("save", save_simple_object_detector_py, py::arg("detector_output_filename"), "Save a simple_object_detector to the provided path.")
.def(py::pickle(&getstate<type>, &setstate<type>));
}
}
......
......@@ -2,17 +2,15 @@
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/python.h>
#include <boost/shared_ptr.hpp>
#include <dlib/matrix.h>
#include <dlib/data_io.h>
#include <dlib/sparse_vector.h>
#include <boost/python/args.hpp>
#include <dlib/optimization.h>
#include <dlib/statistics/running_gradient.h>
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
typedef std::vector<std::pair<unsigned long,double> > sparse_vect;
......@@ -32,14 +30,14 @@ void _make_sparse_vector2 (
make_sparse_vector_inplace(v[i]);
}
boost::python::tuple _load_libsvm_formatted_data(
py::tuple _load_libsvm_formatted_data(
const std::string& file_name
)
{
std::vector<sparse_vect> samples;
std::vector<double> labels;
load_libsvm_formatted_data(file_name, samples, labels);
return boost::python::make_tuple(samples, labels);
return py::make_tuple(samples, labels);
}
void _save_libsvm_formatted_data (
......@@ -54,7 +52,7 @@ void _save_libsvm_formatted_data (
// ----------------------------------------------------------------------------------------
boost::python::list _max_cost_assignment (
py::list _max_cost_assignment (
const matrix<double>& cost
)
{
......@@ -70,7 +68,7 @@ boost::python::list _max_cost_assignment (
double _assignment_cost (
const matrix<double>& cost,
const boost::python::list& assignment
const py::list& assignment
)
{
return assignment_cost(cost, python_list_to_vector<long>(assignment));
......@@ -79,7 +77,7 @@ double _assignment_cost (
// ----------------------------------------------------------------------------------------
size_t py_count_steps_without_decrease (
boost::python::object arr,
py::object arr,
double probability_of_decrease
)
{
......@@ -90,7 +88,7 @@ size_t py_count_steps_without_decrease (
// ----------------------------------------------------------------------------------------
size_t py_count_steps_without_decrease_robust (
boost::python::object arr,
py::object arr,
double probability_of_decrease,
double quantile_discard
)
......@@ -110,11 +108,9 @@ void hit_enter_to_continue()
// ----------------------------------------------------------------------------------------
void bind_other()
void bind_other(py::module &m)
{
using boost::python::arg;
def("max_cost_assignment", _max_cost_assignment, (arg("cost")),
m.def("max_cost_assignment", _max_cost_assignment, py::arg("cost"),
"requires \n\
- cost.nr() == cost.nc() \n\
(i.e. the input must be a square matrix) \n\
......@@ -135,7 +131,7 @@ ensures \n\
of the largest to the smallest value in cost is no more than about 1e16. "
);
def("assignment_cost", _assignment_cost, (arg("cost"),arg("assignment")),
m.def("assignment_cost", _assignment_cost, py::arg("cost"),py::arg("assignment"),
"requires \n\
- cost.nr() == cost.nc() \n\
(i.e. the input must be a square matrix) \n\
......@@ -151,7 +147,7 @@ ensures \n\
sum over i: cost[i][assignment[i]] "
);
def("make_sparse_vector", _make_sparse_vector ,
m.def("make_sparse_vector", _make_sparse_vector ,
"This function modifies its argument so that it is a properly sorted sparse vector. \n\
This means that the elements of the sparse vector will be ordered so that pairs \n\
with smaller indices come first. Additionally, there won't be any pairs with \n\
......@@ -159,10 +155,10 @@ identical indices. If such pairs were present in the input sparse vector then
their values will be added together and only one pair with their index will be \n\
present in the output. "
);
def("make_sparse_vector", _make_sparse_vector2 ,
m.def("make_sparse_vector", _make_sparse_vector2 ,
"This function modifies a sparse_vectors object so that all elements it contains are properly sorted sparse vectors.");
def("load_libsvm_formatted_data",_load_libsvm_formatted_data, (arg("file_name")),
m.def("load_libsvm_formatted_data",_load_libsvm_formatted_data, py::arg("file_name"),
"ensures \n\
- Attempts to read a file of the given name that should contain libsvm \n\
formatted data. The data is returned as a tuple where the first tuple \n\
......@@ -170,20 +166,20 @@ present in the output. "
labels. "
);
def("save_libsvm_formatted_data",_save_libsvm_formatted_data, (arg("file_name"), arg("samples"), arg("labels")),
m.def("save_libsvm_formatted_data",_save_libsvm_formatted_data, py::arg("file_name"), py::arg("samples"), py::arg("labels"),
"requires \n\
- len(samples) == len(labels) \n\
ensures \n\
- saves the data to the given file in libsvm format "
);
def("hit_enter_to_continue", hit_enter_to_continue,
m.def("hit_enter_to_continue", hit_enter_to_continue,
"Asks the user to hit enter to continue and pauses until they do so.");
def("count_steps_without_decrease",py_count_steps_without_decrease, (arg("time_series"), arg("probability_of_decrease")=0.51),
m.def("count_steps_without_decrease",py_count_steps_without_decrease, py::arg("time_series"), py::arg("probability_of_decrease")=0.51,
"requires \n\
- time_series must be a one dimensional array of real numbers. \n\
- 0.5 < probability_of_decrease < 1 \n\
......@@ -230,7 +226,7 @@ ensures \n\
!*/
);
def("count_steps_without_decrease_robust",py_count_steps_without_decrease_robust, (arg("time_series"), arg("probability_of_decrease")=0.51, arg("quantile_discard")=0.1),
m.def("count_steps_without_decrease_robust",py_count_steps_without_decrease_robust, py::arg("time_series"), py::arg("probability_of_decrease")=0.51, py::arg("quantile_discard")=0.1,
"requires \n\
- time_series must be a one dimensional array of real numbers. \n\
- 0.5 < probability_of_decrease < 1 \n\
......
......@@ -2,13 +2,16 @@
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/python.h>
#include <boost/python/args.hpp>
#include <dlib/geometry.h>
#include <pybind11/stl_bind.h>
#include "indexing.h"
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
PYBIND11_MAKE_OPAQUE(std::vector<rectangle>);
// ----------------------------------------------------------------------------------------
......@@ -67,13 +70,12 @@ string print_rectangle_repr(const rect_type& r)
// ----------------------------------------------------------------------------------------
void bind_rectangles()
void bind_rectangles(py::module& m)
{
using boost::python::arg;
{
typedef rectangle type;
class_<type>("rectangle", "This object represents a rectangular area of an image.")
.def(init<long,long,long,long>( (arg("left"),arg("top"),arg("right"),arg("bottom")) ))
py::class_<type>(m, "rectangle", "This object represents a rectangular area of an image.")
.def(py::init<long,long,long,long>(), py::arg("left"),py::arg("top"),py::arg("right"),py::arg("bottom"))
.def("area", &::area)
.def("left", &::left)
.def("top", &::top)
......@@ -84,20 +86,20 @@ void bind_rectangles()
.def("is_empty", &::is_empty<type>)
.def("center", &::center<type>)
.def("dcenter", &::dcenter<type>)
.def("contains", &::contains<type>, arg("point"))
.def("contains", &::contains_xy<type>, (arg("x"), arg("y")))
.def("contains", &::contains_rec<type>, (arg("rectangle")))
.def("intersect", &::intersect<type>, (arg("rectangle")))
.def("contains", &::contains<type>, py::arg("point"))
.def("contains", &::contains_xy<type>, py::arg("x"), py::arg("y"))
.def("contains", &::contains_rec<type>, py::arg("rectangle"))
.def("intersect", &::intersect<type>, py::arg("rectangle"))
.def("__str__", &::print_rectangle_str<type>)
.def("__repr__", &::print_rectangle_repr<type>)
.def(self == self)
.def(self != self)
.def_pickle(serialize_pickle<type>());
.def(py::self == py::self)
.def(py::self != py::self)
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
typedef drectangle type;
class_<type>("drectangle", "This object represents a rectangular area of an image with floating point coordinates.")
.def(init<double,double,double,double>( (arg("left"),arg("top"),arg("right"),arg("bottom")) ))
py::class_<type>(m, "drectangle", "This object represents a rectangular area of an image with floating point coordinates.")
.def(py::init<double,double,double,double>(), py::arg("left"), py::arg("top"), py::arg("right"), py::arg("bottom"))
.def("area", &::darea)
.def("left", &::dleft)
.def("top", &::dtop)
......@@ -108,23 +110,23 @@ void bind_rectangles()
.def("is_empty", &::is_empty<type>)
.def("center", &::center<type>)
.def("dcenter", &::dcenter<type>)
.def("contains", &::contains<type>, arg("point"))
.def("contains", &::contains_xy<type>, (arg("x"), arg("y")))
.def("contains", &::contains_rec<type>, (arg("rectangle")))
.def("intersect", &::intersect<type>, (arg("rectangle")))
.def("contains", &::contains<type>, py::arg("point"))
.def("contains", &::contains_xy<type>, py::arg("x"), py::arg("y"))
.def("contains", &::contains_rec<type>, py::arg("rectangle"))
.def("intersect", &::intersect<type>, py::arg("rectangle"))
.def("__str__", &::print_rectangle_str<type>)
.def("__repr__", &::print_rectangle_repr<type>)
.def(self == self)
.def(self != self)
.def_pickle(serialize_pickle<type>());
.def(py::self == py::self)
.def(py::self != py::self)
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
typedef std::vector<rectangle> type;
class_<type>("rectangles", "An array of rectangle objects.")
.def(vector_indexing_suite<type>())
py::bind_vector<type>(m, "rectangles", "An array of rectangle objects.")
.def("clear", &type::clear)
.def("resize", resize<type>)
.def_pickle(serialize_pickle<type>());
.def("extend", extend_vector_with_python_list<rectangle>)
.def(py::pickle(&getstate<type>, &setstate<type>));
}
}
......
......@@ -2,15 +2,12 @@
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/python.h>
#include <boost/shared_ptr.hpp>
#include <dlib/matrix.h>
#include <dlib/svm_threaded.h>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include <boost/python/args.hpp>
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
typedef matrix<double,0,1> dense_vect;
typedef std::vector<std::pair<unsigned long,double> > sparse_vect;
......@@ -776,11 +773,12 @@ const segmenter_test cross_validate_sequence_segmenter2 (
// ----------------------------------------------------------------------------------------
void bind_sequence_segmenter()
void bind_sequence_segmenter(py::module& m)
{
class_<segmenter_params>("segmenter_params",
py::class_<segmenter_params>(m, "segmenter_params",
"This class is used to define all the optional parameters to the \n\
train_sequence_segmenter() and cross_validate_sequence_segmenter() routines. ")
.def(py::init<>())
.def_readwrite("use_BIO_model", &segmenter_params::use_BIO_model)
.def_readwrite("use_high_order_features", &segmenter_params::use_high_order_features)
.def_readwrite("allow_negative_weights", &segmenter_params::allow_negative_weights)
......@@ -792,36 +790,35 @@ train_sequence_segmenter() and cross_validate_sequence_segmenter() routines. "
.def_readwrite("be_verbose", &segmenter_params::be_verbose)
.def("__repr__",&segmenter_params__repr__)
.def("__str__",&segmenter_params__str__)
.def_pickle(serialize_pickle<segmenter_params>());
.def(py::pickle(&getstate<segmenter_params>, &setstate<segmenter_params>));
class_<segmenter_type> ("segmenter_type", "This object represents a sequence segmenter and is the type of object "
py::class_<segmenter_type> (m, "segmenter_type", "This object represents a sequence segmenter and is the type of object "
"returned by the dlib.train_sequence_segmenter() routine.")
.def("__call__", &segmenter_type::segment_sequence_dense)
.def("__call__", &segmenter_type::segment_sequence_sparse)
.def_readonly("weights", &segmenter_type::get_weights)
.def_pickle(serialize_pickle<segmenter_type>());
.def_property_readonly("weights", &segmenter_type::get_weights)
.def(py::pickle(&getstate<segmenter_type>, &setstate<segmenter_type>));
class_<segmenter_test> ("segmenter_test", "This object is the output of the dlib.test_sequence_segmenter() and "
py::class_<segmenter_test> (m, "segmenter_test", "This object is the output of the dlib.test_sequence_segmenter() and "
"dlib.cross_validate_sequence_segmenter() routines.")
.def_readwrite("precision", &segmenter_test::precision)
.def_readwrite("recall", &segmenter_test::recall)
.def_readwrite("f1", &segmenter_test::f1)
.def("__repr__",&segmenter_test__repr__)
.def("__str__",&segmenter_test__str__)
.def_pickle(serialize_pickle<segmenter_test>());
.def(py::pickle(&getstate<segmenter_test>, &setstate<segmenter_test>));
using boost::python::arg;
def("train_sequence_segmenter", train_dense, (arg("samples"), arg("segments"), arg("params")=segmenter_params()));
def("train_sequence_segmenter", train_sparse, (arg("samples"), arg("segments"), arg("params")=segmenter_params()));
m.def("train_sequence_segmenter", train_dense, py::arg("samples"), py::arg("segments"), py::arg("params")=segmenter_params());
m.def("train_sequence_segmenter", train_sparse, py::arg("samples"), py::arg("segments"), py::arg("params")=segmenter_params());
def("test_sequence_segmenter", test_sequence_segmenter1);
def("test_sequence_segmenter", test_sequence_segmenter2);
m.def("test_sequence_segmenter", test_sequence_segmenter1);
m.def("test_sequence_segmenter", test_sequence_segmenter2);
def("cross_validate_sequence_segmenter", cross_validate_sequence_segmenter1,
(arg("samples"), arg("segments"), arg("folds"), arg("params")=segmenter_params()));
def("cross_validate_sequence_segmenter", cross_validate_sequence_segmenter2,
(arg("samples"), arg("segments"), arg("folds"), arg("params")=segmenter_params()));
m.def("cross_validate_sequence_segmenter", cross_validate_sequence_segmenter1,
py::arg("samples"), py::arg("segments"), py::arg("folds"), py::arg("params")=segmenter_params());
m.def("cross_validate_sequence_segmenter", cross_validate_sequence_segmenter2,
py::arg("samples"), py::arg("segments"), py::arg("folds"), py::arg("params")=segmenter_params());
}
......
......@@ -3,24 +3,24 @@
#include <dlib/python.h>
#include <dlib/geometry.h>
#include <boost/python/args.hpp>
#include <dlib/image_processing.h>
#include "shape_predictor.h"
#include "conversion.h"
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
// ----------------------------------------------------------------------------------------
full_object_detection run_predictor (
shape_predictor& predictor,
object img,
object rect
py::object img,
py::object rect
)
{
rectangle box = extract<rectangle>(rect);
rectangle box = rect.cast<rectangle>();
if (is_gray_python_image(img))
{
return predictor(numpy_gray_image(img), box);
......@@ -54,7 +54,7 @@ point full_obj_det_part (const full_object_detection& detection, const unsigned
if (idx >= detection.num_parts())
{
PyErr_SetString(PyExc_IndexError, "Index out of range");
boost::python::throw_error_already_set();
throw py::error_already_set();
}
return detection.part(idx);
}
......@@ -68,28 +68,31 @@ std::vector<point> full_obj_det_parts (const full_object_detection& detection)
return parts;
}
boost::shared_ptr<full_object_detection> full_obj_det_init(object& pyrect, object& pyparts)
std::shared_ptr<full_object_detection> full_obj_det_init(py::object& pyrect, py::object& pyparts)
{
const unsigned long num_parts = len(pyparts);
const unsigned long num_parts = py::len(pyparts);
std::vector<point> parts(num_parts);
rectangle rect = extract<rectangle>(pyrect);
rectangle rect = pyrect.cast<rectangle>();
py::iterator parts_it = pyparts.begin();
for (unsigned long j = 0; j < num_parts; ++j)
parts[j] = extract<point>(pyparts[j]);
for (unsigned long j = 0;
parts_it != pyparts.end();
++j, ++parts_it)
parts[j] = parts_it->cast<point>();
return boost::shared_ptr<full_object_detection>(new full_object_detection(rect, parts));
return std::make_shared<full_object_detection>(rect, parts);
}
// ----------------------------------------------------------------------------------------
inline shape_predictor train_shape_predictor_on_images_py (
const boost::python::list& pyimages,
const boost::python::list& pydetections,
const py::list& pyimages,
const py::list& pydetections,
const shape_predictor_training_options& options
)
{
const unsigned long num_images = len(pyimages);
if (num_images != len(pydetections))
const unsigned long num_images = py::len(pyimages);
if (num_images != py::len(pydetections))
throw dlib::error("The length of the detections list must match the length of the images list.");
std::vector<std::vector<full_object_detection> > detections(num_images);
......@@ -101,15 +104,15 @@ inline shape_predictor train_shape_predictor_on_images_py (
inline double test_shape_predictor_with_images_py (
const boost::python::list& pyimages,
const boost::python::list& pydetections,
const boost::python::list& pyscales,
const py::list& pyimages,
const py::list& pydetections,
const py::list& pyscales,
const shape_predictor& predictor
)
{
const unsigned long num_images = len(pyimages);
const unsigned long num_scales = len(pyscales);
if (num_images != len(pydetections))
const unsigned long num_images = py::len(pyimages);
const unsigned long num_scales = py::len(pyscales);
if (num_images != py::len(pydetections))
throw dlib::error("The length of the detections list must match the length of the images list.");
if (num_scales > 0 && num_scales != num_images)
......@@ -124,17 +127,21 @@ inline double test_shape_predictor_with_images_py (
// Now copy the data into dlib based objects so we can call the testing routine.
for (unsigned long i = 0; i < num_images; ++i)
{
const unsigned long num_boxes = len(pydetections[i]);
for (unsigned long j = 0; j < num_boxes; ++j)
detections[i].push_back(extract<full_object_detection>(pydetections[i][j]));
const unsigned long num_boxes = py::len(pydetections[i]);
for (py::iterator det_it = pydetections[i].begin();
det_it != pydetections[i].end();
++det_it)
detections[i].push_back(det_it->cast<full_object_detection>());
pyimage_to_dlib_image(pyimages[i], images[i]);
if (num_scales > 0)
{
if (num_boxes != len(pyscales[i]))
if (num_boxes != py::len(pyscales[i]))
throw dlib::error("The length of the scales list must match the length of the detections list.");
for (unsigned long j = 0; j < num_boxes; ++j)
scales[i].push_back(extract<double>(pyscales[i][j]));
for (py::iterator scale_it = pyscales[i].begin();
scale_it != pyscales[i].end();
++scale_it)
scales[i].push_back(scale_it->cast<double>());
}
}
......@@ -142,90 +149,80 @@ inline double test_shape_predictor_with_images_py (
}
inline double test_shape_predictor_with_images_no_scales_py (
const boost::python::list& pyimages,
const boost::python::list& pydetections,
const py::list& pyimages,
const py::list& pydetections,
const shape_predictor& predictor
)
{
boost::python::list pyscales;
py::list pyscales;
return test_shape_predictor_with_images_py(pyimages, pydetections, pyscales, predictor);
}
// ----------------------------------------------------------------------------------------
void bind_shape_predictors()
void bind_shape_predictors(py::module &m)
{
using boost::python::arg;
{
typedef full_object_detection type;
class_<type>("full_object_detection",
py::class_<type, std::shared_ptr<type>>(m, "full_object_detection",
"This object represents the location of an object in an image along with the \
positions of each of its constituent parts.")
.def("__init__", make_constructor(&full_obj_det_init),
.def(py::init(&full_obj_det_init),
"requires \n\
- rect: dlib rectangle \n\
- parts: list of dlib points")
.add_property("rect", &full_obj_det_get_rect, "Bounding box from the underlying detector. Parts can be outside box if appropriate.")
.add_property("num_parts", &full_obj_det_num_parts, "The number of parts of the object.")
.def("part", &full_obj_det_part, (arg("idx")), "A single part of the object as a dlib point.")
.def_property_readonly("rect", &full_obj_det_get_rect, "Bounding box from the underlying detector. Parts can be outside box if appropriate.")
.def_property_readonly("num_parts", &full_obj_det_num_parts, "The number of parts of the object.")
.def("part", &full_obj_det_part, py::arg("idx"), "A single part of the object as a dlib point.")
.def("parts", &full_obj_det_parts, "A vector of dlib points representing all of the parts.")
.def_pickle(serialize_pickle<type>());
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
typedef shape_predictor_training_options type;
class_<type>("shape_predictor_training_options",
py::class_<type>(m, "shape_predictor_training_options",
"This object is a container for the options to the train_shape_predictor() routine.")
.add_property("be_verbose", &type::be_verbose,
&type::be_verbose,
.def(py::init())
.def_readwrite("be_verbose", &type::be_verbose,
"If true, train_shape_predictor() will print out a lot of information to stdout while training.")
.add_property("cascade_depth", &type::cascade_depth,
&type::cascade_depth,
.def_readwrite("cascade_depth", &type::cascade_depth,
"The number of cascades created to train the model with.")
.add_property("tree_depth", &type::tree_depth,
&type::tree_depth,
.def_readwrite("tree_depth", &type::tree_depth,
"The depth of the trees used in each cascade. There are pow(2, get_tree_depth()) leaves in each tree")
.add_property("num_trees_per_cascade_level", &type::num_trees_per_cascade_level,
&type::num_trees_per_cascade_level,
.def_readwrite("num_trees_per_cascade_level", &type::num_trees_per_cascade_level,
"The number of trees created for each cascade.")
.add_property("nu", &type::nu,
&type::nu,
.def_readwrite("nu", &type::nu,
"The regularization parameter. Larger values of this parameter \
will cause the algorithm to fit the training data better but may also \
cause overfitting. The value must be in the range (0, 1].")
.add_property("oversampling_amount", &type::oversampling_amount,
&type::oversampling_amount,
.def_readwrite("oversampling_amount", &type::oversampling_amount,
"The number of randomly selected initial starting points sampled for each training example")
.add_property("feature_pool_size", &type::feature_pool_size,
&type::feature_pool_size,
.def_readwrite("feature_pool_size", &type::feature_pool_size,
"Number of pixels used to generate features for the random trees.")
.add_property("lambda_param", &type::lambda_param,
&type::lambda_param,
.def_readwrite("lambda_param", &type::lambda_param,
"Controls how tight the feature sampling should be. Lower values enforce closer features.")
.add_property("num_test_splits", &type::num_test_splits,
&type::num_test_splits,
.def_readwrite("num_test_splits", &type::num_test_splits,
"Number of split features at each node to sample. The one that gives the best split is chosen.")
.add_property("feature_pool_region_padding", &type::feature_pool_region_padding,
&type::feature_pool_region_padding,
.def_readwrite("feature_pool_region_padding", &type::feature_pool_region_padding,
"Size of region within which to sample features for the feature pool, \
e.g a padding of 0.5 would cause the algorithm to sample pixels from a box that was 2x2 pixels")
.add_property("random_seed", &type::random_seed,
&type::random_seed,
.def_readwrite("random_seed", &type::random_seed,
"The random seed used by the internal random number generator")
.def("__str__", &::print_shape_predictor_training_options)
.def_pickle(serialize_pickle<type>());
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
typedef shape_predictor type;
class_<type>("shape_predictor",
py::class_<type, std::shared_ptr<type>>(m, "shape_predictor",
"This object is a tool that takes in an image region containing some object and \
outputs a set of point locations that define the pose of the object. The classic \
example of this is human face pose prediction, where you take an image of a human \
face as input and are expected to identify the locations of important facial \
landmarks such as the corners of the mouth and eyes, tip of the nose, and so forth.")
.def("__init__", make_constructor(&load_object_from_file<type>),
.def(py::init())
.def(py::init(&load_object_from_file<type>),
"Loads a shape_predictor from a file that contains the output of the \n\
train_shape_predictor() routine.")
.def("__call__", &run_predictor, (arg("image"), arg("box")),
.def("__call__", &run_predictor, py::arg("image"), py::arg("box"),
"requires \n\
- image is a numpy ndarray containing either an 8bit grayscale or RGB \n\
image. \n\
......@@ -233,12 +230,12 @@ train_shape_predictor() routine.")
ensures \n\
- This function runs the shape predictor on the input image and returns \n\
a single full_object_detection.")
.def("save", save_shape_predictor, (arg("predictor_output_filename")), "Save a shape_predictor to the provided path.")
.def_pickle(serialize_pickle<type>());
.def("save", save_shape_predictor, py::arg("predictor_output_filename"), "Save a shape_predictor to the provided path.")
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
def("train_shape_predictor", train_shape_predictor_on_images_py,
(arg("images"), arg("object_detections"), arg("options")),
m.def("train_shape_predictor", train_shape_predictor_on_images_py,
py::arg("images"), py::arg("object_detections"), py::arg("options"),
"requires \n\
- options.lambda_param > 0 \n\
- 0 < options.nu <= 1 \n\
......@@ -252,8 +249,8 @@ ensures \n\
shape_predictor based on the provided labeled images, full_object_detections, and options.\n\
- The trained shape_predictor is returned");
def("train_shape_predictor", train_shape_predictor,
(arg("dataset_filename"), arg("predictor_output_filename"), arg("options")),
m.def("train_shape_predictor", train_shape_predictor,
py::arg("dataset_filename"), py::arg("predictor_output_filename"), py::arg("options"),
"requires \n\
- options.lambda_param > 0 \n\
- 0 < options.nu <= 1 \n\
......@@ -265,8 +262,8 @@ ensures \n\
XML format produced by dlib's save_image_dataset_metadata() routine. \n\
- The trained shape predictor is serialized to the file predictor_output_filename.");
def("test_shape_predictor", test_shape_predictor_py,
(arg("dataset_filename"), arg("predictor_filename")),
m.def("test_shape_predictor", test_shape_predictor_py,
py::arg("dataset_filename"), py::arg("predictor_filename"),
"ensures \n\
- Loads an image dataset from dataset_filename. We assume dataset_filename is \n\
a file using the XML format written by save_image_dataset_metadata(). \n\
......@@ -279,8 +276,8 @@ ensures \n\
shape_predictor_trainer() routine. Therefore, see the documentation \n\
for shape_predictor_trainer() for a detailed definition of the mean average error.");
def("test_shape_predictor", test_shape_predictor_with_images_no_scales_py,
(arg("images"), arg("detections"), arg("shape_predictor")),
m.def("test_shape_predictor", test_shape_predictor_with_images_no_scales_py,
py::arg("images"), py::arg("detections"), py::arg("shape_predictor"),
"requires \n\
- len(images) == len(object_detections) \n\
- images should be a list of numpy matrices that represent images, either RGB or grayscale. \n\
......@@ -296,8 +293,8 @@ ensures \n\
for shape_predictor_trainer() for a detailed definition of the mean average error.");
def("test_shape_predictor", test_shape_predictor_with_images_py,
(arg("images"), arg("detections"), arg("scales"), arg("shape_predictor")),
m.def("test_shape_predictor", test_shape_predictor_with_images_py,
py::arg("images"), py::arg("detections"), py::arg("scales"), py::arg("shape_predictor"),
"requires \n\
- len(images) == len(object_detections) \n\
- len(object_detections) == len(scales) \n\
......
......@@ -5,10 +5,11 @@
#include <dlib/python.h>
#include <dlib/matrix.h>
#include <boost/python/args.hpp>
#include <dlib/geometry.h>
#include <dlib/image_processing/frontal_face_detector.h>
namespace py = pybind11;
namespace dlib
{
typedef object_detector<scan_fhog_pyramid<pyramid_down<6> > > simple_object_detector;
......@@ -35,7 +36,7 @@ namespace dlib
inline std::vector<dlib::rectangle> run_detector_with_upscale1 (
dlib::simple_object_detector& detector,
boost::python::object img,
py::object img,
const unsigned int upsampling_amount,
const double adjust_threshold,
std::vector<double>& detection_confidences,
......@@ -115,7 +116,7 @@ namespace dlib
inline std::vector<dlib::rectangle> run_detectors_with_upscale1 (
std::vector<simple_object_detector >& detectors,
boost::python::object img,
py::object img,
const unsigned int upsampling_amount,
const double adjust_threshold,
std::vector<double>& detection_confidences,
......@@ -195,7 +196,7 @@ namespace dlib
inline std::vector<dlib::rectangle> run_detector_with_upscale2 (
dlib::simple_object_detector& detector,
boost::python::object img,
py::object img,
const unsigned int upsampling_amount
)
......@@ -209,13 +210,13 @@ namespace dlib
detection_confidences, weight_indices);
}
inline boost::python::tuple run_rect_detector (
inline py::tuple run_rect_detector (
dlib::simple_object_detector& detector,
boost::python::object img,
py::object img,
const unsigned int upsampling_amount,
const double adjust_threshold)
{
boost::python::tuple t;
py::tuple t;
std::vector<double> detection_confidences;
std::vector<double> weight_indices;
......@@ -225,26 +226,26 @@ namespace dlib
adjust_threshold,
detection_confidences, weight_indices);
return boost::python::make_tuple(rectangles,
detection_confidences, weight_indices);
return py::make_tuple(rectangles,
detection_confidences, weight_indices);
}
inline boost::python::tuple run_multiple_rect_detectors (
boost::python::list& detectors,
boost::python::object img,
inline py::tuple run_multiple_rect_detectors (
py::list& detectors,
py::object img,
const unsigned int upsampling_amount,
const double adjust_threshold)
{
boost::python::tuple t;
py::tuple t;
std::vector<simple_object_detector > vector_detectors;
const unsigned long num_detectors = len(detectors);
// Now copy the data into dlib based objects.
for (unsigned long i = 0; i < num_detectors; ++i)
{
vector_detectors.push_back(boost::python::extract<simple_object_detector >(detectors[i]));
vector_detectors.push_back(detectors[i].cast<simple_object_detector >());
}
std::vector<double> detection_confidences;
std::vector<double> weight_indices;
std::vector<rectangle> rectangles;
......@@ -253,8 +254,8 @@ namespace dlib
adjust_threshold,
detection_confidences, weight_indices);
return boost::python::make_tuple(rectangles,
detection_confidences, weight_indices);
return py::make_tuple(rectangles,
detection_confidences, weight_indices);
}
......@@ -268,13 +269,13 @@ namespace dlib
simple_object_detector_py(simple_object_detector& _detector, unsigned int _upsampling_amount) :
detector(_detector), upsampling_amount(_upsampling_amount) {}
std::vector<dlib::rectangle> run_detector1 (boost::python::object img,
std::vector<dlib::rectangle> run_detector1 (py::object img,
const unsigned int upsampling_amount_)
{
return run_detector_with_upscale2(detector, img, upsampling_amount_);
}
std::vector<dlib::rectangle> run_detector2 (boost::python::object img)
std::vector<dlib::rectangle> run_detector2 (py::object img)
{
return run_detector_with_upscale2(detector, img, upsampling_amount);
}
......
......@@ -3,14 +3,11 @@
#include <dlib/python.h>
#include "testing_results.h"
#include <boost/shared_ptr.hpp>
#include <dlib/matrix.h>
#include <dlib/svm_threaded.h>
#include <boost/python/args.hpp>
using namespace dlib;
using namespace std;
using namespace boost::python;
typedef matrix<double,0,1> sample_type;
typedef std::vector<std::pair<unsigned long,double> > sparse_vect;
......@@ -75,26 +72,27 @@ template <typename trainer_type>
double get_c_class2 ( const trainer_type& trainer) { return trainer.get_c_class2(); }
template <typename trainer_type>
class_<trainer_type> setup_trainer (
py::class_<trainer_type> setup_trainer (
py::module& m,
const std::string& name
)
{
return class_<trainer_type>(name.c_str())
return py::class_<trainer_type>(m, name.c_str())
.def("train", train<trainer_type>)
.def("set_c", set_c<trainer_type>)
.add_property("c_class1", get_c_class1<trainer_type>, set_c_class1<trainer_type>)
.add_property("c_class2", get_c_class2<trainer_type>, set_c_class2<trainer_type>)
.add_property("epsilon", get_epsilon<trainer_type>, set_epsilon<trainer_type>);
.def_property("c_class1", get_c_class1<trainer_type>, set_c_class1<trainer_type>)
.def_property("c_class2", get_c_class2<trainer_type>, set_c_class2<trainer_type>)
.def_property("epsilon", get_epsilon<trainer_type>, set_epsilon<trainer_type>);
}
template <typename trainer_type>
class_<trainer_type> setup_trainer2 (
py::class_<trainer_type> setup_trainer2 (
py::module& m,
const std::string& name
)
{
return setup_trainer<trainer_type>(name)
.add_property("cache_size", get_cache_size<trainer_type>, set_cache_size<trainer_type>);
return setup_trainer<trainer_type>(m, name)
.def_property("cache_size", get_cache_size<trainer_type>, set_cache_size<trainer_type>);
}
void set_gamma (
......@@ -165,79 +163,80 @@ const binary_test _cross_validate_trainer_t (
// ----------------------------------------------------------------------------------------
void bind_svm_c_trainer()
void bind_svm_c_trainer(py::module& m)
{
using boost::python::arg;
namespace py = pybind11;
{
typedef svm_c_trainer<radial_basis_kernel<sample_type> > T;
setup_trainer2<T>("svm_c_trainer_radial_basis")
.add_property("gamma", get_gamma, set_gamma);
def("cross_validate_trainer", _cross_validate_trainer<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds")));
def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds"),arg("num_threads")));
setup_trainer2<T>(m, "svm_c_trainer_radial_basis")
.def_property("gamma", get_gamma, set_gamma);
m.def("cross_validate_trainer", _cross_validate_trainer<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"));
m.def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"),py::arg("num_threads"));
}
{
typedef svm_c_trainer<sparse_radial_basis_kernel<sparse_vect> > T;
setup_trainer2<T>("svm_c_trainer_sparse_radial_basis")
.add_property("gamma", get_gamma_sparse, set_gamma_sparse);
def("cross_validate_trainer", _cross_validate_trainer<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds")));
def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds"),arg("num_threads")));
setup_trainer2<T>(m, "svm_c_trainer_sparse_radial_basis")
.def_property("gamma", get_gamma_sparse, set_gamma_sparse);
m.def("cross_validate_trainer", _cross_validate_trainer<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"));
m.def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"),py::arg("num_threads"));
}
{
typedef svm_c_trainer<histogram_intersection_kernel<sample_type> > T;
setup_trainer2<T>("svm_c_trainer_histogram_intersection");
def("cross_validate_trainer", _cross_validate_trainer<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds")));
def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds"),arg("num_threads")));
setup_trainer2<T>(m, "svm_c_trainer_histogram_intersection");
m.def("cross_validate_trainer", _cross_validate_trainer<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"));
m.def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"),py::arg("num_threads"));
}
{
typedef svm_c_trainer<sparse_histogram_intersection_kernel<sparse_vect> > T;
setup_trainer2<T>("svm_c_trainer_sparse_histogram_intersection");
def("cross_validate_trainer", _cross_validate_trainer<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds")));
def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds"),arg("num_threads")));
setup_trainer2<T>(m, "svm_c_trainer_sparse_histogram_intersection");
m.def("cross_validate_trainer", _cross_validate_trainer<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"));
m.def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"),py::arg("num_threads"));
}
{
typedef svm_c_linear_trainer<linear_kernel<sample_type> > T;
setup_trainer<T>("svm_c_trainer_linear")
.add_property("max_iterations", &T::get_max_iterations, &T::set_max_iterations)
.add_property("force_last_weight_to_1", &T::forces_last_weight_to_1, &T::force_last_weight_to_1)
.add_property("learns_nonnegative_weights", &T::learns_nonnegative_weights, &T::set_learns_nonnegative_weights)
.add_property("has_prior", &T::has_prior)
setup_trainer<T>(m, "svm_c_trainer_linear")
.def(py::init())
.def_property("max_iterations", &T::get_max_iterations, &T::set_max_iterations)
.def_property("force_last_weight_to_1", &T::forces_last_weight_to_1, &T::force_last_weight_to_1)
.def_property("learns_nonnegative_weights", &T::learns_nonnegative_weights, &T::set_learns_nonnegative_weights)
.def_property_readonly("has_prior", &T::has_prior)
.def("set_prior", &T::set_prior)
.def("be_verbose", &T::be_verbose)
.def("be_quiet", &T::be_quiet);
def("cross_validate_trainer", _cross_validate_trainer<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds")));
def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds"),arg("num_threads")));
m.def("cross_validate_trainer", _cross_validate_trainer<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"));
m.def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"),py::arg("num_threads"));
}
{
typedef svm_c_linear_trainer<sparse_linear_kernel<sparse_vect> > T;
setup_trainer<T>("svm_c_trainer_sparse_linear")
.add_property("max_iterations", &T::get_max_iterations, &T::set_max_iterations)
.add_property("force_last_weight_to_1", &T::forces_last_weight_to_1, &T::force_last_weight_to_1)
.add_property("learns_nonnegative_weights", &T::learns_nonnegative_weights, &T::set_learns_nonnegative_weights)
.add_property("has_prior", &T::has_prior)
setup_trainer<T>(m, "svm_c_trainer_sparse_linear")
.def_property("max_iterations", &T::get_max_iterations, &T::set_max_iterations)
.def_property("force_last_weight_to_1", &T::forces_last_weight_to_1, &T::force_last_weight_to_1)
.def_property("learns_nonnegative_weights", &T::learns_nonnegative_weights, &T::set_learns_nonnegative_weights)
.def_property_readonly("has_prior", &T::has_prior)
.def("set_prior", &T::set_prior)
.def("be_verbose", &T::be_verbose)
.def("be_quiet", &T::be_quiet);
def("cross_validate_trainer", _cross_validate_trainer<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds")));
def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
(arg("trainer"),arg("x"),arg("y"),arg("folds"),arg("num_threads")));
m.def("cross_validate_trainer", _cross_validate_trainer<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"));
m.def("cross_validate_trainer_threaded", _cross_validate_trainer_t<T>,
py::arg("trainer"),py::arg("x"),py::arg("y"),py::arg("folds"),py::arg("num_threads"));
}
}
......
......@@ -2,20 +2,22 @@
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/python.h>
#include <boost/shared_ptr.hpp>
#include <dlib/matrix.h>
#include <dlib/svm.h>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include "testing_results.h"
#include <boost/python/args.hpp>
#include <pybind11/stl_bind.h>
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
typedef matrix<double,0,1> sample_type;
typedef std::vector<std::pair<unsigned long,double> > sparse_vect;
typedef std::vector<ranking_pair<sample_type> > ranking_pairs;
typedef std::vector<ranking_pair<sparse_vect> > sparse_ranking_pairs;
PYBIND11_MAKE_OPAQUE(ranking_pairs);
PYBIND11_MAKE_OPAQUE(sparse_ranking_pairs);
// ----------------------------------------------------------------------------------------
......@@ -23,7 +25,7 @@ namespace dlib
{
template <typename T>
bool operator== (
const ranking_pair<T>& ,
const ranking_pair<T>&,
const ranking_pair<T>&
)
{
......@@ -84,16 +86,18 @@ double get_c (const trainer_type& trainer)
template <typename trainer>
void add_ranker (
py::module& m,
const char* name
)
{
class_<trainer>(name)
.add_property("epsilon", get_epsilon<trainer>, set_epsilon<trainer>)
.add_property("c", get_c<trainer>, set_c<trainer>)
.add_property("max_iterations", &trainer::get_max_iterations, &trainer::set_max_iterations)
.add_property("force_last_weight_to_1", &trainer::forces_last_weight_to_1, &trainer::force_last_weight_to_1)
.add_property("learns_nonnegative_weights", &trainer::learns_nonnegative_weights, &trainer::set_learns_nonnegative_weights)
.add_property("has_prior", &trainer::has_prior)
py::class_<trainer>(m, name)
.def(py::init())
.def_property("epsilon", get_epsilon<trainer>, set_epsilon<trainer>)
.def_property("c", get_c<trainer>, set_c<trainer>)
.def_property("max_iterations", &trainer::get_max_iterations, &trainer::set_max_iterations)
.def_property("force_last_weight_to_1", &trainer::forces_last_weight_to_1, &trainer::force_last_weight_to_1)
.def_property("learns_nonnegative_weights", &trainer::learns_nonnegative_weights, &trainer::set_learns_nonnegative_weights)
.def_property_readonly("has_prior", &trainer::has_prior)
.def("train", train1<trainer>)
.def("train", train2<trainer>)
.def("set_prior", &trainer::set_prior)
......@@ -120,42 +124,41 @@ const ranking_test _cross_ranking_validate_trainer (
// ----------------------------------------------------------------------------------------
void bind_svm_rank_trainer()
void bind_svm_rank_trainer(py::module& m)
{
using boost::python::arg;
class_<ranking_pair<sample_type> >("ranking_pair")
.add_property("relevant", &ranking_pair<sample_type>::relevant)
.add_property("nonrelevant", &ranking_pair<sample_type>::nonrelevant)
.def_pickle(serialize_pickle<ranking_pair<sample_type> >());
class_<ranking_pair<sparse_vect> >("sparse_ranking_pair")
.add_property("relevant", &ranking_pair<sparse_vect>::relevant)
.add_property("nonrelevant", &ranking_pair<sparse_vect>::nonrelevant)
.def_pickle(serialize_pickle<ranking_pair<sparse_vect> >());
typedef std::vector<ranking_pair<sample_type> > ranking_pairs;
class_<ranking_pairs>("ranking_pairs")
.def(vector_indexing_suite<ranking_pairs>())
py::class_<ranking_pair<sample_type> >(m, "ranking_pair")
.def(py::init())
.def_readwrite("relevant", &ranking_pair<sample_type>::relevant)
.def_readwrite("nonrelevant", &ranking_pair<sample_type>::nonrelevant)
.def(py::pickle(&getstate<ranking_pair<sample_type>>, &setstate<ranking_pair<sample_type>>));
py::class_<ranking_pair<sparse_vect> >(m, "sparse_ranking_pair")
.def(py::init())
.def_readwrite("relevant", &ranking_pair<sparse_vect>::relevant)
.def_readwrite("nonrelevant", &ranking_pair<sparse_vect>::nonrelevant)
.def(py::pickle(&getstate<ranking_pair<sparse_vect>>, &setstate<ranking_pair<sparse_vect>>));
py::bind_vector<ranking_pairs>(m, "ranking_pairs")
.def("clear", &ranking_pairs::clear)
.def("resize", resize<ranking_pairs>)
.def_pickle(serialize_pickle<ranking_pairs>());
.def("extend", extend_vector_with_python_list<ranking_pair<sample_type>>)
.def(py::pickle(&getstate<ranking_pairs>, &setstate<ranking_pairs>));
typedef std::vector<ranking_pair<sparse_vect> > sparse_ranking_pairs;
class_<sparse_ranking_pairs>("sparse_ranking_pairs")
.def(vector_indexing_suite<sparse_ranking_pairs>())
py::bind_vector<sparse_ranking_pairs>(m, "sparse_ranking_pairs")
.def("clear", &sparse_ranking_pairs::clear)
.def("resize", resize<sparse_ranking_pairs>)
.def_pickle(serialize_pickle<sparse_ranking_pairs>());
.def("extend", extend_vector_with_python_list<ranking_pair<sparse_vect>>)
.def(py::pickle(&getstate<sparse_ranking_pairs>, &setstate<sparse_ranking_pairs>));
add_ranker<svm_rank_trainer<linear_kernel<sample_type> > >("svm_rank_trainer");
add_ranker<svm_rank_trainer<sparse_linear_kernel<sparse_vect> > >("svm_rank_trainer_sparse");
add_ranker<svm_rank_trainer<linear_kernel<sample_type> > >(m, "svm_rank_trainer");
add_ranker<svm_rank_trainer<sparse_linear_kernel<sparse_vect> > >(m, "svm_rank_trainer_sparse");
def("cross_validate_ranking_trainer", &_cross_ranking_validate_trainer<
m.def("cross_validate_ranking_trainer", &_cross_ranking_validate_trainer<
svm_rank_trainer<linear_kernel<sample_type> >,sample_type>,
(arg("trainer"), arg("samples"), arg("folds")) );
def("cross_validate_ranking_trainer", &_cross_ranking_validate_trainer<
py::arg("trainer"), py::arg("samples"), py::arg("folds") );
m.def("cross_validate_ranking_trainer", &_cross_ranking_validate_trainer<
svm_rank_trainer<sparse_linear_kernel<sparse_vect> > ,sparse_vect>,
(arg("trainer"), arg("samples"), arg("folds")) );
py::arg("trainer"), py::arg("samples"), py::arg("folds") );
}
......
......@@ -3,13 +3,11 @@
#include <dlib/python.h>
#include <dlib/matrix.h>
#include <boost/python/args.hpp>
#include <dlib/svm.h>
using namespace dlib;
using namespace std;
using namespace boost::python;
namespace py = pybind11;
template <typename psi_type>
class svm_struct_prob : public structural_svm_problem<matrix<double,0,1>, psi_type>
......@@ -20,7 +18,7 @@ class svm_struct_prob : public structural_svm_problem<matrix<double,0,1>, psi_ty
typedef typename base::scalar_type scalar_type;
public:
svm_struct_prob (
object& problem_,
py::object& problem_,
long num_dimensions_,
long num_samples_
) :
......@@ -40,7 +38,7 @@ public:
feature_vector_type& psi
) const
{
psi = extract<feature_vector_type&>(problem.attr("get_truth_joint_feature_vector")(idx));
psi = problem.attr("get_truth_joint_feature_vector")(idx).template cast<feature_vector_type&>();
}
virtual void separation_oracle (
......@@ -50,51 +48,49 @@ public:
feature_vector_type& psi
) const
{
object res = problem.attr("separation_oracle")(idx,boost::ref(current_solution));
py::object res = problem.attr("separation_oracle")(idx,std::ref(current_solution));
pyassert(len(res) == 2, "separation_oracle() must return two objects, the loss and the psi vector");
py::tuple t = res.cast<py::tuple>();
// let the user supply the output arguments in any order.
if (extract<double>(res[0]).check())
{
loss = extract<double>(res[0]);
psi = extract<feature_vector_type&>(res[1]);
}
else
{
psi = extract<feature_vector_type&>(res[0]);
loss = extract<double>(res[1]);
}
try {
loss = t[0].cast<scalar_type>();
psi = t[1].cast<feature_vector_type&>();
} catch(py::cast_error &e) {
psi = t[0].cast<feature_vector_type&>();
loss = t[1].cast<scalar_type>();
}
}
private:
const long num_dimensions;
const long num_samples;
object& problem;
py::object& problem;
};
// ----------------------------------------------------------------------------------------
template <typename psi_type>
matrix<double,0,1> solve_structural_svm_problem_impl(
object problem
py::object problem
)
{
const double C = extract<double>(problem.attr("C"));
const bool be_verbose = hasattr(problem,"be_verbose") && extract<bool>(problem.attr("be_verbose"));
const bool use_sparse_feature_vectors = hasattr(problem,"use_sparse_feature_vectors") &&
extract<bool>(problem.attr("use_sparse_feature_vectors"));
const bool learns_nonnegative_weights = hasattr(problem,"learns_nonnegative_weights") &&
extract<bool>(problem.attr("learns_nonnegative_weights"));
const double C = problem.attr("C").cast<double>();
const bool be_verbose = py::hasattr(problem,"be_verbose") && problem.attr("be_verbose").cast<bool>();
const bool use_sparse_feature_vectors = py::hasattr(problem,"use_sparse_feature_vectors") &&
problem.attr("use_sparse_feature_vectors").cast<bool>();
const bool learns_nonnegative_weights = py::hasattr(problem,"learns_nonnegative_weights") &&
problem.attr("learns_nonnegative_weights").cast<bool>();
double eps = 0.001;
unsigned long max_cache_size = 10;
if (hasattr(problem, "epsilon"))
eps = extract<double>(problem.attr("epsilon"));
if (hasattr(problem, "max_cache_size"))
max_cache_size = extract<double>(problem.attr("max_cache_size"));
if (py::hasattr(problem, "epsilon"))
eps = problem.attr("epsilon").cast<double>();
if (py::hasattr(problem, "max_cache_size"))
max_cache_size = problem.attr("max_cache_size").cast<double>();
const long num_samples = extract<long>(problem.attr("num_samples"));
const long num_dimensions = extract<long>(problem.attr("num_dimensions"));
const long num_samples = problem.attr("num_samples").cast<long>();
const long num_dimensions = problem.attr("num_dimensions").cast<long>();
pyassert(num_samples > 0, "You can't train a Structural-SVM if you don't have any training samples.");
......@@ -129,12 +125,11 @@ matrix<double,0,1> solve_structural_svm_problem_impl(
// ----------------------------------------------------------------------------------------
matrix<double,0,1> solve_structural_svm_problem(
object problem
py::object problem
)
{
// Check if the python code is using sparse or dense vectors to represent PSI()
extract<matrix<double,0,1> > isdense(problem.attr("get_truth_joint_feature_vector")(0));
if (isdense.check())
if (py::isinstance<matrix<double,0,1>>(problem.attr("get_truth_joint_feature_vector")(0)))
return solve_structural_svm_problem_impl<matrix<double,0,1> >(problem);
else
return solve_structural_svm_problem_impl<std::vector<std::pair<unsigned long,double> > >(problem);
......@@ -142,11 +137,9 @@ matrix<double,0,1> solve_structural_svm_problem(
// ----------------------------------------------------------------------------------------
void bind_svm_struct()
void bind_svm_struct(py::module& m)
{
using boost::python::arg;
def("solve_structural_svm_problem",solve_structural_svm_problem, (arg("problem")),
m.def("solve_structural_svm_problem",solve_structural_svm_problem, py::arg("problem"),
"This function solves a structural SVM problem and returns the weight vector \n\
that defines the solution. See the example program python_examples/svm_struct.py \n\
for documentation about how to create a proper problem object. "
......
......@@ -2,19 +2,18 @@
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/python.h>
#include <boost/shared_ptr.hpp>
#include <dlib/matrix.h>
#include <boost/python/slice.hpp>
#include <dlib/geometry/vector.h>
#include <pybind11/stl_bind.h>
#include "indexing.h"
using namespace dlib;
using namespace std;
using namespace boost::python;
typedef matrix<double,0,1> cv;
PYBIND11_MAKE_OPAQUE(std::vector<point>);
void cv_set_size(cv& m, long s)
{
m.set_size(s);
......@@ -52,23 +51,20 @@ string cv__repr__ (const cv& v)
return sout.str();
}
boost::shared_ptr<cv> cv_from_object(object obj)
std::shared_ptr<cv> cv_from_object(py::object obj)
{
extract<long> thesize(obj);
if (thesize.check())
{
long nr = thesize;
boost::shared_ptr<cv> temp(new cv(nr));
try {
long nr = obj.cast<long>();
auto temp = std::make_shared<cv>(nr);
*temp = 0;
return temp;
}
else
{
} catch(py::cast_error &e) {
py::list li = obj.cast<py::list>();
const long nr = len(obj);
boost::shared_ptr<cv> temp(new cv(nr));
auto temp = std::make_shared<cv>(nr);
for ( long r = 0; r < nr; ++r)
{
(*temp)(r) = extract<double>(obj[r]);
(*temp)(r) = li[r].cast<double>();
}
return temp;
}
......@@ -88,7 +84,7 @@ void cv__setitem__(cv& c, long p, double val)
if (p > c.size()-1) {
PyErr_SetString( PyExc_IndexError, "index out of range"
);
boost::python::throw_error_already_set();
throw py::error_already_set();
}
c(p) = val;
}
......@@ -101,38 +97,29 @@ double cv__getitem__(cv& m, long r)
if (r > m.size()-1 || r < 0) {
PyErr_SetString( PyExc_IndexError, "index out of range"
);
boost::python::throw_error_already_set();
throw py::error_already_set();
}
return m(r);
}
// Implements Python slicing on dlib.vector (v[a:b:c]); returns a new vector
// holding a copy of the selected elements.
cv cv__getitem2__(cv& m, py::slice r)
{
    size_t start, stop, step, slicelength;
    if (!r.compute(m.size(), &start, &stop, &step, &slicelength))
        throw py::error_already_set();
    // NOTE(review): the size_t overload of slice::compute cannot represent a
    // negative step — confirm only forward slices are expected here.

    cv temp(slicelength);
    if (temp.size() == 0)
        return temp;

    for (size_t i = 0; i < slicelength; ++i) {
        temp(i) = m(start);
        start += step;
    }
    return temp;
}
boost::python::tuple cv_get_matrix_size(cv& m)
py::tuple cv_get_matrix_size(cv& m)
{
return boost::python::make_tuple(m.nr(), m.nc());
return py::make_tuple(m.nr(), m.nc());
}
// ----------------------------------------------------------------------------------------
......@@ -155,41 +142,41 @@ long point_x(const point& p) { return p.x(); }
// Read-only accessor for the y coordinate, exposed as the Python "y" property.
long point_y(const point& p)
{
    return p.y();
}
// ----------------------------------------------------------------------------------------
void bind_vector()
void bind_vector(py::module& m)
{
using boost::python::arg;
{
class_<cv>("vector", "This object represents the mathematical idea of a column vector.", init<>())
py::class_<cv, std::shared_ptr<cv>>(m, "vector", "This object represents the mathematical idea of a column vector.")
.def(py::init())
.def("set_size", &cv_set_size)
.def("resize", &cv_set_size)
.def("__init__", make_constructor(&cv_from_object))
.def(py::init(&cv_from_object))
.def("__repr__", &cv__repr__)
.def("__str__", &cv__str__)
.def("__len__", &cv__len__)
.def("__getitem__", &cv__getitem__)
.def("__getitem__", &cv__getitem2__)
.def("__setitem__", &cv__setitem__)
.add_property("shape", &cv_get_matrix_size)
.def_pickle(serialize_pickle<cv>());
.def_property_readonly("shape", &cv_get_matrix_size)
.def(py::pickle(&getstate<cv>, &setstate<cv>));
def("dot", dotprod, "Compute the dot product between two dense column vectors.");
m.def("dot", &dotprod, "Compute the dot product between two dense column vectors.");
}
{
typedef point type;
class_<type>("point", "This object represents a single point of integer coordinates that maps directly to a dlib::point.")
.def(init<long,long>((arg("x"), arg("y"))))
py::class_<type>(m, "point", "This object represents a single point of integer coordinates that maps directly to a dlib::point.")
.def(py::init<long,long>(), py::arg("x"), py::arg("y"))
.def("__repr__", &point__repr__)
.def("__str__", &point__str__)
.add_property("x", &point_x, "The x-coordinate of the point.")
.add_property("y", &point_y, "The y-coordinate of the point.")
.def_pickle(serialize_pickle<type>());
.def_property_readonly("x", &point_x, "The x-coordinate of the point.")
.def_property_readonly("y", &point_y, "The y-coordinate of the point.")
.def(py::pickle(&getstate<type>, &setstate<type>));
}
{
typedef std::vector<point> type;
class_<type>("points", "An array of point objects.")
.def(vector_indexing_suite<type>())
py::bind_vector<type>(m, "points", "An array of point objects.")
.def("clear", &type::clear)
.def("resize", resize<type>)
.def_pickle(serialize_pickle<type>());
.def("extend", extend_vector_with_python_list<point>)
.def(py::pickle(&getstate<type>, &setstate<type>));
}
}
from dlib import array
try:
    import cPickle as pickle  # Use cPickle on Python 2.7
except ImportError:
    import pickle
try:
    from types import FloatType  # Python 2 alias for the builtin float type
except ImportError:
    FloatType = float  # Python 3: types.FloatType was removed; float is the same type
from pytest import raises
def test_array_init_with_number():
    # A numeric argument allocates that many zero-valued float entries.
    arr = array(5)
    assert len(arr) == 5
    for idx in range(5):
        element = arr[idx]
        assert element == 0
        assert type(element) == FloatType


def test_array_init_with_negative_number():
    # A negative size is rejected with a MemoryError.
    with raises(MemoryError):
        array(-5)


def test_array_init_with_zero():
    # Zero is a valid size and yields an empty array.
    arr = array(0)
    assert len(arr) == 0
def test_array_init_with_list():
    # Each list element becomes a float entry, in order.
    arr = array([0, 1, 2, 3, 4])
    assert len(arr) == 5
    for position, value in enumerate(arr):
        assert position == value
        assert type(value) == FloatType


def test_array_init_with_empty_list():
    # An empty list produces an empty array.
    arr = array([])
    assert len(arr) == 0


def test_array_init_without_argument():
    # Default construction yields an empty array.
    arr = array()
    assert len(arr) == 0


def test_array_init_with_tuple():
    # Tuples are accepted just like lists.
    arr = array((0, 1, 2, 3, 4))
    for position, value in enumerate(arr):
        assert position == value
        assert type(value) == FloatType
def test_array_serialization_empty():
    # Round-trip an empty array through pickle.
    # cPickle with protocol 2 required for Python 2.7
    # see http://pybind11.readthedocs.io/en/stable/advanced/classes.html#custom-constructors
    empty = array()
    restored = pickle.loads(pickle.dumps(empty, 2))
    assert empty == restored


def test_array_serialization():
    # Round-trip a populated array through pickle.
    original = array([0, 1, 2, 3, 4])
    restored = pickle.loads(pickle.dumps(original, 2))
    assert original == restored


def test_array_extend():
    # extend() appends each element of a Python iterable as a float.
    arr = array()
    arr.extend([0, 1, 2, 3, 4])
    assert len(arr) == 5
    for position, value in enumerate(arr):
        assert position == value
        assert type(value) == FloatType
def test_array_string_representations_empty():
    # An empty array prints nothing and has a bare repr.
    empty = array()
    assert str(empty) == ""
    assert repr(empty) == "array[]"


def test_array_string_representations():
    # str() lists one element per line; repr() is a bracketed list.
    arr = array([1, 2, 3])
    assert str(arr) == "1\n2\n3"
    assert repr(arr) == "array[1, 2, 3]"


def test_array_clear():
    # clear() empties the array in place.
    arr = array(10)
    arr.clear()
    assert len(arr) == 0


def test_array_resize():
    # resize() grows the array, zero-filling every slot.
    arr = array(10)
    arr.resize(100)
    assert len(arr) == 100
    for slot in range(100):
        assert arr[slot] == 0
from dlib import matrix
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
from pytest import raises
import numpy
def test_matrix_empty_init():
    # A default-constructed matrix is 0x0 and prints as empty.
    mat = matrix()
    assert mat.nr() == 0
    assert mat.nc() == 0
    assert mat.shape == (0, 0)
    assert len(mat) == 0
    assert repr(mat) == "< dlib.matrix containing: >"
    assert str(mat) == ""
def test_matrix_from_list():
    # A nested list becomes a row-major 3x3 matrix that survives pickling.
    rows = [[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]]
    mat = matrix(rows)
    assert mat.nr() == 3
    assert mat.nc() == 3
    assert mat.shape == (3, 3)
    assert len(mat) == 3
    assert repr(mat) == "< dlib.matrix containing: \n0 1 2 \n3 4 5 \n6 7 8 >"
    assert str(mat) == "0 1 2 \n3 4 5 \n6 7 8"
    restored = pickle.loads(pickle.dumps(mat, 2))
    for r in range(3):
        for c in range(3):
            assert mat[r][c] == restored[r][c]
def test_matrix_from_list_with_invalid_rows():
    # Rows of unequal length are rejected.
    with raises(ValueError):
        matrix([[0, 1, 2],
                [3, 4],
                [5, 6, 7]])


def test_matrix_from_list_as_column_vector():
    # A flat list is interpreted as a single-column matrix.
    mat = matrix([0, 1, 2])
    assert mat.nr() == 3
    assert mat.nc() == 1
    assert mat.shape == (3, 1)
    assert len(mat) == 3
    assert repr(mat) == "< dlib.matrix containing: \n0 \n1 \n2 >"
    assert str(mat) == "0 \n1 \n2"
def test_matrix_from_object_with_2d_shape():
    # An object exposing a 2D shape (e.g. a numpy array) is accepted.
    source = numpy.array([[0, 1, 2],
                          [3, 4, 5],
                          [6, 7, 8]])
    mat = matrix(source)
    assert mat.nr() == 3
    assert mat.nc() == 3
    assert mat.shape == (3, 3)
    assert len(mat) == 3
    assert repr(mat) == "< dlib.matrix containing: \n0 1 2 \n3 4 5 \n6 7 8 >"
    assert str(mat) == "0 1 2 \n3 4 5 \n6 7 8"


def test_matrix_from_object_without_2d_shape():
    # A 1D numpy array has no second shape dimension and is rejected.
    with raises(IndexError):
        flat = numpy.array([0, 1, 2])
        matrix(flat)


def test_matrix_from_object_without_shape():
    # Objects without a shape attribute are rejected.
    with raises(AttributeError):
        matrix("invalid")
def test_matrix_set_size():
    # set_size() reshapes to 5x5, zero-filled, and the result pickles cleanly.
    mat = matrix()
    mat.set_size(5, 5)
    assert mat.nr() == 5
    assert mat.nc() == 5
    assert mat.shape == (5, 5)
    assert len(mat) == 5
    assert repr(mat) == "< dlib.matrix containing: \n0 0 0 0 0 \n0 0 0 0 0 \n0 0 0 0 0 \n0 0 0 0 0 \n0 0 0 0 0 >"
    assert str(mat) == "0 0 0 0 0 \n0 0 0 0 0 \n0 0 0 0 0 \n0 0 0 0 0 \n0 0 0 0 0"
    restored = pickle.loads(pickle.dumps(mat, 2))
    for r in range(5):
        for c in range(5):
            assert mat[r][c] == restored[r][c]
from dlib import point, points
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
def test_point():
    # Positional construction, accessors, reprs, and pickle round-trip.
    pt = point(27, 42)
    assert repr(pt) == "point(27, 42)"
    assert str(pt) == "(27, 42)"
    assert pt.x == 27
    assert pt.y == 42
    restored = pickle.loads(pickle.dumps(pt, 2))
    assert restored.x == pt.x
    assert restored.y == pt.y


def test_point_init_kwargs():
    # Keyword arguments may be given in any order.
    pt = point(y=27, x=42)
    assert repr(pt) == "point(42, 27)"
    assert str(pt) == "(42, 27)"
    assert pt.x == 42
    assert pt.y == 27
def test_points():
    # points behaves like a resizable, pickleable list of point objects.
    pts = points()
    pts.resize(5)
    assert len(pts) == 5
    for i in range(5):
        assert pts[i].x == 0
        assert pts[i].y == 0
    pts.clear()
    assert len(pts) == 0
    pts.extend([point(1, 2), point(3, 4)])
    assert len(pts) == 2
    restored = pickle.loads(pickle.dumps(pts, 2))
    assert restored[0].x == 1
    assert restored[0].y == 2
    assert restored[1].x == 3
    assert restored[1].y == 4
from dlib import range, ranges, rangess
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
from pytest import raises
def test_range():
    # dlib.range is a [begin, end) integer range with len == end - begin.
    rng = range(0, 10)
    assert rng.begin == 0
    assert rng.end == 10
    assert str(rng) == "0, 10"
    assert repr(rng) == "dlib.range(0, 10)"
    assert len(rng) == 10
    restored = pickle.loads(pickle.dumps(rng, 2))
    for got, expected in zip(rng, restored):
        assert got == expected


# TODO: make this init parameterization an exception?
def test_range_wrong_order():
    # begin > end is currently allowed and yields a zero-length range.
    rng = range(5, 0)
    assert rng.begin == 5
    assert rng.end == 0
    assert str(rng) == "5, 0"
    assert repr(rng) == "dlib.range(5, 0)"
    assert len(rng) == 0


def test_range_with_negative_elements():
    # Negative endpoints are rejected with a TypeError.
    with raises(TypeError):
        range(-1, 1)
    with raises(TypeError):
        range(1, -1)
def test_ranges():
    # ranges is a resizable, pickleable container of range objects.
    rs = ranges()
    assert len(rs) == 0
    rs.resize(5)
    assert len(rs) == 5
    for rng in rs:
        assert rng.begin == 0
        assert rng.end == 0
    rs.clear()
    assert len(rs) == 0
    rs.extend([range(1, 2), range(3, 4)])
    assert rs[0].begin == 1
    assert rs[0].end == 2
    assert rs[1].begin == 3
    assert rs[1].end == 4
    restored = pickle.loads(pickle.dumps(rs, 2))
    assert rs == restored
def test_rangess():
    # rangess nests ranges containers one level deeper.
    nested = rangess()
    assert len(nested) == 0
    nested.resize(5)
    assert len(nested) == 5
    for inner in nested:
        assert len(inner) == 0
    nested.clear()
    assert len(nested) == 0
    first = ranges()
    first.append(range(1, 2))
    first.append(range(3, 4))
    second = ranges()
    second.append(range(5, 6))
    second.append(range(7, 8))
    nested.extend([first, second])
    assert nested[0][0].begin == 1
    assert nested[0][1].begin == 3
    assert nested[1][0].begin == 5
    assert nested[1][1].begin == 7
    assert nested[0][0].end == 2
    assert nested[0][1].end == 4
    assert nested[1][0].end == 6
    assert nested[1][1].end == 8
    restored = pickle.loads(pickle.dumps(nested, 2))
    assert nested == restored
from dlib import rgb_pixel
def test_rgb_pixel():
    # Positional construction: (red, green, blue).
    px = rgb_pixel(0, 50, 100)
    assert px.red == 0
    assert px.green == 50
    assert px.blue == 100
    assert str(px) == "red: 0, green: 50, blue: 100"
    assert repr(px) == "rgb_pixel(0,50,100)"
    # Keyword construction may list the channels in any order.
    px = rgb_pixel(blue=0, red=50, green=100)
    assert px.red == 50
    assert px.green == 100
    assert px.blue == 0
    assert str(px) == "red: 50, green: 100, blue: 0"
    assert repr(px) == "rgb_pixel(50,100,0)"
    # Channels are individually writable.
    px.red = 100
    px.green = 0
    px.blue = 50
    assert px.red == 100
    assert px.green == 0
    assert px.blue == 50
    assert str(px) == "red: 100, green: 0, blue: 50"
    assert repr(px) == "rgb_pixel(100,0,50)"
from dlib import pair, make_sparse_vector, sparse_vector, sparse_vectors, sparse_vectorss
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
from pytest import approx
def test_pair():
    # pair holds a mutable (first, second) index/value combination.
    pr = pair(4, .9)
    assert pr.first == 4
    assert pr.second == .9
    pr.first = 3
    pr.second = .4
    assert pr.first == 3
    assert pr.second == .4
    assert str(pr) == "3: 0.4"
    assert repr(pr) == "dlib.pair(3, 0.4)"
    restored = pickle.loads(pickle.dumps(pr, 2))
    assert restored.first == pr.first
    assert restored.second == pr.second
def test_sparse_vector():
    # make_sparse_vector sorts entries by index and combines duplicates.
    sv = sparse_vector()
    for entry in [pair(3, .1), pair(3, .2), pair(2, .3), pair(1, .4)]:
        sv.append(entry)
    assert len(sv) == 4
    make_sparse_vector(sv)
    # The two index-3 entries (.1 and .2) collapsed into a single ~.3 entry.
    assert len(sv) == 3
    assert sv[0].first == 1
    assert sv[0].second == .4
    assert sv[1].first == 2
    assert sv[1].second == .3
    assert sv[2].first == 3
    assert sv[2].second == approx(.3)
    assert str(sv) == "1: 0.4\n2: 0.3\n3: 0.3"
    assert repr(sv) == "< dlib.sparse_vector containing: \n1: 0.4\n2: 0.3\n3: 0.3 >"
def test_sparse_vectors():
    # sparse_vectors is a resizable, pickleable container of sparse_vector.
    svs = sparse_vectors()
    assert len(svs) == 0
    svs.resize(5)
    for sv in svs:
        assert len(sv) == 0
    svs.clear()
    assert len(svs) == 0
    svs.extend([sparse_vector([pair(1, 2), pair(3, 4)]), sparse_vector([pair(5, 6), pair(7, 8)])])
    assert len(svs) == 2
    expected = [[(1, 2), (3, 4)], [(5, 6), (7, 8)]]
    for row, entries in enumerate(expected):
        for col, (first, second) in enumerate(entries):
            assert svs[row][col].first == first
            assert svs[row][col].second == second
    restored = pickle.loads(pickle.dumps(svs, 2))
    assert restored == svs
def test_sparse_vectorss():
    # sparse_vectorss nests sparse_vectors one level deeper.
    svss = sparse_vectorss()
    assert len(svss) == 0
    svss.resize(5)
    for svs in svss:
        assert len(svs) == 0
    svss.clear()
    assert len(svss) == 0
    svss.extend([sparse_vectors([sparse_vector([pair(1, 2), pair(3, 4)]), sparse_vector([pair(5, 6), pair(7, 8)])])])
    assert len(svss) == 1
    expected = [[(1, 2), (3, 4)], [(5, 6), (7, 8)]]
    for row, entries in enumerate(expected):
        for col, (first, second) in enumerate(entries):
            assert svss[0][row][col].first == first
            assert svss[0][row][col].second == second
    restored = pickle.loads(pickle.dumps(svss, 2))
    assert restored == svss
from dlib import vector, vectors, vectorss, dot
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
from pytest import raises
def test_vector_empty_init():
    # Default construction yields a 0-length column vector.
    vec = vector()
    assert len(vec) == 0
    assert vec.shape == (0, 1)
    assert str(vec) == ""
    assert repr(vec) == "dlib.vector([])"


def test_vector_init_with_number():
    # A numeric argument sets the length; entries start at zero.
    vec = vector(3)
    assert len(vec) == 3
    assert vec.shape == (3, 1)
    assert str(vec) == "0\n0\n0"
    assert repr(vec) == "dlib.vector([0, 0, 0])"
def test_vector_set_size():
    # set_size()/resize() change the length; new entries are zero.
    vec = vector(3)
    vec.set_size(0)
    assert len(vec) == 0
    assert vec.shape == (0, 1)
    vec.resize(10)
    assert len(vec) == 10
    assert vec.shape == (10, 1)
    for i in range(10):
        assert vec[i] == 0


def test_vector_init_with_list():
    # A list of numbers becomes the vector's elements.
    vec = vector([1, 2, 3])
    assert len(vec) == 3
    assert vec.shape == (3, 1)
    assert str(vec) == "1\n2\n3"
    assert repr(vec) == "dlib.vector([1, 2, 3])"
def test_vector_getitem():
    # Indexing supports negative positions, Python style.
    vec = vector([1, 2, 3])
    assert vec[0] == 1
    assert vec[-1] == 3
    assert vec[1] == vec[-2]


def test_vector_slice():
    # Slicing copies the selected elements into a new vector.
    vec = vector([1, 2, 3, 4, 5])
    cases = [
        (slice(1, 4), [2, 3, 4]),       # plain forward slice
        (slice(-3, -1), [3, 4]),        # negative endpoints
        (slice(1, -2), [2, 3]),         # mixed endpoints
    ]
    for spec, expected in cases:
        part = vec[spec]
        assert len(part) == len(expected)
        for pos, value in enumerate(expected):
            assert part[pos] == value
def test_vector_invalid_getitem():
    # Out-of-range indices raise IndexError in both directions.
    vec = vector([1, 2, 3])
    with raises(IndexError):
        vec[-4]
    with raises(IndexError):
        vec[3]


def test_vector_init_with_negative_number():
    # A negative length is rejected with a MemoryError.
    with raises(MemoryError):
        vector(-3)
def test_dot():
    # dot() computes the inner product of two dense vectors.
    east = vector([1, 0])
    north = vector([0, 1])
    west = vector([-1, 0])
    assert dot(east, east) == 1
    assert dot(east, north) == 0
    assert dot(east, west) == -1


def test_vector_serialization():
    # A pickled vector prints identically after restoration.
    vec = vector([1, 2, 3])
    restored = pickle.loads(pickle.dumps(vec, 2))
    assert str(vec) == str(restored)
def generate_test_vectors():
    # Helper: a vectors container holding three known 3-element vectors.
    vs = vectors()
    for row in ([0, 1, 2], [3, 4, 5], [6, 7, 8]):
        vs.append(vector(row))
    assert len(vs) == 3
    return vs


def generate_test_vectorss():
    # Helper: a vectorss container holding three copies of the test vectors.
    vss = vectorss()
    for _ in range(3):
        vss.append(generate_test_vectors())
    assert len(vss) == 3
    return vss
def test_vectors_serialization():
    # vectors containers survive a pickle round-trip intact.
    vs = generate_test_vectors()
    restored = pickle.loads(pickle.dumps(vs, 2))
    assert vs == restored


def test_vectors_clear():
    # clear() removes every element.
    vs = generate_test_vectors()
    vs.clear()
    assert len(vs) == 0


def test_vectors_resize():
    # resize() grows the container with empty vectors.
    vs = vectors()
    vs.resize(100)
    assert len(vs) == 100
    for i in range(100):
        assert len(vs[i]) == 0


def test_vectors_extend():
    # extend() appends each vector from a Python list.
    vs = vectors()
    vs.extend([vector([1, 2, 3]), vector([4, 5, 6])])
    assert len(vs) == 2
def test_vectorss_serialization():
    # vectorss containers survive a pickle round-trip intact.
    vss = generate_test_vectorss()
    restored = pickle.loads(pickle.dumps(vss, 2))
    assert vss == restored


def test_vectorss_clear():
    # clear() removes every nested container.
    vss = generate_test_vectorss()
    vss.clear()
    assert len(vss) == 0


def test_vectorss_resize():
    # resize() grows the container with empty vectors containers.
    vss = vectorss()
    vss.resize(100)
    assert len(vss) == 100
    for i in range(100):
        assert len(vss[i]) == 0


def test_vectorss_extend():
    # extend() appends whole vectors containers.
    vss = vectorss()
    vss.extend([generate_test_vectors(), generate_test_vectors()])
    assert len(vss) == 2
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment