Commit 37c815f6 authored by Davis King's avatar Davis King
Browse files

All I did in this change was rename calls to array_to_matrix(),

vector_to_matrix(), pointer_to_column_vector(), and pointer_to_matrix() to
mat() (in addition to adding a few more mat() overloads for certain things).
parent 0d970e47
......@@ -30,7 +30,7 @@ namespace dlib
requires
- stop_strategy == an object that defines a stop strategy such as one of
the objects from dlib/optimization/optimization_stop_strategies_abstract.h
- list == a matrix or something convertible to a matrix via vector_to_matrix()
- list == a matrix or something convertible to a matrix via mat()
such as a std::vector.
- is_vector(list) == true
- list.size() > 0
......@@ -78,7 +78,7 @@ namespace dlib
requires
- stop_strategy == an object that defines a stop strategy such as one of
the objects from dlib/optimization/optimization_stop_strategies_abstract.h
- list == a matrix or something convertible to a matrix via vector_to_matrix()
- list == a matrix or something convertible to a matrix via mat()
such as a std::vector.
- is_vector(list) == true
- list.size() > 0
......
......@@ -225,9 +225,9 @@ namespace dlib
// Note that we warm start this optimization by using the alpha from the last
// iteration as the starting point.
if (num_nonnegative != 0)
solve_qp4_using_smo(rowm(planes,range(0,num_nonnegative-1)), K, vector_to_matrix(bs), alpha, eps, sub_max_iter);
solve_qp4_using_smo(rowm(planes,range(0,num_nonnegative-1)), K, mat(bs), alpha, eps, sub_max_iter);
else
solve_qp_using_smo(K, vector_to_matrix(bs), alpha, eps, sub_max_iter);
solve_qp_using_smo(K, mat(bs), alpha, eps, sub_max_iter);
// construct the w that minimized the subproblem.
w = -(planes*alpha);
......@@ -245,14 +245,14 @@ namespace dlib
// Compute the lower bound on the true objective given to us by the cutting
// plane subproblem.
cp_obj = -0.5*trans(w)*w + trans(alpha)*vector_to_matrix(bs);
cp_obj = -0.5*trans(w)*w + trans(alpha)*mat(bs);
// If it has been a while since a cutting plane was an active constraint then
// we should throw it away.
while (max(vector_to_matrix(miss_count)) >= inactive_thresh)
while (max(mat(miss_count)) >= inactive_thresh)
{
const long idx = index_of_max(vector_to_matrix(miss_count));
const long idx = index_of_max(mat(miss_count));
bs.erase(bs.begin()+idx);
miss_count.erase(miss_count.begin()+idx);
K = removerc(K, idx, idx);
......
......@@ -74,7 +74,7 @@ namespace dlib
const T& y
)
{
return maximum_nu_impl(vector_to_matrix(y));
return maximum_nu_impl(mat(y));
}
template <
......@@ -84,7 +84,7 @@ namespace dlib
const T& y
)
{
return maximum_nu_impl(vector_to_matrix(y));
return maximum_nu_impl(mat(y));
}
// ----------------------------------------------------------------------------------------
......
......@@ -37,7 +37,7 @@ namespace dlib
);
/*!
requires
- T == a matrix object or an object convertible to a matrix via vector_to_matrix()
- T == a matrix object or an object convertible to a matrix via mat()
- is_col_vector(y) == true
- y.size() > 1
- sum((y == +1) + (y == -1)) == y.size()
......
......@@ -7,6 +7,7 @@
#include <vector>
#include "../algs.h"
#include "../serialize.h"
#include "../matrix/matrix_mat.h"
namespace dlib
{
......@@ -209,6 +210,19 @@ namespace dlib
}
}
// ----------------------------------------------------------------------------------------
template <
    typename T
    >
const matrix_op<op_array_to_mat<circular_buffer<T> > > mat (
    const circular_buffer<T>& m
)
{
    // Wrap the circular_buffer in a matrix expression so it can be used
    // directly with dlib's matrix utilities.  No copy of the buffer's
    // contents is made here; the expression refers back to m.
    typedef op_array_to_mat<circular_buffer<T> > op_type;
    return matrix_op<op_type>(op_type(m));
}
// ----------------------------------------------------------------------------------------
}
......
......@@ -222,6 +222,23 @@ namespace dlib
provides deserialization support
!*/
// ----------------------------------------------------------------------------------------
template <
typename T
>
const matrix_exp mat (
const circular_buffer<T>& m
);
/*!
    ensures
        - returns a matrix R that represents the contents of the given
          circular_buffer.  In particular:
            - is_col_vector(R) == true
            - R.size() == m.size()
            - for all valid r:
                R(r) == m[r]
!*/
// ----------------------------------------------------------------------------------------
}
......
......@@ -9,6 +9,7 @@
#include "../algs.h"
#include "../string.h"
#include "../serialize.h"
#include "../matrix/matrix_mat.h"
#include <iostream>
namespace dlib
......@@ -346,6 +347,19 @@ namespace dlib
return subset;
}
// ----------------------------------------------------------------------------------------
template <
    typename T
    >
const matrix_op<op_array_to_mat<random_subset_selector<T> > > mat (
    const random_subset_selector<T>& m
)
{
    // Expose the random_subset_selector's elements as a matrix expression.
    // The returned expression holds a reference to m rather than copying it.
    typedef op_array_to_mat<random_subset_selector<T> > op_type;
    return matrix_op<op_type>(op_type(m));
}
// ----------------------------------------------------------------------------------------
}
......
......@@ -351,6 +351,23 @@ namespace dlib
generator used by this function.
!*/
// ----------------------------------------------------------------------------------------
template <
typename T
>
const matrix_exp mat (
const random_subset_selector<T>& m
);
/*!
    ensures
        - returns a matrix R that represents the contents of the given
          random_subset_selector.  In particular:
            - is_col_vector(R) == true
            - R.size() == m.size()
            - for all valid r:
                R(r) == m[r]
!*/
// ----------------------------------------------------------------------------------------
}
......
......@@ -549,7 +549,7 @@ namespace dlib
<< "\n\t b.size(): " << b.size()
);
return mean(squared(matrix_cast<double>(vector_to_matrix(a))-matrix_cast<double>(vector_to_matrix(b))));
return mean(squared(matrix_cast<double>(mat(a))-matrix_cast<double>(mat(b))));
}
// ----------------------------------------------------------------------------------------
......@@ -737,8 +737,8 @@ namespace dlib
<< "\n\tthis: " << this
);
m = mean(vector_to_matrix(samples));
sd = reciprocal(sqrt(variance(vector_to_matrix(samples))));
m = mean(mat(samples));
sd = reciprocal(sqrt(variance(mat(samples))));
}
long in_vector_size (
......@@ -900,7 +900,7 @@ namespace dlib
<< "\n\tyou have to give a nonempty set of samples to this function"
<< "\n\tthis: " << this
);
train_pca_impl(vector_to_matrix(samples),eps);
train_pca_impl(mat(samples),eps);
}
long in_vector_size (
......
......@@ -101,7 +101,7 @@ namespace dlib
- a.size() == b.size()
ensures
- returns the mean squared error between all the elements of a and b.
(i.e. mean(squared(vector_to_matrix(a)-vector_to_matrix(b))))
(i.e. mean(squared(mat(a)-mat(b))))
!*/
// ----------------------------------------------------------------------------------------
......@@ -584,7 +584,7 @@ namespace dlib
requires
- samples.size() > 0
- samples == a column matrix or something convertible to a column
matrix via vector_to_matrix(). Also, x should contain
matrix via mat(). Also, x should contain
matrix_type objects that represent nonempty column vectors.
ensures
- #in_vector_size() == samples(0).nr()
......@@ -742,7 +742,7 @@ namespace dlib
- 0 < eps <= 1
- samples.size() > 0
- samples == a column matrix or something convertible to a column
matrix via vector_to_matrix(). Also, x should contain
matrix via mat(). Also, x should contain
matrix_type objects that represent nonempty column vectors.
ensures
- This object has learned how to normalize vectors that look like
......
......@@ -177,6 +177,18 @@ namespace dlib
const array_type& array;
const std::vector<unsigned long>& idx_set;
};
template <
    typename T
    >
const matrix_op<op_array_to_mat<array_subset_helper<T> > > mat (
    const array_subset_helper<T>& m
)
{
    // Adapt an array_subset_helper into a matrix expression so that
    // subset views can be consumed by matrix-based code.  The expression
    // references m; nothing is copied.
    typedef op_array_to_mat<array_subset_helper<T> > op_type;
    return matrix_op<op_type>(op_type(m));
}
}
// ----------------------------------------------------------------------------------------
......
......@@ -69,7 +69,7 @@ namespace dlib
const T& basis_samples
)
{
load_impl(kernel_, vector_to_matrix(basis_samples));
load_impl(kernel_, mat(basis_samples));
}
void load(
......@@ -150,7 +150,7 @@ namespace dlib
<< "\n\t this: " << this
);
return decision_function<kernel_type>(trans(weights)*vect, 0, kernel, vector_to_matrix(basis));
return decision_function<kernel_type>(trans(weights)*vect, 0, kernel, mat(basis));
}
template <typename EXP>
......@@ -168,7 +168,7 @@ namespace dlib
<< "\n\t this: " << this
);
return distance_function<kernel_type>(trans(weights)*vect, dot(vect,vect), kernel, vector_to_matrix(basis));
return distance_function<kernel_type>(trans(weights)*vect, dot(vect,vect), kernel, mat(basis));
}
const projection_function<kernel_type> get_projection_function (
......@@ -181,7 +181,7 @@ namespace dlib
<< "\n\t this: " << this
);
return projection_function<kernel_type>(weights, kernel, vector_to_matrix(basis));
return projection_function<kernel_type>(weights, kernel, mat(basis));
}
const matrix<scalar_type,0,0,mem_manager_type> get_transformation_to (
......@@ -242,7 +242,7 @@ namespace dlib
tmat = colm(target.weights, range(0,num1-1))*kernel_matrix(kernel, basis)*trans(weights);
empirical_kernel_map temp_ekm;
temp_ekm.load(kernel, rowm(vector_to_matrix(target.basis), range(num1,num2-1)));
temp_ekm.load(kernel, rowm(mat(target.basis), range(num1,num2-1)));
partial_projection = temp_ekm.get_projection_function();
......
......@@ -124,7 +124,7 @@ namespace dlib
);
/*!
requires
- T must be a dlib::matrix type or something convertible to a matrix via vector_to_matrix()
- T must be a dlib::matrix type or something convertible to a matrix via mat()
(e.g. a std::vector)
- is_vector(basis_samples) == true
- basis_samples.size() > 0
......
......@@ -150,7 +150,7 @@ namespace dlib
const label_matrix_type& labels
)
{
return rank_features_impl(kc, vector_to_matrix(samples), vector_to_matrix(labels));
return rank_features_impl(kc, mat(samples), mat(labels));
}
// ----------------------------------------------------------------------------------------
......@@ -266,14 +266,14 @@ namespace dlib
const long num_features
)
{
if (vector_to_matrix(samples).nr() > 0 && num_features == vector_to_matrix(samples)(0).nr())
if (mat(samples).nr() > 0 && num_features == mat(samples)(0).nr())
{
// if we are going to rank them all then might as well do the recursive feature elimination version
return rank_features_impl(kc, vector_to_matrix(samples), vector_to_matrix(labels));
return rank_features_impl(kc, mat(samples), mat(labels));
}
else
{
return rank_features_impl(kc, vector_to_matrix(samples), vector_to_matrix(labels), num_features);
return rank_features_impl(kc, mat(samples), mat(labels), num_features);
}
}
......@@ -416,8 +416,8 @@ namespace dlib
<< "\n\t is_binary_classification_problem(): " << is_binary_classification_problem(samples, labels)
);
return rank_features_helpers::find_gamma_with_big_centroid_gap_impl(vector_to_matrix(samples),
vector_to_matrix(labels),
return rank_features_helpers::find_gamma_with_big_centroid_gap_impl(mat(samples),
mat(labels),
initial_gamma,
num_sv,
false);
......@@ -443,8 +443,8 @@ namespace dlib
<< "\n\t is_binary_classification_problem(): " << is_binary_classification_problem(samples, labels)
);
return rank_features_helpers::find_gamma_with_big_centroid_gap_impl(vector_to_matrix(samples),
vector_to_matrix(labels),
return rank_features_helpers::find_gamma_with_big_centroid_gap_impl(mat(samples),
mat(labels),
initial_gamma,
num_sv,
true);
......
......@@ -28,8 +28,8 @@ namespace dlib
);
/*!
requires
- sample_matrix_type == a matrix or something convertible to a matrix via vector_to_matrix()
- label_matrix_type == a matrix or something convertible to a matrix via vector_to_matrix()
- sample_matrix_type == a matrix or something convertible to a matrix via mat()
- label_matrix_type == a matrix or something convertible to a matrix via mat()
- is_binary_classification_problem(samples, labels) == true
- kc.train(samples(0)) must be a valid expression. This means that
kc must use a kernel type that is capable of operating on the
......
......@@ -337,10 +337,10 @@ namespace dlib
) const
{
refresh_bias();
return distance_function<kernel_type>(vector_to_matrix(alpha),
return distance_function<kernel_type>(mat(alpha),
bias,
kernel,
vector_to_matrix(dictionary));
mat(dictionary));
}
private:
......@@ -352,7 +352,7 @@ namespace dlib
{
bias_is_stale = false;
// recompute the bias term
bias = sum(pointwise_multiply(K, vector_to_matrix(alpha)*trans(vector_to_matrix(alpha))));
bias = sum(pointwise_multiply(K, mat(alpha)*trans(mat(alpha))));
}
}
......@@ -396,7 +396,7 @@ namespace dlib
if (do_test)
{
refresh_bias();
test_result = std::sqrt(kx + bias - 2*trans(vector_to_matrix(alpha))*k);
test_result = std::sqrt(kx + bias - 2*trans(mat(alpha))*k);
}
// compute the error we would have if we approximated the new x sample
......@@ -525,7 +525,7 @@ namespace dlib
// now compute the updated alpha values to take account that we just removed one of
// our dictionary vectors
a = (K_inv*remove_row(K,i)*vector_to_matrix(alpha));
a = (K_inv*remove_row(K,i)*mat(alpha));
// now copy over the new alpha values
alpha.resize(alpha.size()-1);
......
......@@ -104,7 +104,7 @@ namespace dlib
long max_iter = 1000
)
{
do_train(vector_to_matrix(samples),vector_to_matrix(initial_centers),max_iter);
do_train(mat(samples),mat(initial_centers),max_iter);
}
unsigned long operator() (
......
......@@ -112,7 +112,7 @@ namespace dlib
/*!
requires
- matrix_type and matrix_type2 must either be dlib::matrix objects or convertible to dlib::matrix
via vector_to_matrix()
via mat()
- matrix_type::type == sample_type (i.e. matrix_type should contain sample_type objects)
- matrix_type2::type == sample_type (i.e. matrix_type2 should contain sample_type objects)
- initial_centers.nc() == 1 (i.e. must be a column vector)
......
......@@ -182,7 +182,7 @@ namespace dlib
temp.swap(P);
// now update the alpha vector (equation 3.16)
const scalar_type k_a = (y-trans(k)*vector_to_matrix(alpha))/delta;
const scalar_type k_a = (y-trans(k)*mat(alpha))/delta;
for (unsigned long i = 0; i < alpha.size(); ++i)
{
alpha[i] -= a(i)*k_a;
......@@ -198,7 +198,7 @@ namespace dlib
P -= q*temp_matrix;
// update the alpha vector (equation 3.13)
const scalar_type k_a = y-trans(k)*vector_to_matrix(alpha);
const scalar_type k_a = y-trans(k)*mat(alpha);
for (unsigned long i = 0; i < alpha.size(); ++i)
{
alpha[i] += (K_inv*q*k_a)(i);
......@@ -232,10 +232,10 @@ namespace dlib
) const
{
return decision_function<kernel_type>(
vector_to_matrix(alpha),
-sum(vector_to_matrix(alpha))*tau,
mat(alpha),
-sum(mat(alpha))*tau,
kernel,
vector_to_matrix(dictionary)
mat(dictionary)
);
}
......@@ -295,7 +295,7 @@ namespace dlib
// now compute the updated alpha values to take account that we just removed one of
// our dictionary vectors
a = (K_inv*remove_row(K,i)*vector_to_matrix(alpha));
a = (K_inv*remove_row(K,i)*mat(alpha));
// now copy over the new alpha values
alpha.resize(alpha.size()-1);
......
......@@ -88,15 +88,15 @@ namespace dlib
)
{
// make sure requires clause is not broken
DLIB_ASSERT(basis_samples.size() > 0 && is_vector(vector_to_matrix(basis_samples)),
DLIB_ASSERT(basis_samples.size() > 0 && is_vector(mat(basis_samples)),
"\tvoid krr_trainer::set_basis(basis_samples)"
<< "\n\t You have to give a non-empty set of basis_samples and it must be a vector"
<< "\n\t basis_samples.size(): " << basis_samples.size()
<< "\n\t is_vector(vector_to_matrix(basis_samples)): " << is_vector(vector_to_matrix(basis_samples))
<< "\n\t is_vector(mat(basis_samples)): " << is_vector(mat(basis_samples))
<< "\n\t this: " << this
);
basis = vector_to_matrix(basis_samples);
basis = mat(basis_samples);
ekm_stale = true;
}
......@@ -191,7 +191,7 @@ namespace dlib
{
std::vector<scalar_type> temp;
scalar_type temp2;
return do_train(vector_to_matrix(x), vector_to_matrix(y), false, temp, temp2);
return do_train(mat(x), mat(y), false, temp, temp2);
}
template <
......@@ -205,7 +205,7 @@ namespace dlib
) const
{
scalar_type temp;
return do_train(vector_to_matrix(x), vector_to_matrix(y), true, loo_values, temp);
return do_train(mat(x), mat(y), true, loo_values, temp);
}
template <
......@@ -219,7 +219,7 @@ namespace dlib
scalar_type& lambda_used
) const
{
return do_train(vector_to_matrix(x), vector_to_matrix(y), true, loo_values, lambda_used);
return do_train(mat(x), mat(y), true, loo_values, lambda_used);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment