Unverified commit 4c4419e0 authored by Lucas Beyer, committed by GitHub

Merge pull request #108 from STulling/master

Fix Windows MSVC install by updating Eigen Library
parents 4d5343c3 13b115ab
......@@ -27,8 +27,9 @@ template<typename Func, typename Derived>
struct redux_traits
{
public:
typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType;
enum {
PacketSize = packet_traits<typename Derived::Scalar>::size,
PacketSize = unpacket_traits<PacketType>::size,
InnerMaxSize = int(Derived::IsRowMajor)
? Derived::MaxColsAtCompileTime
: Derived::MaxRowsAtCompileTime
......@@ -37,8 +38,8 @@ public:
enum {
MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
&& (functor_traits<Func>::PacketAccess),
MayLinearVectorize = MightVectorize && (int(Derived::Flags)&LinearAccessBit),
MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize
MayLinearVectorize = bool(MightVectorize) && (int(Derived::Flags)&LinearAccessBit),
MaySliceVectorize = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize
};
public:
......@@ -50,21 +51,34 @@ public:
public:
enum {
Cost = ( Derived::SizeAtCompileTime == Dynamic
|| Derived::CoeffReadCost == Dynamic
|| (Derived::SizeAtCompileTime!=1 && functor_traits<Func>::Cost == Dynamic)
) ? Dynamic
: Derived::SizeAtCompileTime * Derived::CoeffReadCost
+ (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
Cost = Derived::SizeAtCompileTime == Dynamic ? HugeCost
: Derived::SizeAtCompileTime * Derived::CoeffReadCost + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
};
public:
enum {
Unrolling = Cost != Dynamic && Cost <= UnrollingLimit
? CompleteUnrolling
: NoUnrolling
Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling
};
#ifdef EIGEN_DEBUG_ASSIGN
static void debug()
{
std::cerr << "Xpr: " << typeid(typename Derived::XprType).name() << std::endl;
std::cerr.setf(std::ios::hex, std::ios::basefield);
EIGEN_DEBUG_VAR(Derived::Flags)
std::cerr.unsetf(std::ios::hex);
EIGEN_DEBUG_VAR(InnerMaxSize)
EIGEN_DEBUG_VAR(PacketSize)
EIGEN_DEBUG_VAR(MightVectorize)
EIGEN_DEBUG_VAR(MayLinearVectorize)
EIGEN_DEBUG_VAR(MaySliceVectorize)
EIGEN_DEBUG_VAR(Traversal)
EIGEN_DEBUG_VAR(UnrollingLimit)
EIGEN_DEBUG_VAR(Unrolling)
std::cerr << std::endl;
}
#endif
};
/***************************************************************************
......@@ -82,6 +96,7 @@ struct redux_novec_unroller
typedef typename Derived::Scalar Scalar;
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
{
return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
......@@ -99,6 +114,7 @@ struct redux_novec_unroller<Func, Derived, Start, 1>
typedef typename Derived::Scalar Scalar;
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&)
{
return mat.coeffByOuterInner(outer, inner);
......@@ -112,6 +128,7 @@ template<typename Func, typename Derived, int Start>
struct redux_novec_unroller<Func, Derived, Start, 0>
{
typedef typename Derived::Scalar Scalar;
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }
};
......@@ -121,12 +138,12 @@ template<typename Func, typename Derived, int Start, int Length>
struct redux_vec_unroller
{
enum {
PacketSize = packet_traits<typename Derived::Scalar>::size,
PacketSize = redux_traits<Func, Derived>::PacketSize,
HalfLength = Length/2
};
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)
{
......@@ -140,18 +157,18 @@ template<typename Func, typename Derived, int Start>
struct redux_vec_unroller<Func, Derived, Start, 1>
{
enum {
index = Start * packet_traits<typename Derived::Scalar>::size,
index = Start * redux_traits<Func, Derived>::PacketSize,
outer = index / int(Derived::InnerSizeAtCompileTime),
inner = index % int(Derived::InnerSizeAtCompileTime),
alignment = (Derived::Flags & AlignedBit) ? Aligned : Unaligned
alignment = Derived::Alignment
};
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)
{
return mat.template packetByOuterInner<alignment>(outer, inner);
return mat.template packetByOuterInner<alignment,PacketScalar>(outer, inner);
}
};
......@@ -169,8 +186,8 @@ template<typename Func, typename Derived>
struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Index Index;
static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func)
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
{
eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
Scalar res;
......@@ -193,19 +210,19 @@ template<typename Func, typename Derived>
struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename Derived::Index Index;
typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
static Scalar run(const Derived& mat, const Func& func)
static Scalar run(const Derived &mat, const Func& func)
{
const Index size = mat.size();
eigen_assert(size && "you are using an empty matrix");
const Index packetSize = packet_traits<Scalar>::size;
const Index alignedStart = internal::first_aligned(mat);
const Index packetSize = redux_traits<Func, Derived>::PacketSize;
const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
enum {
alignment = bool(Derived::Flags & DirectAccessBit) || bool(Derived::Flags & AlignedBit)
? Aligned : Unaligned
alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Derived::Alignment)
};
const Index alignedStart = internal::first_default_aligned(mat.nestedExpression());
const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize);
const Index alignedEnd2 = alignedStart + alignedSize2;
......@@ -213,19 +230,19 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
Scalar res;
if(alignedSize)
{
PacketScalar packet_res0 = mat.template packet<alignment>(alignedStart);
PacketScalar packet_res0 = mat.template packet<alignment,PacketScalar>(alignedStart);
if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop
{
PacketScalar packet_res1 = mat.template packet<alignment>(alignedStart+packetSize);
PacketScalar packet_res1 = mat.template packet<alignment,PacketScalar>(alignedStart+packetSize);
for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize)
{
packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment>(index));
packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment>(index+packetSize));
packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(index));
packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment,PacketScalar>(index+packetSize));
}
packet_res0 = func.packetOp(packet_res0,packet_res1);
if(alignedEnd>alignedEnd2)
packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment>(alignedEnd2));
packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(alignedEnd2));
}
res = func.predux(packet_res0);
......@@ -247,29 +264,29 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
}
};
template<typename Func, typename Derived>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
template<typename Func, typename Derived, int Unrolling>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename Derived::Index Index;
typedef typename redux_traits<Func, Derived>::PacketType PacketType;
static Scalar run(const Derived& mat, const Func& func)
EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)
{
eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
const Index innerSize = mat.innerSize();
const Index outerSize = mat.outerSize();
enum {
packetSize = packet_traits<Scalar>::size
packetSize = redux_traits<Func, Derived>::PacketSize
};
const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
Scalar res;
if(packetedInnerSize)
{
PacketScalar packet_res = mat.template packet<Unaligned>(0,0);
PacketType packet_res = mat.template packet<Unaligned,PacketType>(0,0);
for(Index j=0; j<outerSize; ++j)
for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned>(j,i));
packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned,PacketType>(j,i));
res = func.predux(packet_res);
for(Index j=0; j<outerSize; ++j)
......@@ -290,22 +307,90 @@ template<typename Func, typename Derived>
struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
enum {
PacketSize = packet_traits<Scalar>::size,
PacketSize = redux_traits<Func, Derived>::PacketSize,
Size = Derived::SizeAtCompileTime,
VectorizedSize = (Size / PacketSize) * PacketSize
};
static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func)
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
{
eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
if (VectorizedSize != Size)
res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
return res;
if (VectorizedSize > 0) {
Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
if (VectorizedSize != Size)
res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
return res;
}
else {
return redux_novec_unroller<Func, Derived, 0, Size>::run(mat,func);
}
}
};
// evaluator adaptor
template<typename _XprType>
class redux_evaluator
{
public:
typedef _XprType XprType;
EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
typedef typename XprType::PacketReturnType PacketReturnType;
enum {
MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
// TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator
Flags = evaluator<XprType>::Flags & ~DirectAccessBit,
IsRowMajor = XprType::IsRowMajor,
SizeAtCompileTime = XprType::SizeAtCompileTime,
InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime,
CoeffReadCost = evaluator<XprType>::CoeffReadCost,
Alignment = evaluator<XprType>::Alignment
};
EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
EIGEN_DEVICE_FUNC Index innerSize() const { return m_xpr.innerSize(); }
EIGEN_DEVICE_FUNC Index outerSize() const { return m_xpr.outerSize(); }
EIGEN_DEVICE_FUNC
CoeffReturnType coeff(Index row, Index col) const
{ return m_evaluator.coeff(row, col); }
EIGEN_DEVICE_FUNC
CoeffReturnType coeff(Index index) const
{ return m_evaluator.coeff(index); }
template<int LoadMode, typename PacketType>
PacketType packet(Index row, Index col) const
{ return m_evaluator.template packet<LoadMode,PacketType>(row, col); }
template<int LoadMode, typename PacketType>
PacketType packet(Index index) const
{ return m_evaluator.template packet<LoadMode,PacketType>(index); }
EIGEN_DEVICE_FUNC
CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
{ return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
template<int LoadMode, typename PacketType>
PacketType packetByOuterInner(Index outer, Index inner) const
{ return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
const XprType & nestedExpression() const { return m_xpr; }
protected:
internal::evaluator<XprType> m_evaluator;
const XprType &m_xpr;
};
} // end namespace internal
/***************************************************************************
......@@ -316,39 +401,46 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
/** \returns the result of a full redux operation on the whole matrix or vector using \a func
*
* The template parameter \a BinaryOp is the type of the functor \a func which must be
* an associative operator. Both current STL and TR1 functor styles are handled.
* an associative operator. Both current C++98 and C++11 functor styles are handled.
*
* \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
*/
template<typename Derived>
template<typename Func>
EIGEN_STRONG_INLINE typename internal::result_of<Func(typename internal::traits<Derived>::Scalar)>::type
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::redux(const Func& func) const
{
typedef typename internal::remove_all<typename Derived::Nested>::type ThisNested;
return internal::redux_impl<Func, ThisNested>
::run(derived(), func);
eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
ThisEvaluator thisEval(derived());
return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func);
}
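// Editor's illustration (not part of this patch): a minimal usage sketch of
// the generic redux() entry point documented above. The values and the choice
// of std::plus<float> are assumptions for the example; any associative binary
// functor over Scalar works, and a functor without packet support simply falls
// back to the scalar DefaultTraversal path selected by redux_traits.
#include <Eigen/Dense>
#include <functional>
#include <iostream>

int main()
{
  Eigen::Vector4f v(1.f, 2.f, 3.f, 4.f);
  float total = v.redux(std::plus<float>()); // folds all coefficients, same result as v.sum()
  std::cout << total << "\n";                // prints 10
}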
/** \returns the minimum of all coefficients of *this
/** \returns the minimum of all coefficients of \c *this.
* \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff() const
{
return this->redux(Eigen::internal::scalar_min_op<Scalar>());
return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>());
}
/** \returns the maximum of all coefficients of *this
/** \returns the maximum of all coefficients of \c *this.
* \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff() const
{
return this->redux(Eigen::internal::scalar_max_op<Scalar>());
return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>());
}
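// Editor's illustration (not part of this patch): usage sketch for the
// coefficient-wise extrema documented above; the matrix values are arbitrary.
// As the \warning notes, the result is undefined if the matrix contains NaN.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 1.0, -3.0,
       4.0,  2.0;
  std::cout << m.minCoeff() << "\n"; // -3
  std::cout << m.maxCoeff() << "\n"; //  4
}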
/** \returns the sum of all coefficients of *this
/** \returns the sum of all coefficients of \c *this
*
* If \c *this is empty, then the value 0 is returned.
*
* \sa trace(), prod(), mean()
*/
......@@ -358,7 +450,7 @@ DenseBase<Derived>::sum() const
{
if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
return Scalar(0);
return this->redux(Eigen::internal::scalar_sum_op<Scalar>());
return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>());
}
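// Editor's illustration (not part of this patch): sum() follows the convention
// documented above and returns Scalar(0) for an empty matrix instead of
// asserting; the sizes and values below are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf empty(0, 0);
  Eigen::Vector3f v(1.f, 2.f, 3.f);
  std::cout << empty.sum() << "\n"; // 0
  std::cout << v.sum() << "\n";     // 6
}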
/** \returns the mean of all coefficients of *this
......@@ -369,7 +461,14 @@ template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::mean() const
{
return Scalar(this->redux(Eigen::internal::scalar_sum_op<Scalar>())) / Scalar(this->size());
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning ( disable : 2259 )
#endif
return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->size());
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
}
/** \returns the product of all coefficients of *this
......@@ -385,7 +484,7 @@ DenseBase<Derived>::prod() const
{
if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
return Scalar(1);
return this->redux(Eigen::internal::scalar_product_op<Scalar>());
return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
}
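// Editor's illustration (not part of this patch): prod() mirrors sum() but
// returns the multiplicative identity Scalar(1) for an empty matrix; the
// values below are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd empty(0, 0);
  Eigen::Vector3d v(2.0, 3.0, 4.0);
  std::cout << empty.prod() << "\n"; // 1
  std::cout << v.prod() << "\n";     // 24
}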
/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
......
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REF_H
#define EIGEN_REF_H
namespace Eigen {
namespace internal {
template<typename _PlainObjectType, int _Options, typename _StrideType>
struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
: public traits<Map<_PlainObjectType, _Options, _StrideType> >
{
typedef _PlainObjectType PlainObjectType;
typedef _StrideType StrideType;
enum {
Options = _Options,
Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit,
Alignment = traits<Map<_PlainObjectType, _Options, _StrideType> >::Alignment
};
template<typename Derived> struct match {
enum {
IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime,
HasDirectAccess = internal::has_direct_access<Derived>::ret,
StorageOrderMatch = IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
|| int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
|| (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
OuterStrideMatch = IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
// NOTE, this indirection of evaluator<Derived>::Alignment is needed
// to workaround a very strange bug in MSVC related to the instantiation
// of has_*ary_operator in evaluator<CwiseNullaryOp>.
// This line is surprisingly sensitive. For instance, simply adding parentheses
// as "DerivedAlignment = (int(evaluator<Derived>::Alignment))," will make MSVC fail...
DerivedAlignment = int(evaluator<Derived>::Alignment),
AlignmentMatch = (int(traits<PlainObjectType>::Alignment)==int(Unaligned)) || (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should be replaced by the required alignment
ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
};
template<typename Derived>
struct traits<RefBase<Derived> > : public traits<Derived> {};
}
template<typename Derived> class RefBase
: public MapBase<Derived>
{
typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
typedef typename internal::traits<Derived>::StrideType StrideType;
public:
typedef MapBase<Derived> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
EIGEN_DEVICE_FUNC inline Index innerStride() const
{
return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
}
EIGEN_DEVICE_FUNC inline Index outerStride() const
{
return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
: IsVectorAtCompileTime ? this->size()
: int(Flags)&RowMajorBit ? this->cols()
: this->rows();
}
EIGEN_DEVICE_FUNC RefBase()
: Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime),
// Stride<> does not allow default ctor for Dynamic strides, so let's initialize it with dummy values:
m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,
StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)
{}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)
protected:
typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;
template<typename Expression>
EIGEN_DEVICE_FUNC void construct(Expression& expr)
{
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(PlainObjectType,Expression);
if(PlainObjectType::RowsAtCompileTime==1)
{
eigen_assert(expr.rows()==1 || expr.cols()==1);
::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());
}
else if(PlainObjectType::ColsAtCompileTime==1)
{
eigen_assert(expr.rows()==1 || expr.cols()==1);
::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);
}
else
::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))
::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);
else
::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
}
StrideBase m_stride;
};
/** \class Ref
* \ingroup Core_Module
*
* \brief A matrix or vector expression mapping an existing expression
*
* \tparam PlainObjectType the equivalent matrix type of the mapped data
* \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned.
* The default is \c #Unaligned.
* \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1),
* but accepts a variable outer stride (leading dimension).
* This can be overridden by specifying strides.
* The type passed here must be a specialization of the Stride template, see examples below.
*
* This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the number of copies.
* A Ref<> object can represent either a const expression or a l-value:
* \code
* // in-out argument:
* void foo1(Ref<VectorXf> x);
*
* // read-only const argument:
* void foo2(const Ref<const VectorXf>& x);
* \endcode
*
* In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered.
* By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.
* Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose columns' elements are stored contiguously,
* possibly with a constant gap between consecutive columns, i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension)
* can be greater than the number of rows.
*
* In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.
* Here are some examples:
* \code
* MatrixXf A;
* VectorXf a;
* foo1(a.head(10));            // OK
* foo1(A.col(3));              // OK
* foo1(A.row(3));              // Compilation error because here innerstride!=1
* foo2(A.row(3));              // Compilation error because A.row(3) is a 1xN object while foo2 is expecting a Nx1 object
* foo2(A.row(3).transpose());  // The row is copied into a contiguous temporary
* foo2(2*a);                   // The expression is evaluated into a temporary
* foo2(A.col(3).segment(2,4)); // No temporary
* \endcode
*
* The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters.
* Here is an example accepting an innerstride!=1:
* \code
* // in-out argument:
* void foo3(Ref<VectorXf,0,InnerStride<> > x);
* foo3(A.row()); // OK
* \endcode
* The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more
* expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to add overloads that internally
* call a template function, e.g.:
* \code
* // in the .h:
* void foo(const Ref<MatrixXf>& A);
* void foo(const Ref<MatrixXf,0,Stride<> >& A);
*
* // in the .cpp:
* template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
* ... // crazy code goes here
* }
* void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
* void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
* \endcode
*
*
* \sa PlainObjectBase::Map(), \ref TopicStorageOrders
*/
template<typename PlainObjectType, int Options, typename StrideType> class Ref
: public RefBase<Ref<PlainObjectType, Options, StrideType> >
{
private:
typedef internal::traits<Ref> Traits;
template<typename Derived>
EIGEN_DEVICE_FUNC inline Ref(const PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
public:
typedef RefBase<Ref> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
EIGEN_DEVICE_FUNC inline Ref(PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.derived());
}
template<typename Derived>
EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
#else
/** Implicit constructor from any dense expression */
template<typename Derived>
inline Ref(DenseBase<Derived>& expr)
#endif
{
EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
Base::construct(expr.const_cast_derived());
}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
};
// this is the const ref version
template<typename TPlainObjectType, int Options, typename StrideType> class Ref<const TPlainObjectType, Options, StrideType>
: public RefBase<Ref<const TPlainObjectType, Options, StrideType> >
{
typedef internal::traits<Ref> Traits;
public:
typedef RefBase<Ref> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
{
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
template<typename OtherRef>
EIGEN_DEVICE_FUNC inline Ref(const RefBase<OtherRef>& other) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}
protected:
template<typename Expression>
EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type)
{
Base::construct(expr);
}
template<typename Expression>
EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type)
{
internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar,Scalar>());
Base::construct(m_object);
}
protected:
TPlainObjectType m_object;
};
} // end namespace Eigen
#endif // EIGEN_REF_H
......@@ -12,21 +12,6 @@
namespace Eigen {
/**
* \class Replicate
* \ingroup Core_Module
*
* \brief Expression of the multiple replication of a matrix or vector
*
* \param MatrixType the type of the object we are replicating
*
* This class represents an expression of the multiple replication of a matrix or vector.
* It is the return type of DenseBase::replicate() and most of the time
* this is the only way it is used.
*
* \sa DenseBase::replicate()
*/
namespace internal {
template<typename MatrixType,int RowFactor,int ColFactor>
struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
......@@ -35,10 +20,7 @@ struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
typedef typename MatrixType::Scalar Scalar;
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename traits<MatrixType>::XprKind XprKind;
enum {
Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
};
typedef typename nested<MatrixType,Factor>::type MatrixTypeNested;
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
enum {
RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic
......@@ -53,12 +35,29 @@ struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
: MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
: (MatrixType::Flags & RowMajorBit) ? 1 : 0,
Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0),
CoeffReadCost = _MatrixTypeNested::CoeffReadCost
// FIXME enable DirectAccess with negative strides?
Flags = IsRowMajor ? RowMajorBit : 0
};
};
}
/**
* \class Replicate
* \ingroup Core_Module
*
* \brief Expression of the multiple replication of a matrix or vector
*
* \tparam MatrixType the type of the object we are replicating
* \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic.
* \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic.
*
* This class represents an expression of the multiple replication of a matrix or vector.
* It is the return type of DenseBase::replicate() and most of the time
* this is the only way it is used.
*
* \sa DenseBase::replicate()
*/
template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
: public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type
{
......@@ -68,8 +67,10 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
typedef typename internal::dense_xpr_base<Replicate>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
template<typename OriginalMatrixType>
EIGEN_DEVICE_FUNC
inline explicit Replicate(const OriginalMatrixType& matrix)
: m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
{
......@@ -79,6 +80,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
}
template<typename OriginalMatrixType>
EIGEN_DEVICE_FUNC
inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
: m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
{
......@@ -86,34 +88,12 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
}
EIGEN_DEVICE_FUNC
inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
EIGEN_DEVICE_FUNC
inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
inline Scalar coeff(Index row, Index col) const
{
// try to avoid using modulo; this is a pure optimization strategy
const Index actual_row = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
: RowFactor==1 ? row
: row%m_matrix.rows();
const Index actual_col = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col%m_matrix.cols();
return m_matrix.coeff(actual_row, actual_col);
}
template<int LoadMode>
inline PacketScalar packet(Index row, Index col) const
{
const Index actual_row = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
: RowFactor==1 ? row
: row%m_matrix.rows();
const Index actual_col = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col%m_matrix.cols();
return m_matrix.template packet<LoadMode>(actual_row, actual_col);
}
EIGEN_DEVICE_FUNC
const _MatrixTypeNested& nestedExpression() const
{
return m_matrix;
......@@ -135,27 +115,12 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
*/
template<typename Derived>
template<int RowFactor, int ColFactor>
inline const Replicate<Derived,RowFactor,ColFactor>
const Replicate<Derived,RowFactor,ColFactor>
DenseBase<Derived>::replicate() const
{
return Replicate<Derived,RowFactor,ColFactor>(derived());
}
/**
* \return an expression of the replication of \c *this
*
* Example: \include MatrixBase_replicate_int_int.cpp
* Output: \verbinclude MatrixBase_replicate_int_int.out
*
* \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
*/
template<typename Derived>
inline const Replicate<Derived,Dynamic,Dynamic>
DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
{
return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
}
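// Editor's illustration (not part of this patch): usage sketch for the two
// replicate() overloads defined above. The documentation refers to the shipped
// snippet MatrixBase_replicate_int_int.cpp; the values here are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector2i v(1, 2);
  // Compile-time factors: tile v twice vertically and three times horizontally.
  std::cout << v.replicate<2, 3>() << "\n\n";
  // Run-time factors: same 4x3 result through the (rowFactor, colFactor) overload.
  std::cout << v.replicate(2, 3) << "\n";
}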
/**
* \return an expression of the replication of each column (or row) of \c *this
*
......
......@@ -13,11 +13,6 @@
namespace Eigen {
/** \class ReturnByValue
* \ingroup Core_Module
*
*/
namespace internal {
template<typename Derived>
......@@ -38,17 +33,22 @@ struct traits<ReturnByValue<Derived> >
* So internal::nested always gives the plain return matrix type.
*
* FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ??
* Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators
*/
template<typename Derived,int n,typename PlainObject>
struct nested<ReturnByValue<Derived>, n, PlainObject>
struct nested_eval<ReturnByValue<Derived>, n, PlainObject>
{
typedef typename traits<Derived>::ReturnType type;
};
} // end namespace internal
/** \class ReturnByValue
* \ingroup Core_Module
*
*/
template<typename Derived> class ReturnByValue
: public internal::dense_xpr_base< ReturnByValue<Derived> >::type
: public internal::dense_xpr_base< ReturnByValue<Derived> >::type, internal::no_assignment_operator
{
public:
typedef typename internal::traits<Derived>::ReturnType ReturnType;
......@@ -57,10 +57,11 @@ template<typename Derived> class ReturnByValue
EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)
template<typename Dest>
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& dst) const
{ static_cast<const Derived*>(this)->evalTo(dst); }
inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }
inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }
EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
......@@ -72,6 +73,7 @@ template<typename Derived> class ReturnByValue
const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
#undef Unusable
#endif
};
......@@ -83,6 +85,33 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
return derived();
}
namespace internal {
// Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that
// when a ReturnByValue expression is assigned, the evaluator is not constructed.
// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world
template<typename Derived>
struct evaluator<ReturnByValue<Derived> >
: public evaluator<typename internal::traits<Derived>::ReturnType>
{
typedef ReturnByValue<Derived> XprType;
typedef typename internal::traits<Derived>::ReturnType PlainObject;
typedef evaluator<PlainObject> Base;
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
xpr.evalTo(m_result);
}
protected:
PlainObject m_result;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_RETURNBYVALUE_H
......@@ -14,20 +14,6 @@
namespace Eigen {
/** \class Reverse
* \ingroup Core_Module
*
* \brief Expression of the reverse of a vector or matrix
*
* \param MatrixType the type of the object of which we are taking the reverse
*
* This class represents an expression of the reverse of a vector.
* It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::reverse(), VectorwiseOp::reverse()
*/
namespace internal {
template<typename MatrixType, int Direction>
......@@ -37,36 +23,43 @@ struct traits<Reverse<MatrixType, Direction> >
typedef typename MatrixType::Scalar Scalar;
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename traits<MatrixType>::XprKind XprKind;
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
// let's enable LinearAccess only with vectorization because of the product overhead
LinearAccess = ( (Direction==BothDirections) && (int(_MatrixTypeNested::Flags)&PacketAccessBit) )
? LinearAccessBit : 0,
Flags = int(_MatrixTypeNested::Flags) & (HereditaryBits | LvalueBit | PacketAccessBit | LinearAccess),
CoeffReadCost = _MatrixTypeNested::CoeffReadCost
Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit)
};
};
template<typename PacketScalar, bool ReversePacket> struct reverse_packet_cond
template<typename PacketType, bool ReversePacket> struct reverse_packet_cond
{
static inline PacketScalar run(const PacketScalar& x) { return preverse(x); }
static inline PacketType run(const PacketType& x) { return preverse(x); }
};
template<typename PacketScalar> struct reverse_packet_cond<PacketScalar,false>
template<typename PacketType> struct reverse_packet_cond<PacketType,false>
{
static inline PacketScalar run(const PacketScalar& x) { return x; }
static inline PacketType run(const PacketType& x) { return x; }
};
} // end namespace internal
/** \class Reverse
* \ingroup Core_Module
*
* \brief Expression of the reverse of a vector or matrix
*
* \tparam MatrixType the type of the object of which we are taking the reverse
* \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections
*
* This class represents an expression of the reverse of a vector.
* It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::reverse(), VectorwiseOp::reverse()
*/
template<typename MatrixType, int Direction> class Reverse
: public internal::dense_xpr_base< Reverse<MatrixType, Direction> >::type
{
......@@ -74,12 +67,9 @@ template<typename MatrixType, int Direction> class Reverse
typedef typename internal::dense_xpr_base<Reverse>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
using Base::IsRowMajor;
// next line is necessary because otherwise const version of operator()
// is hidden by non-const version defined in this file
using Base::operator();
protected:
enum {
PacketSize = internal::packet_traits<Scalar>::size,
......@@ -95,82 +85,19 @@ template<typename MatrixType, int Direction> class Reverse
typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet;
public:
inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { }
EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { }
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); }
inline Index innerStride() const
EIGEN_DEVICE_FUNC inline Index innerStride() const
{
return -m_matrix.innerStride();
}
inline Scalar& operator()(Index row, Index col)
{
eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
return coeffRef(row, col);
}
inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row,
ReverseCol ? m_matrix.cols() - col - 1 : col);
}
inline CoeffReturnType coeff(Index row, Index col) const
{
return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row,
ReverseCol ? m_matrix.cols() - col - 1 : col);
}
inline CoeffReturnType coeff(Index index) const
{
return m_matrix.coeff(m_matrix.size() - index - 1);
}
inline Scalar& coeffRef(Index index)
{
return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1);
}
inline Scalar& operator()(Index index)
{
eigen_assert(index >= 0 && index < m_matrix.size());
return coeffRef(index);
}
template<int LoadMode>
inline const PacketScalar packet(Index row, Index col) const
{
return reverse_packet::run(m_matrix.template packet<LoadMode>(
ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
ReverseCol ? m_matrix.cols() - col - OffsetCol : col));
}
template<int LoadMode>
inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(
ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
ReverseCol ? m_matrix.cols() - col - OffsetCol : col,
reverse_packet::run(x));
}
template<int LoadMode>
inline const PacketScalar packet(Index index) const
{
return internal::preverse(m_matrix.template packet<LoadMode>( m_matrix.size() - index - PacketSize ));
}
template<int LoadMode>
inline void writePacket(Index index, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(m_matrix.size() - index - PacketSize, internal::preverse(x));
}
const typename internal::remove_all<typename MatrixType::Nested>::type&
EIGEN_DEVICE_FUNC const typename internal::remove_all<typename MatrixType::Nested>::type&
nestedExpression() const
{
return m_matrix;
......@@ -190,33 +117,93 @@ template<typename Derived>
inline typename DenseBase<Derived>::ReverseReturnType
DenseBase<Derived>::reverse()
{
return derived();
return ReverseReturnType(derived());
}
/** This is the const version of reverse(). */
template<typename Derived>
inline const typename DenseBase<Derived>::ConstReverseReturnType
DenseBase<Derived>::reverse() const
{
return derived();
}
// reverse const overload moved to DenseBase.h due to a CUDA compiler bug
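// Editor's illustration (not part of this patch): usage sketch for the
// reverse() expression documented above; the values are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3i v(1, 2, 3);
  std::cout << v.reverse().transpose() << "\n"; // 3 2 1
  Eigen::Matrix2i m;
  m << 1, 2,
       3, 4;
  std::cout << m.colwise().reverse() << "\n";   // rows swapped: 3 4 / 1 2
}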
/** This is the "in place" version of reverse: it reverses \c *this.
*
* In most cases it is probably better to simply use the reversed expression
* of a matrix. However, when reversing the matrix data itself is really needed,
* then this "in-place" version is probably the right choice because it provides
* the following additional features:
* the following additional benefits:
* - less error prone: doing the same operation with .reverse() requires special care:
* \code m = m.reverse().eval(); \endcode
* - this API allows to avoid creating a temporary (the current implementation creates a temporary, but that could be avoided using swap)
* - this API enables reverse operations without the need for a temporary
* - it allows future optimizations (cache friendliness, etc.)
*
* \sa reverse() */
* \sa VectorwiseOp::reverseInPlace(), reverse() */
template<typename Derived>
inline void DenseBase<Derived>::reverseInPlace()
{
derived() = derived().reverse().eval();
if(cols()>rows())
{
Index half = cols()/2;
leftCols(half).swap(rightCols(half).reverse());
if((cols()%2)==1)
{
Index half2 = rows()/2;
col(half).head(half2).swap(col(half).tail(half2).reverse());
}
}
else
{
Index half = rows()/2;
topRows(half).swap(bottomRows(half).reverse());
if((rows()%2)==1)
{
Index half2 = cols()/2;
row(half).head(half2).swap(row(half).tail(half2).reverse());
}
}
}
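// Editor's illustration (not part of this patch): the eval() idiom mentioned
// in the note above versus the new in-place path; the values are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector4i a(1, 2, 3, 4), b(1, 2, 3, 4);
  a = a.reverse().eval(); // explicit temporary avoids aliasing issues
  b.reverseInPlace();     // swap-based implementation above, no user-visible temporary
  std::cout << a.transpose() << "\n"  // 4 3 2 1
            << b.transpose() << "\n"; // 4 3 2 1
}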
namespace internal {
template<int Direction>
struct vectorwise_reverse_inplace_impl;
template<>
struct vectorwise_reverse_inplace_impl<Vertical>
{
template<typename ExpressionType>
static void run(ExpressionType &xpr)
{
Index half = xpr.rows()/2;
xpr.topRows(half).swap(xpr.bottomRows(half).colwise().reverse());
}
};
template<>
struct vectorwise_reverse_inplace_impl<Horizontal>
{
template<typename ExpressionType>
static void run(ExpressionType &xpr)
{
Index half = xpr.cols()/2;
xpr.leftCols(half).swap(xpr.rightCols(half).rowwise().reverse());
}
};
} // end namespace internal
/** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this.
*
* In most cases it is probably better to simply use the reversed expression
* of a matrix. However, when reversing the matrix data itself is really needed,
* then this "in-place" version is probably the right choice because it provides
* the following additional benefits:
* - less error prone: doing the same operation with .reverse() requires special care:
* \code m = m.reverse().eval(); \endcode
* - this API enables reverse operations without the need for a temporary
*
* \sa DenseBase::reverseInPlace(), reverse() */
template<typename ExpressionType, int Direction>
void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
{
internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());
}
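// Editor's illustration (not part of this patch): the vector-wise in-place
// reverse added above, applied column-wise; the values are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3i m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  m.colwise().reverseInPlace(); // each column is reversed, i.e. the rows are flipped
  std::cout << m << "\n";       // 7 8 9 / 4 5 6 / 1 2 3
}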
} // end namespace Eigen
......
......@@ -43,35 +43,34 @@ struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits,
CoeffReadCost = traits<typename remove_all<ConditionMatrixNested>::type>::CoeffReadCost
+ EIGEN_SIZE_MAX(traits<typename remove_all<ThenMatrixNested>::type>::CoeffReadCost,
traits<typename remove_all<ElseMatrixNested>::type>::CoeffReadCost)
Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit
};
};
}
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
class Select : internal::no_assignment_operator,
public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type
class Select : public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type,
internal::no_assignment_operator
{
public:
typedef typename internal::dense_xpr_base<Select>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Select)
Select(const ConditionMatrixType& conditionMatrix,
const ThenMatrixType& thenMatrix,
const ElseMatrixType& elseMatrix)
: m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix)
inline EIGEN_DEVICE_FUNC
Select(const ConditionMatrixType& a_conditionMatrix,
const ThenMatrixType& a_thenMatrix,
const ElseMatrixType& a_elseMatrix)
: m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)
{
eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
}
Index rows() const { return m_condition.rows(); }
Index cols() const { return m_condition.cols(); }
inline EIGEN_DEVICE_FUNC Index rows() const { return m_condition.rows(); }
inline EIGEN_DEVICE_FUNC Index cols() const { return m_condition.cols(); }
inline EIGEN_DEVICE_FUNC
const Scalar coeff(Index i, Index j) const
{
if (m_condition.coeff(i,j))
......@@ -80,6 +79,7 @@ class Select : internal::no_assignment_operator,
return m_else.coeff(i,j);
}
inline EIGEN_DEVICE_FUNC
const Scalar coeff(Index i) const
{
if (m_condition.coeff(i))
......@@ -88,17 +88,17 @@ class Select : internal::no_assignment_operator,
return m_else.coeff(i);
}
const ConditionMatrixType& conditionMatrix() const
inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const
{
return m_condition;
}
const ThenMatrixType& thenMatrix() const
inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const
{
return m_then;
}
const ElseMatrixType& elseMatrix() const
inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const
{
return m_else;
}
......@@ -136,7 +136,7 @@ template<typename Derived>
template<typename ThenDerived>
inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
typename ThenDerived::Scalar elseScalar) const
const typename ThenDerived::Scalar& elseScalar) const
{
return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
......@@ -150,8 +150,8 @@ DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
template<typename Derived>
template<typename ElseDerived>
inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
DenseBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
const DenseBase<ElseDerived>& elseMatrix) const
DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,
const DenseBase<ElseDerived>& elseMatrix) const
{
return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
......
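// Editor's illustration (not part of this patch): usage sketch for
// DenseBase::select() and the scalar overloads above; the values are arbitrary.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2f m;
  m << -1.f,  2.f,
        3.f, -4.f;
  // Keep positive coefficients, replace the others by zero (scalar "else" overload).
  Eigen::Matrix2f clamped = (m.array() > 0.f).select(m, 0.f);
  std::cout << clamped << "\n"; // 0 2 / 3 0
}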
......@@ -32,54 +32,62 @@ namespace internal {
template<typename MatrixType, unsigned int UpLo>
struct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType>
{
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
typedef MatrixType ExpressionType;
typedef typename MatrixType::PlainObject DenseMatrixType;
typedef typename MatrixType::PlainObject FullMatrixType;
enum {
Mode = UpLo | SelfAdjoint,
Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits)
& (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)), // FIXME these flags should be preserved
CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits|FlagsLvalueBit)
& (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)) // FIXME these flags should be preserved
};
};
}
template <typename Lhs, int LhsMode, bool LhsIsVector,
typename Rhs, int RhsMode, bool RhsIsVector>
struct SelfadjointProductMatrix;
// FIXME could also be called SelfAdjointWrapper to be consistent with DiagonalWrapper ??
template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
: public TriangularBase<SelfAdjointView<MatrixType, UpLo> >
template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
: public TriangularBase<SelfAdjointView<_MatrixType, UpLo> >
{
public:
typedef _MatrixType MatrixType;
typedef TriangularBase<SelfAdjointView> Base;
typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested;
typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
typedef MatrixTypeNestedCleaned NestedExpression;
/** \brief The type of coefficients in this matrix */
typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
enum {
Mode = internal::traits<SelfAdjointView>::Mode
Mode = internal::traits<SelfAdjointView>::Mode,
Flags = internal::traits<SelfAdjointView>::Flags,
TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0)
};
typedef typename MatrixType::PlainObject PlainObject;
inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{}
EIGEN_DEVICE_FUNC
explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{
EIGEN_STATIC_ASSERT(UpLo==Lower || UpLo==Upper,SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY);
}
EIGEN_DEVICE_FUNC
inline Index rows() const { return m_matrix.rows(); }
EIGEN_DEVICE_FUNC
inline Index cols() const { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC
inline Index outerStride() const { return m_matrix.outerStride(); }
EIGEN_DEVICE_FUNC
inline Index innerStride() const { return m_matrix.innerStride(); }
/** \sa MatrixBase::coeff()
* \warning the coordinates must fit into the referenced triangular part
*/
EIGEN_DEVICE_FUNC
inline Scalar coeff(Index row, Index col) const
{
Base::check_coordinates_internal(row, col);
......@@ -89,36 +97,46 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
/** \sa MatrixBase::coeffRef()
* \warning the coordinates must fit into the referenced triangular part
*/
EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index row, Index col)
{
EIGEN_STATIC_ASSERT_LVALUE(SelfAdjointView);
Base::check_coordinates_internal(row, col);
return m_matrix.const_cast_derived().coeffRef(row, col);
return m_matrix.coeffRef(row, col);
}
/** \internal */
EIGEN_DEVICE_FUNC
const MatrixTypeNestedCleaned& _expression() const { return m_matrix; }
EIGEN_DEVICE_FUNC
const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
EIGEN_DEVICE_FUNC
MatrixTypeNestedCleaned& nestedExpression() { return m_matrix; }
/** Efficient self-adjoint matrix times vector/matrix product */
/** Efficient triangular matrix times vector/matrix product */
template<typename OtherDerived>
SelfadjointProductMatrix<MatrixType,Mode,false,OtherDerived,0,OtherDerived::IsVectorAtCompileTime>
EIGEN_DEVICE_FUNC
const Product<SelfAdjointView,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return SelfadjointProductMatrix
<MatrixType,Mode,false,OtherDerived,0,OtherDerived::IsVectorAtCompileTime>
(m_matrix, rhs.derived());
return Product<SelfAdjointView,OtherDerived>(*this, rhs.derived());
}
/** Efficient vector/matrix times self-adjoint matrix product */
/** Efficient vector/matrix times triangular matrix product */
template<typename OtherDerived> friend
SelfadjointProductMatrix<OtherDerived,0,OtherDerived::IsVectorAtCompileTime,MatrixType,Mode,false>
EIGEN_DEVICE_FUNC
const Product<OtherDerived,SelfAdjointView>
operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView& rhs)
{
return SelfadjointProductMatrix
<OtherDerived,0,OtherDerived::IsVectorAtCompileTime,MatrixType,Mode,false>
(lhs.derived(),rhs.m_matrix);
return Product<OtherDerived,SelfAdjointView>(lhs.derived(),rhs);
}
friend EIGEN_DEVICE_FUNC
const SelfAdjointView<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,MatrixType,product),UpLo>
operator*(const Scalar& s, const SelfAdjointView& mat)
{
return (s*mat.nestedExpression()).template selfadjointView<UpLo>();
}
/** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this:
......@@ -132,7 +150,8 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
* \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
*/
template<typename DerivedU, typename DerivedV>
SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha = Scalar(1));
EIGEN_DEVICE_FUNC
SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha = Scalar(1));
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
* \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
......@@ -145,7 +164,73 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
* \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
*/
template<typename DerivedU>
SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
EIGEN_DEVICE_FUNC
SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
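// Editor's sketch (illustrative, not part of this patch): typical use of the two
// rankUpdate() overloads declared above on a dynamic-size self-adjoint view;
// names and values are placeholders.
//
//   #include <Eigen/Dense>
//   void rank_update_example()
//   {
//     Eigen::MatrixXd A = Eigen::MatrixXd::Zero(3,3);
//     Eigen::VectorXd u = Eigen::VectorXd::Random(3);
//     Eigen::VectorXd v = Eigen::VectorXd::Random(3);
//     A.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);  // A += 2 * u * u^*
//     A.selfadjointView<Eigen::Lower>().rankUpdate(u, v);    // A += u * v^* + v * u^*
//   }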
/** \returns an expression of a triangular view extracted from the current selfadjoint view of a given triangular part
*
* The parameter \a TriMode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
* \c #Lower, \c #StrictlyLower, \c #UnitLower.
*
* If \c TriMode references the same triangular part as \c *this, then this method simply returns a \c TriangularView of the nested expression,
* otherwise, the nested expression is first transposed, thus returning a \c TriangularView<Transpose<MatrixType>> object.
*
* \sa MatrixBase::triangularView(), class TriangularView
*/
template<unsigned int TriMode>
EIGEN_DEVICE_FUNC
typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)),
TriangularView<MatrixType,TriMode>,
TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type
triangularView() const
{
typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::ConstTransposeReturnType>::type tmp1(m_matrix);
typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::AdjointReturnType>::type tmp2(tmp1);
return typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)),
TriangularView<MatrixType,TriMode>,
TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type(tmp2);
}
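// Editor's sketch (illustrative, not part of this patch): extracting a triangular
// view from a self-adjoint view as documented above; the matrix name is a placeholder.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
//   // Same triangular part as the view: a plain TriangularView of A is returned.
//   Eigen::MatrixXd L = A.selfadjointView<Eigen::Lower>().triangularView<Eigen::Lower>();
//   // Opposite part: the nested expression is adjointed first.
//   Eigen::MatrixXd U = A.selfadjointView<Eigen::Lower>().triangularView<Eigen::Upper>();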
typedef SelfAdjointView<const MatrixConjugateReturnType,UpLo> ConjugateReturnType;
/** \sa MatrixBase::conjugate() const */
EIGEN_DEVICE_FUNC
inline const ConjugateReturnType conjugate() const
{ return ConjugateReturnType(m_matrix.conjugate()); }
typedef SelfAdjointView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;
/** \sa MatrixBase::adjoint() const */
EIGEN_DEVICE_FUNC
inline const AdjointReturnType adjoint() const
{ return AdjointReturnType(m_matrix.adjoint()); }
typedef SelfAdjointView<typename MatrixType::TransposeReturnType,TransposeMode> TransposeReturnType;
/** \sa MatrixBase::transpose() */
EIGEN_DEVICE_FUNC
inline TransposeReturnType transpose()
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
typename MatrixType::TransposeReturnType tmp(m_matrix);
return TransposeReturnType(tmp);
}
typedef SelfAdjointView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType;
/** \sa MatrixBase::transpose() const */
EIGEN_DEVICE_FUNC
inline const ConstTransposeReturnType transpose() const
{
return ConstTransposeReturnType(m_matrix.transpose());
}
/** \returns a const expression of the main diagonal of the matrix \c *this
*
* This method simply returns the diagonal of the nested expression, thus by-passing the SelfAdjointView decorator.
*
* \sa MatrixBase::diagonal(), class Diagonal */
EIGEN_DEVICE_FUNC
typename MatrixType::ConstDiagonalReturnType diagonal() const
{
return typename MatrixType::ConstDiagonalReturnType(m_matrix);
}
/////////// Cholesky module ///////////
......@@ -159,31 +244,10 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
/** Return type of eigenvalues() */
typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType;
EIGEN_DEVICE_FUNC
EigenvaluesReturnType eigenvalues() const;
EIGEN_DEVICE_FUNC
RealScalar operatorNorm() const;
#ifdef EIGEN2_SUPPORT
template<typename OtherDerived>
SelfAdjointView& operator=(const MatrixBase<OtherDerived>& other)
{
enum {
OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper
};
m_matrix.const_cast_derived().template triangularView<UpLo>() = other;
m_matrix.const_cast_derived().template triangularView<OtherPart>() = other.adjoint();
return *this;
}
template<typename OtherMatrixType, unsigned int OtherMode>
SelfAdjointView& operator=(const TriangularView<OtherMatrixType, OtherMode>& other)
{
enum {
OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper
};
m_matrix.const_cast_derived().template triangularView<UpLo>() = other.toDenseMatrix();
m_matrix.const_cast_derived().template triangularView<OtherPart>() = other.toDenseMatrix().adjoint();
return *this;
}
#endif
protected:
MatrixTypeNested m_matrix;
......@@ -201,90 +265,54 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
namespace internal {
template<typename Derived1, typename Derived2, int UnrollCount, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount, ClearOpposite>
{
enum {
col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
row = (UnrollCount-1) % Derived1::RowsAtCompileTime
};
static inline void run(Derived1 &dst, const Derived2 &src)
{
triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount-1, ClearOpposite>::run(dst, src);
if(row == col)
dst.coeffRef(row, col) = real(src.coeff(row, col));
else if(row < col)
dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, 0, ClearOpposite>
// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SelfAdjointView<MatrixType,Mode> >
{
static inline void run(Derived1 &, const Derived2 &) {}
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef SelfAdjointShape Shape;
};
template<typename Derived1, typename Derived2, int UnrollCount, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount, ClearOpposite>
template<int UpLo, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version>
class triangular_dense_assignment_kernel<UpLo,SelfAdjoint,SetOpposite,DstEvaluatorTypeT,SrcEvaluatorTypeT,Functor,Version>
: public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version>
{
enum {
col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
row = (UnrollCount-1) % Derived1::RowsAtCompileTime
};
static inline void run(Derived1 &dst, const Derived2 &src)
protected:
typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base;
typedef typename Base::DstXprType DstXprType;
typedef typename Base::SrcXprType SrcXprType;
using Base::m_dst;
using Base::m_src;
using Base::m_functor;
public:
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::AssignmentTraits AssignmentTraits;
EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)
{
triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount-1, ClearOpposite>::run(dst, src);
if(row == col)
dst.coeffRef(row, col) = real(src.coeff(row, col));
else if(row > col)
dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
eigen_internal_assert(row!=col);
Scalar tmp = m_src.coeff(row,col);
m_functor.assignCoeff(m_dst.coeffRef(row,col), tmp);
m_functor.assignCoeff(m_dst.coeffRef(col,row), numext::conj(tmp));
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, 0, ClearOpposite>
{
static inline void run(Derived1 &, const Derived2 &) {}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)
{
for(Index j = 0; j < dst.cols(); ++j)
{
for(Index i = 0; i < j; ++i)
{
dst.copyCoeff(i, j, src);
dst.coeffRef(j,i) = conj(dst.coeff(i,j));
}
dst.copyCoeff(j, j, src);
}
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dynamic, ClearOpposite>
{
static inline void run(Derived1 &dst, const Derived2 &src)
{
typedef typename Derived1::Index Index;
for(Index i = 0; i < dst.rows(); ++i)
{
for(Index j = 0; j < i; ++j)
{
dst.copyCoeff(i, j, src);
dst.coeffRef(j,i) = conj(dst.coeff(i,j));
}
dst.copyCoeff(i, i, src);
}
Base::assignCoeff(id,id);
}
EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index)
{ eigen_internal_assert(false && "should never be called"); }
};
} // end namespace internal
......@@ -293,20 +321,30 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dyn
* Implementation of MatrixBase methods
***************************************************************************/
/** This is the const version of MatrixBase::selfadjointView() */
template<typename Derived>
template<unsigned int UpLo>
typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
MatrixBase<Derived>::selfadjointView() const
{
return derived();
return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived());
}
/** \returns an expression of a symmetric/self-adjoint view extracted from the upper or lower triangular part of the current matrix
*
* The parameter \a UpLo can be either \c #Upper or \c #Lower
*
* Example: \include MatrixBase_selfadjointView.cpp
* Output: \verbinclude MatrixBase_selfadjointView.out
*
* \sa class SelfAdjointView
*/
template<typename Derived>
template<unsigned int UpLo>
typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
MatrixBase<Derived>::selfadjointView()
{
return derived();
return typename SelfAdjointViewReturnType<UpLo>::Type(derived());
}
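// Editor's sketch (illustrative, not part of this patch): the selfadjointView()
// entry points defined above in action; only the referenced triangle of A is read.
//
//   #include <Eigen/Dense>
//   void selfadjoint_view_example()
//   {
//     Eigen::Matrix3d A = Eigen::Matrix3d::Random();
//     Eigen::Vector3d x(1.0, 2.0, 3.0);
//     // Treats A as symmetric, reading only its lower triangle.
//     Eigen::Vector3d y = A.selfadjointView<Eigen::Lower>() * x;
//   }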
} // end namespace Eigen
......
......@@ -12,180 +12,33 @@
namespace Eigen {
/** \class SelfCwiseBinaryOp
* \ingroup Core_Module
*
* \internal
*
* \brief Internal helper class for optimizing operators like +=, -=
*
* This is a pseudo expression class re-implementing the copyCoeff/copyPacket
* methods to perform +=/-= operations directly in an optimal way. In particular,
* this makes it possible to ensure that the input/output data are loaded only once
* using aligned packet loads.
*
* \sa class SwapWrapper for a similar trick.
*/
// TODO generalize the scalar type of 'other'
namespace internal {
template<typename BinaryOp, typename Lhs, typename Rhs>
struct traits<SelfCwiseBinaryOp<BinaryOp,Lhs,Rhs> >
: traits<CwiseBinaryOp<BinaryOp,Lhs,Rhs> >
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
{
enum {
// Note that it is still a good idea to preserve the DirectAccessBit
// so that assign can correctly align the data.
Flags = traits<CwiseBinaryOp<BinaryOp,Lhs,Rhs> >::Flags | (Lhs::Flags&DirectAccessBit) | (Lhs::Flags&LvalueBit),
OuterStrideAtCompileTime = Lhs::OuterStrideAtCompileTime,
InnerStrideAtCompileTime = Lhs::InnerStrideAtCompileTime
};
};
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>());
return derived();
}
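// Editor's sketch (illustrative, not part of this patch): the scalar compound
// assignments implemented in this file, which now route through
// internal::call_assignment with a constant right-hand side instead of the old
// SelfCwiseBinaryOp helper.
//
//   Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, 0.0, 1.0);
//   a *= 2.0;   // DenseBase::operator*=(const Scalar&)
//   a += 1.0;   // ArrayBase::operator+=(const Scalar&)
//   a /= 4.0;   // DenseBase::operator/=(const Scalar&)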
template<typename BinaryOp, typename Lhs, typename Rhs> class SelfCwiseBinaryOp
: public internal::dense_xpr_base< SelfCwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
{
public:
typedef typename internal::dense_xpr_base<SelfCwiseBinaryOp>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp)
typedef typename internal::packet_traits<Scalar>::type Packet;
inline SelfCwiseBinaryOp(Lhs& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline Index outerStride() const { return m_matrix.outerStride(); }
inline Index innerStride() const { return m_matrix.innerStride(); }
inline const Scalar* data() const { return m_matrix.data(); }
// note that this function is needed by assign to correctly align loads/stores
// TODO make Assign use .data()
inline Scalar& coeffRef(Index row, Index col)
{
EIGEN_STATIC_ASSERT_LVALUE(Lhs)
return m_matrix.const_cast_derived().coeffRef(row, col);
}
inline const Scalar& coeffRef(Index row, Index col) const
{
return m_matrix.coeffRef(row, col);
}
// note that this function is needed by assign to correctly align loads/stores
// TODO make Assign use .data()
inline Scalar& coeffRef(Index index)
{
EIGEN_STATIC_ASSERT_LVALUE(Lhs)
return m_matrix.const_cast_derived().coeffRef(index);
}
inline const Scalar& coeffRef(Index index) const
{
return m_matrix.const_cast_derived().coeffRef(index);
}
template<typename OtherDerived>
void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
Scalar& tmp = m_matrix.coeffRef(row,col);
tmp = m_functor(tmp, _other.coeff(row,col));
}
template<typename OtherDerived>
void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(index >= 0 && index < m_matrix.size());
Scalar& tmp = m_matrix.coeffRef(index);
tmp = m_functor(tmp, _other.coeff(index));
}
template<typename OtherDerived, int StoreMode, int LoadMode>
void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
m_matrix.template writePacket<StoreMode>(row, col,
m_functor.packetOp(m_matrix.template packet<StoreMode>(row, col),_other.template packet<LoadMode>(row, col)) );
}
template<typename OtherDerived, int StoreMode, int LoadMode>
void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(index >= 0 && index < m_matrix.size());
m_matrix.template writePacket<StoreMode>(index,
m_functor.packetOp(m_matrix.template packet<StoreMode>(index),_other.template packet<LoadMode>(index)) );
}
// reimplement lazyAssign to handle complex *= real
// see CwiseBinaryOp ctor for details
template<typename RhsDerived>
EIGEN_STRONG_INLINE SelfCwiseBinaryOp& lazyAssign(const DenseBase<RhsDerived>& rhs)
{
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs,RhsDerived)
EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename RhsDerived::Scalar);
#ifdef EIGEN_DEBUG_ASSIGN
internal::assign_traits<SelfCwiseBinaryOp, RhsDerived>::debug();
#endif
eigen_assert(rows() == rhs.rows() && cols() == rhs.cols());
internal::assign_impl<SelfCwiseBinaryOp, RhsDerived>::run(*this,rhs.derived());
#ifndef EIGEN_NO_DEBUG
this->checkTransposeAliasing(rhs.derived());
#endif
return *this;
}
// overloaded to honor evaluation of special matrices
// maybe another solution would be to not use SelfCwiseBinaryOp
// at first...
SelfCwiseBinaryOp& operator=(const Rhs& _rhs)
{
typename internal::nested<Rhs>::type rhs(_rhs);
return Base::operator=(rhs);
}
Lhs& expression() const
{
return m_matrix;
}
const BinaryOp& functor() const
{
return m_functor;
}
protected:
Lhs& m_matrix;
const BinaryOp& m_functor;
private:
SelfCwiseBinaryOp& operator=(const SelfCwiseBinaryOp&);
};
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>());
return derived();
}
template<typename Derived>
inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
{
typedef typename Derived::PlainObject PlainObject;
SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
tmp = PlainObject::Constant(rows(),cols(),other);
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>());
return derived();
}
template<typename Derived>
inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
internal::scalar_quotient_op<Scalar>,
internal::scalar_product_op<Scalar> >::type BinOp;
typedef typename Derived::PlainObject PlainObject;
SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
tmp = PlainObject::Constant(rows(),cols(), NumTraits<Scalar>::IsInteger ? other : Scalar(1)/other);
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>());
return derived();
}
......
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SOLVE_H
#define EIGEN_SOLVE_H
namespace Eigen {
template<typename Decomposition, typename RhsType, typename StorageKind> class SolveImpl;
/** \class Solve
* \ingroup Core_Module
*
* \brief Pseudo expression representing a solving operation
*
* \tparam Decomposition the type of the matrix or decomposition object
* \tparam RhsType the type of the right-hand side
*
* This class represents an expression of A.solve(B)
* and most of the time this is the only way it is used.
*
*/
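// Editor's sketch (illustrative, not part of this patch): a Solve expression is
// what dec.solve(b) returns; it is only evaluated when assigned, for instance:
//
//   #include <Eigen/Dense>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
//   Eigen::VectorXd b = Eigen::VectorXd::Random(4);
//   Eigen::PartialPivLU<Eigen::MatrixXd> dec(A);
//   Eigen::VectorXd x = dec.solve(b);   // builds a Solve<...> expression, then evaluates it into x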
namespace internal {
// this solve_traits class is used to determine the evaluation type with respect to the storage kind (Dense vs Sparse)
template<typename Decomposition, typename RhsType,typename StorageKind> struct solve_traits;
template<typename Decomposition, typename RhsType>
struct solve_traits<Decomposition,RhsType,Dense>
{
typedef typename make_proper_matrix_type<typename RhsType::Scalar,
Decomposition::ColsAtCompileTime,
RhsType::ColsAtCompileTime,
RhsType::PlainObject::Options,
Decomposition::MaxColsAtCompileTime,
RhsType::MaxColsAtCompileTime>::type PlainObject;
};
template<typename Decomposition, typename RhsType>
struct traits<Solve<Decomposition, RhsType> >
: traits<typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject>
{
typedef typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject PlainObject;
typedef typename promote_index_type<typename Decomposition::StorageIndex, typename RhsType::StorageIndex>::type StorageIndex;
typedef traits<PlainObject> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
CoeffReadCost = HugeCost
};
};
}
template<typename Decomposition, typename RhsType>
class Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>
{
public:
typedef typename internal::traits<Solve>::PlainObject PlainObject;
typedef typename internal::traits<Solve>::StorageIndex StorageIndex;
Solve(const Decomposition &dec, const RhsType &rhs)
: m_dec(dec), m_rhs(rhs)
{}
EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }
EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; }
protected:
const Decomposition &m_dec;
const RhsType &m_rhs;
};
// Specialization of the Solve expression for dense results
template<typename Decomposition, typename RhsType>
class SolveImpl<Decomposition,RhsType,Dense>
: public MatrixBase<Solve<Decomposition,RhsType> >
{
typedef Solve<Decomposition,RhsType> Derived;
public:
typedef MatrixBase<Solve<Decomposition,RhsType> > Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
private:
Scalar coeff(Index row, Index col) const;
Scalar coeff(Index i) const;
};
// Generic API dispatcher
template<typename Decomposition, typename RhsType, typename StorageKind>
class SolveImpl : public internal::generic_xpr_base<Solve<Decomposition,RhsType>, MatrixXpr, StorageKind>::type
{
public:
typedef typename internal::generic_xpr_base<Solve<Decomposition,RhsType>, MatrixXpr, StorageKind>::type Base;
};
namespace internal {
// Evaluator of Solve -> eval into a temporary
template<typename Decomposition, typename RhsType>
struct evaluator<Solve<Decomposition,RhsType> >
: public evaluator<typename Solve<Decomposition,RhsType>::PlainObject>
{
typedef Solve<Decomposition,RhsType> SolveType;
typedef typename SolveType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
enum { Flags = Base::Flags | EvalBeforeNestingBit };
EIGEN_DEVICE_FUNC explicit evaluator(const SolveType& solve)
: m_result(solve.rows(), solve.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
solve.dec()._solve_impl(solve.rhs(), m_result);
}
protected:
PlainObject m_result;
};
// Specialization for "dst = dec.solve(rhs)"
// NOTE: we need to specialize it for Dense2Dense to avoid an ambiguous specialization error; a Sparse2Sparse specialization must exist somewhere
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
{
typedef Solve<DecType,RhsType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
src.dec()._solve_impl(src.rhs(), dst);
}
};
// Specialization for "dst = dec.transpose().solve(rhs)"
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
{
typedef Solve<Transpose<const DecType>,RhsType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
src.dec().nestedExpression().template _solve_impl_transposed<false>(src.rhs(), dst);
}
};
// Specialization for "dst = dec.adjoint().solve(rhs)"
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>,
internal::assign_op<Scalar,Scalar>, Dense2Dense>
{
typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SOLVE_H
......@@ -19,7 +19,7 @@ namespace internal {
template<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>
struct triangular_solve_vector;
template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder, int OtherInnerStride>
struct triangular_solve_matrix;
// small helper struct extracting some traits on the underlying solver operation
......@@ -68,7 +68,7 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
if(!useRhsDirectly)
MappedRhs(actualRhs,rhs.size()) = rhs;
triangular_solve_vector<LhsScalar, RhsScalar, typename Lhs::Index, Side, Mode, LhsProductTraits::NeedToConjugate,
triangular_solve_vector<LhsScalar, RhsScalar, Index, Side, Mode, LhsProductTraits::NeedToConjugate,
(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor>
::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs);
......@@ -82,7 +82,6 @@ template<typename Lhs, typename Rhs, int Side, int Mode>
struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
{
typedef typename Rhs::Scalar Scalar;
typedef typename Rhs::Index Index;
typedef blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
......@@ -96,11 +95,11 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxRowsAtCompileTime,4> BlockingType;
BlockingType blocking(rhs.rows(), rhs.cols(), size);
BlockingType blocking(rhs.rows(), rhs.cols(), size, 1, false);
triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,
(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride(), blocking);
(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor, Rhs::InnerStrideAtCompileTime>
::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.innerStride(), rhs.outerStride(), blocking);
}
};
......@@ -108,32 +107,32 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
* meta-unrolling implementation
***************************************************************************/
template<typename Lhs, typename Rhs, int Mode, int Index, int Size,
bool Stop = Index==Size>
template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size,
bool Stop = LoopIndex==Size>
struct triangular_solver_unroller;
template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,false> {
template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>
struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,false> {
enum {
IsLower = ((Mode&Lower)==Lower),
I = IsLower ? Index : Size - Index - 1,
S = IsLower ? 0 : I+1
DiagIndex = IsLower ? LoopIndex : Size - LoopIndex - 1,
StartIndex = IsLower ? 0 : DiagIndex+1
};
static void run(const Lhs& lhs, Rhs& rhs)
{
if (Index>0)
rhs.coeffRef(I) -= lhs.row(I).template segment<Index>(S).transpose()
.cwiseProduct(rhs.template segment<Index>(S)).sum();
if (LoopIndex>0)
rhs.coeffRef(DiagIndex) -= lhs.row(DiagIndex).template segment<LoopIndex>(StartIndex).transpose()
.cwiseProduct(rhs.template segment<LoopIndex>(StartIndex)).sum();
if(!(Mode & UnitDiag))
rhs.coeffRef(I) /= lhs.coeff(I,I);
rhs.coeffRef(DiagIndex) /= lhs.coeff(DiagIndex,DiagIndex);
triangular_solver_unroller<Lhs,Rhs,Mode,Index+1,Size>::run(lhs,rhs);
triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex+1,Size>::run(lhs,rhs);
}
};
template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,true> {
template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>
struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,true> {
static void run(const Lhs&, Rhs&) {}
};
......@@ -162,61 +161,38 @@ struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
* TriangularView methods
***************************************************************************/
/** "in-place" version of TriangularView::solve() where the result is written in \a other
*
* \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
* This function will const_cast it, so constness isn't honored here.
*
* See TriangularView:solve() for the details.
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatrixType, unsigned int Mode>
template<int Side, typename OtherDerived>
void TriangularView<MatrixType,Mode>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
{
OtherDerived& other = _other.const_cast_derived();
eigen_assert( cols() == rows() && ((Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols())) );
eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) );
eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
// If solving for a 0x0 matrix, nothing to do, simply return.
if (derived().cols() == 0)
return;
enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit && OtherDerived::IsVectorAtCompileTime };
enum { copy = (internal::traits<OtherDerived>::Flags & RowMajorBit) && OtherDerived::IsVectorAtCompileTime && OtherDerived::SizeAtCompileTime!=1};
typedef typename internal::conditional<copy,
typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
OtherCopy otherCopy(other);
internal::triangular_solver_selector<MatrixType, typename internal::remove_reference<OtherCopy>::type,
Side, Mode>::run(nestedExpression(), otherCopy);
Side, Mode>::run(derived().nestedExpression(), otherCopy);
if (copy)
other = otherCopy;
}
/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular.
*
* This function computes the inverse-matrix matrix product inverse(\c *this) * \a other if
* \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
* \a Side==OnTheRight.
*
* The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
* diagonal must be non-zero). It works as a forward (resp. backward) substitution if \c *this
* is a lower (resp. upper) triangular matrix.
*
* Example: \include MatrixBase_marked.cpp
* Output: \verbinclude MatrixBase_marked.out
*
* This function returns an expression of the inverse-multiply and can work in-place if it is assigned
* to the same matrix or vector \a other.
*
* For users coming from BLAS, this function (and more specifically solveInPlace()) offers
* all the operations supported by the \c *TRSV and \c *TRSM BLAS routines.
*
* \sa TriangularView::solveInPlace()
*/
template<typename Derived, unsigned int Mode>
template<int Side, typename Other>
const internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other>
TriangularView<Derived,Mode>::solve(const MatrixBase<Other>& other) const
TriangularViewImpl<Derived,Mode,Dense>::solve(const MatrixBase<Other>& other) const
{
return internal::triangular_solve_retval<Side,TriangularView,Other>(*this, other.derived());
return internal::triangular_solve_retval<Side,TriangularViewType,Other>(derived(), other.derived());
}
#endif
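// Editor's sketch (illustrative, not part of this patch): forward/back substitution
// through the TriangularView API documented above.
//
//   #include <Eigen/Dense>
//   void triangular_solve_example()
//   {
//     Eigen::Matrix3d A = Eigen::Matrix3d::Random();
//     Eigen::Vector3d b = Eigen::Vector3d::Random();
//     // x such that (lower triangle of A) * x = b
//     Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);
//     // or in place, overwriting b with the solution:
//     A.triangularView<Eigen::Lower>().solveInPlace(b);
//   }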
namespace internal {
......@@ -232,7 +208,6 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
{
typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
typedef ReturnByValue<triangular_solve_retval> Base;
typedef typename Base::Index Index;
triangular_solve_retval(const TriangularType& tri, const Rhs& rhs)
: m_triangularMatrix(tri), m_rhs(rhs)
......@@ -243,7 +218,7 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
template<typename Dest> inline void evalTo(Dest& dst) const
{
if(!(is_same<RhsNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_rhs)))
if(!is_same_dense(dst,m_rhs))
dst = m_rhs;
m_triangularMatrix.template solveInPlace<Side>(dst);
}
......
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SOLVERBASE_H
#define EIGEN_SOLVERBASE_H
namespace Eigen {
namespace internal {
} // end namespace internal
/** \class SolverBase
* \brief A base class for matrix decomposition and solvers
*
* \tparam Derived the actual type of the decomposition/solver.
*
* Any matrix decomposition inheriting this base class provides the following API:
*
* \code
* MatrixType A, b, x;
* DecompositionType dec(A);
* x = dec.solve(b); // solve A * x = b
* x = dec.transpose().solve(b); // solve A^T * x = b
* x = dec.adjoint().solve(b); // solve A' * x = b
* \endcode
*
* \warning Currently, any other usage of transpose() and adjoint() is not supported and will produce a compilation error.
*
* \sa class PartialPivLU, class FullPivLU
*/
template<typename Derived>
class SolverBase : public EigenBase<Derived>
{
public:
typedef EigenBase<Derived> Base;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef Scalar CoeffReturnType;
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
|| internal::traits<Derived>::MaxColsAtCompileTime == 1
};
/** Default constructor */
SolverBase()
{}
~SolverBase()
{}
using Base::derived;
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
/** \internal the return type of transpose() */
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
/** \returns an expression of the transpose of the factored matrix.
*
* A typical usage is to solve for the transposed problem A^T x = b:
* \code x = dec.transpose().solve(b); \endcode
*
* \sa adjoint(), solve()
*/
inline ConstTransposeReturnType transpose() const
{
return ConstTransposeReturnType(derived());
}
/** \internal the return type of adjoint() */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
ConstTransposeReturnType
>::type AdjointReturnType;
/** \returns an expression of the adjoint of the factored matrix
*
* A typical usage is to solve for the adjoint problem A' x = b:
* \code x = dec.adjoint().solve(b); \endcode
*
* For real scalar types, this function is equivalent to transpose().
*
* \sa transpose(), solve()
*/
inline AdjointReturnType adjoint() const
{
return AdjointReturnType(derived().transpose());
}
protected:
};
namespace internal {
template<typename Derived>
struct generic_xpr_base<Derived, MatrixXpr, SolverStorage>
{
typedef SolverBase<Derived> type;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SOLVERBASE_H
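// Editor's sketch (illustrative, not part of this patch): the SolverBase API
// described above, used through a concrete decomposition; PartialPivLU is just
// one example of a solver deriving from SolverBase.
//
//   #include <Eigen/Dense>
//   void solver_base_example()
//   {
//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
//     Eigen::VectorXd b = Eigen::VectorXd::Random(4);
//     Eigen::PartialPivLU<Eigen::MatrixXd> dec(A);
//     Eigen::VectorXd x1 = dec.solve(b);              // A   x = b
//     Eigen::VectorXd x2 = dec.transpose().solve(b);  // A^T x = b
//     Eigen::VectorXd x3 = dec.adjoint().solve(b);    // A^* x = b
//   }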
......@@ -17,125 +17,105 @@ namespace internal {
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
Scalar max = bl.cwiseAbs().maxCoeff();
if (max>scale)
Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
if(maxCoeff>scale)
{
ssq = ssq * abs2(scale/max);
scale = max;
invScale = Scalar(1)/scale;
ssq = ssq * numext::abs2(scale/maxCoeff);
Scalar tmp = Scalar(1)/maxCoeff;
if(tmp > NumTraits<Scalar>::highest())
{
invScale = NumTraits<Scalar>::highest();
scale = Scalar(1)/invScale;
}
else if(maxCoeff>NumTraits<Scalar>::highest()) // we got a INF
{
invScale = Scalar(1);
scale = maxCoeff;
}
else
{
scale = maxCoeff;
invScale = tmp;
}
}
else if(maxCoeff!=maxCoeff) // we got a NaN
{
scale = maxCoeff;
}
// TODO if the max is much much smaller than the current scale,
// TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
ssq += (bl*invScale).squaredNorm();
}
if(scale>Scalar(0)) // if scale==0, then bl is 0
ssq += (bl*invScale).squaredNorm();
}
/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
* This version uses a blockwise two-pass algorithm:
* 1 - find the absolute largest coefficient \c s
* 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
*
* For architecture/scalar types supporting vectorization, this version
* is faster than blueNorm(). Otherwise the blueNorm() is much faster.
*
* \sa norm(), blueNorm(), hypotNorm()
*/
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
using std::min;
const Index blockSize = 4096;
RealScalar scale(0);
RealScalar invScale(1);
RealScalar ssq(0); // sum of square
enum {
Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
};
Index n = size();
Index bi = internal::first_aligned(derived());
if (bi>0)
internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
for (; bi<n; bi+=blockSize)
internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
return scale * internal::sqrt(ssq);
}
/** \returns the \em l2 norm of \c *this using Blue's algorithm.
* A Portable Fortran Program to Find the Euclidean Norm of a Vector,
* ACM TOMS, Vol 4, Issue 1, 1978.
*
* For architecture/scalar types without vectorization, this version
* is much faster than stableNorm(). Otherwise the stableNorm() is faster.
*
* \sa norm(), stableNorm(), hypotNorm()
*/
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::blueNorm() const
inline typename NumTraits<typename traits<Derived>::Scalar>::Real
blueNorm_impl(const EigenBase<Derived>& _vec)
{
typedef typename Derived::RealScalar RealScalar;
using std::pow;
using std::min;
using std::max;
using std::sqrt;
using std::abs;
const Derived& vec(_vec.derived());
static bool initialized = false;
static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
static RealScalar b1, b2, s1m, s2m, rbig, relerr;
if(!initialized)
{
int ibeta, it, iemin, iemax, iexp;
RealScalar abig, eps;
RealScalar eps;
// This program calculates the machine-dependent constants
// bl, b2, slm, s2m, relerr overfl
// from the "basic" machine-dependent numbers
// ibeta, it, iemin, iemax, rbig.
// nbig, ibeta, it, iemin, iemax, rbig.
// The following define the basic machine-dependent constants.
// For portability, the PORT subprograms "i1mach" and "r1mach"
// are used. For any specific computer, each of the assignment
// statements can be replaced
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
iexp = -((1-iemin)/2);
b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
iexp = (iemax + 1 - it)/2;
b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
iexp = (2-iemin)/2;
s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
iexp = - ((iemax+it)/2);
s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
overfl = rbig*s2m; // overflow boundary for abig
eps = RealScalar(pow(double(ibeta), 1-it));
relerr = internal::sqrt(eps); // tolerance for neglecting asml
abig = RealScalar(1.0/eps - 1.0);
relerr = sqrt(eps); // tolerance for neglecting asml
initialized = true;
}
Index n = size();
Index n = vec.size();
RealScalar ab2 = b2 / RealScalar(n);
RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0);
for(Index j=0; j<n; ++j)
for(typename Derived::InnerIterator it(vec, 0); it; ++it)
{
RealScalar ax = internal::abs(coeff(j));
if(ax > ab2) abig += internal::abs2(ax*s2m);
else if(ax < b1) asml += internal::abs2(ax*s1m);
else amed += internal::abs2(ax);
RealScalar ax = abs(it.value());
if(ax > ab2) abig += numext::abs2(ax*s2m);
else if(ax < b1) asml += numext::abs2(ax*s1m);
else amed += numext::abs2(ax);
}
if(amed!=amed)
return amed; // we got a NaN
if(abig > RealScalar(0))
{
abig = internal::sqrt(abig);
if(abig > overfl)
{
return rbig;
}
abig = sqrt(abig);
if(abig > rbig) // overflow, or *this contains INF values
return abig; // return INF
if(amed > RealScalar(0))
{
abig = abig/s2m;
amed = internal::sqrt(amed);
amed = sqrt(amed);
}
else
return abig/s2m;
......@@ -144,20 +124,84 @@ MatrixBase<Derived>::blueNorm() const
{
if (amed > RealScalar(0))
{
abig = internal::sqrt(amed);
amed = internal::sqrt(asml) / s1m;
abig = sqrt(amed);
amed = sqrt(asml) / s1m;
}
else
return internal::sqrt(asml)/s1m;
return sqrt(asml)/s1m;
}
else
return internal::sqrt(amed);
asml = (min)(abig, amed);
abig = (max)(abig, amed);
return sqrt(amed);
asml = numext::mini(abig, amed);
abig = numext::maxi(abig, amed);
if(asml <= abig*relerr)
return abig;
else
return abig * internal::sqrt(RealScalar(1) + internal::abs2(asml/abig));
return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
}
} // end namespace internal
/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
* This version uses a blockwise two-pass algorithm:
* 1 - find the absolute largest coefficient \c s
* 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
*
* For architecture/scalar types supporting vectorization, this version
* is faster than blueNorm(). Otherwise the blueNorm() is much faster.
*
* \sa norm(), blueNorm(), hypotNorm()
*/
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
using std::sqrt;
using std::abs;
const Index blockSize = 4096;
RealScalar scale(0);
RealScalar invScale(1);
RealScalar ssq(0); // sum of square
typedef typename internal::nested_eval<Derived,2>::type DerivedCopy;
typedef typename internal::remove_all<DerivedCopy>::type DerivedCopyClean;
const DerivedCopy copy(derived());
enum {
CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit)
|| (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)
&& (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization
};
typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>,
typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
Index n = size();
if(n==1)
return abs(this->coeff(0));
Index bi = internal::first_default_aligned(copy);
if (bi>0)
internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
for (; bi<n; bi+=blockSize)
internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
return scale * sqrt(ssq);
}
/** \returns the \em l2 norm of \c *this using Blue's algorithm.
* A Portable Fortran Program to Find the Euclidean Norm of a Vector,
* ACM TOMS, Vol 4, Issue 1, 1978.
*
* For architecture/scalar types without vectorization, this version
* is much faster than stableNorm(). Otherwise the stableNorm() is faster.
*
* \sa norm(), stableNorm(), hypotNorm()
*/
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::blueNorm() const
{
return internal::blueNorm_impl(*this);
}
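// Editor's sketch (illustrative, not part of this patch): why the robust norms
// above matter. Squaring very small (or very large) coefficients under/overflows,
// while stableNorm() and blueNorm() rescale first. Values are placeholders.
//
//   Eigen::Vector3d v(1e-170, 2e-170, 3e-170);
//   double n1 = v.norm();        // squaredNorm() underflows to 0, so this is 0
//   double n2 = v.stableNorm();  // ~3.74e-170, blockwise rescaling
//   double n3 = v.blueNorm();    // ~3.74e-170, Blue's single-pass algorithm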
/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
......
......@@ -31,8 +31,8 @@ namespace Eigen {
* arguments to the constructor.
*
* Indeed, this class takes two template parameters:
* \param _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime.
* \param _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime.
* \tparam _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime.
* \tparam _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime.
*
* Here is an example:
* \include Map_general_stride.cpp
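*
* Editor's sketch (illustrative, not part of this patch), in the spirit of the example above:
* \code
* double data[12];
* for (int i = 0; i < 12; ++i) data[i] = i;
* // 3x2 view of the buffer: consecutive columns are 6 doubles apart,
* // consecutive entries within a column are 2 doubles apart.
* Eigen::Map<Eigen::MatrixXd, 0, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> >
*     m(data, 3, 2, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>(6, 2));
* \endcode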
......@@ -44,13 +44,14 @@ template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
class Stride
{
public:
typedef DenseIndex Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
InnerStrideAtCompileTime = _InnerStrideAtCompileTime,
OuterStrideAtCompileTime = _OuterStrideAtCompileTime
};
/** Default constructor, for use when strides are fixed at compile time */
EIGEN_DEVICE_FUNC
Stride()
: m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime)
{
......@@ -58,6 +59,7 @@ class Stride
}
/** Constructor for passing the strides at runtime */
EIGEN_DEVICE_FUNC
Stride(Index outerStride, Index innerStride)
: m_outer(outerStride), m_inner(innerStride)
{
......@@ -65,13 +67,16 @@ class Stride
}
/** Copy constructor */
EIGEN_DEVICE_FUNC
Stride(const Stride& other)
: m_outer(other.outer()), m_inner(other.inner())
{}
/** \returns the outer stride */
EIGEN_DEVICE_FUNC
inline Index outer() const { return m_outer.value(); }
/** \returns the inner stride */
EIGEN_DEVICE_FUNC
inline Index inner() const { return m_inner.value(); }
protected:
......@@ -81,26 +86,24 @@ class Stride
/** \brief Convenience specialization of Stride to specify only an inner stride
* See class Map for some examples */
template<int Value = Dynamic>
template<int Value>
class InnerStride : public Stride<0, Value>
{
typedef Stride<0, Value> Base;
public:
typedef DenseIndex Index;
InnerStride() : Base() {}
InnerStride(Index v) : Base(0, v) {}
EIGEN_DEVICE_FUNC InnerStride() : Base() {}
EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code
};
/** \brief Convenience specialization of Stride to specify only an outer stride
* See class Map for some examples */
template<int Value = Dynamic>
template<int Value>
class OuterStride : public Stride<Value, 0>
{
typedef Stride<Value, 0> Base;
public:
typedef DenseIndex Index;
OuterStride() : Base() {}
OuterStride(Index v) : Base(v,0) {}
EIGEN_DEVICE_FUNC OuterStride() : Base() {}
EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code
};
} // end namespace Eigen
......
......@@ -12,115 +12,56 @@
namespace Eigen {
/** \class SwapWrapper
* \ingroup Core_Module
*
* \internal
*
* \brief Internal helper class for swapping two expressions
*/
namespace internal {
template<typename ExpressionType>
struct traits<SwapWrapper<ExpressionType> > : traits<ExpressionType> {};
}
template<typename ExpressionType> class SwapWrapper
: public internal::dense_xpr_base<SwapWrapper<ExpressionType> >::type
// Overload the default assignPacket behavior to swap the packets
template<typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT>
class generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, Specialized>
: public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn>
{
public:
typedef typename internal::dense_xpr_base<SwapWrapper>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
typedef typename internal::packet_traits<Scalar>::type Packet;
inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
inline Index rows() const { return m_expression.rows(); }
inline Index cols() const { return m_expression.cols(); }
inline Index outerStride() const { return m_expression.outerStride(); }
inline Index innerStride() const { return m_expression.innerStride(); }
typedef typename internal::conditional<
internal::is_lvalue<ExpressionType>::value,
Scalar,
const Scalar
>::type ScalarWithConstIfNotLvalue;
inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
inline const Scalar* data() const { return m_expression.data(); }
inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
inline Scalar& coeffRef(Index row, Index col) const
{
return m_expression.coeffRef(row, col);
}
inline Scalar& coeffRef(Index index) const
{
return m_expression.coeffRef(index);
}
template<typename OtherDerived>
void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
Scalar tmp = m_expression.coeff(row, col);
m_expression.coeffRef(row, col) = _other.coeff(row, col);
_other.coeffRef(row, col) = tmp;
}
template<typename OtherDerived>
void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(index >= 0 && index < m_expression.size());
Scalar tmp = m_expression.coeff(index);
m_expression.coeffRef(index) = _other.coeff(index);
_other.coeffRef(index) = tmp;
}
template<typename OtherDerived, int StoreMode, int LoadMode>
void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
Packet tmp = m_expression.template packet<StoreMode>(row, col);
m_expression.template writePacket<StoreMode>(row, col,
_other.template packet<LoadMode>(row, col)
);
_other.template writePacket<LoadMode>(row, col, tmp);
}
template<typename OtherDerived, int StoreMode, int LoadMode>
void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
eigen_internal_assert(index >= 0 && index < m_expression.size());
Packet tmp = m_expression.template packet<StoreMode>(index);
m_expression.template writePacket<StoreMode>(index,
_other.template packet<LoadMode>(index)
);
_other.template writePacket<LoadMode>(index, tmp);
}
ExpressionType& expression() const { return m_expression; }
protected:
ExpressionType& m_expression;
protected:
typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn> Base;
using Base::m_dst;
using Base::m_src;
using Base::m_functor;
public:
typedef typename Base::Scalar Scalar;
typedef typename Base::DstXprType DstXprType;
typedef swap_assign_op<Scalar> Functor;
EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
template<int StoreMode, int LoadMode, typename PacketType>
void assignPacket(Index row, Index col)
{
PacketType tmp = m_src.template packet<LoadMode,PacketType>(row,col);
const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(row,col, m_dst.template packet<StoreMode,PacketType>(row,col));
m_dst.template writePacket<StoreMode>(row,col,tmp);
}
template<int StoreMode, int LoadMode, typename PacketType>
void assignPacket(Index index)
{
PacketType tmp = m_src.template packet<LoadMode,PacketType>(index);
const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(index, m_dst.template packet<StoreMode,PacketType>(index));
m_dst.template writePacket<StoreMode>(index,tmp);
}
// TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I mean no CRTP (Gael)
template<int StoreMode, int LoadMode, typename PacketType>
void assignPacketByOuterInner(Index outer, Index inner)
{
Index row = Base::rowIndexByOuterInner(outer, inner);
Index col = Base::colIndexByOuterInner(outer, inner);
assignPacket<StoreMode,LoadMode,PacketType>(row, col);
}
};
} // namespace internal
} // end namespace Eigen
#endif // EIGEN_SWAP_H
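// Editor's sketch (illustrative, not part of this patch): the swap kernel above
// backs the user-facing swap() member, which exchanges coefficients packet-wise
// without allocating a full temporary.
//
//   Eigen::VectorXd a = Eigen::VectorXd::Constant(4, 1.0);
//   Eigen::VectorXd b = Eigen::VectorXd::Constant(4, 2.0);
//   a.swap(b);   // a is now all 2s, b all 1s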
......@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
......@@ -13,39 +13,21 @@
namespace Eigen {
/** \class Transpose
* \ingroup Core_Module
*
* \brief Expression of the transpose of a matrix
*
* \param MatrixType the type of the object of which we are taking the transpose
*
* This class represents an expression of the transpose of a matrix.
* It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::transpose(), MatrixBase::adjoint()
*/
namespace internal {
template<typename MatrixType>
struct traits<Transpose<MatrixType> > : traits<MatrixType>
struct traits<Transpose<MatrixType> > : public traits<MatrixType>
{
typedef typename MatrixType::Scalar Scalar;
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename traits<MatrixType>::XprKind XprKind;
enum {
RowsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit),
Flags0 = traits<MatrixTypeNestedPlain>::Flags & ~(LvalueBit | NestByRefBit),
Flags1 = Flags0 | FlagsLvalueBit,
Flags = Flags1 ^ RowMajorBit,
CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost,
InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,
OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
};
......@@ -54,31 +36,55 @@ struct traits<Transpose<MatrixType> > : traits<MatrixType>
template<typename MatrixType, typename StorageKind> class TransposeImpl;
/** \class Transpose
* \ingroup Core_Module
*
* \brief Expression of the transpose of a matrix
*
* \tparam MatrixType the type of the object of which we are taking the transpose
*
* This class represents an expression of the transpose of a matrix.
* It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::transpose(), MatrixBase::adjoint()
*/
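// Editor's sketch (illustrative, not part of this patch): Transpose is a lazy
// expression, so assigning it back into its own operand needs care about aliasing.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(2,3);
//   Eigen::MatrixXd At = A.transpose();   // evaluates the Transpose expression
//   A.transposeInPlace();                 // safe way to transpose A itself
//   // A = A.transpose();                 // would alias; use .eval() or transposeInPlace()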
template<typename MatrixType> class Transpose
: public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>
{
public:
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
EIGEN_DEVICE_FUNC
explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
inline Index rows() const { return m_matrix.cols(); }
inline Index cols() const { return m_matrix.rows(); }
EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); }
/** \returns the nested expression */
const typename internal::remove_all<typename MatrixType::Nested>::type&
EIGEN_DEVICE_FUNC
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
/** \returns the nested expression */
typename internal::remove_all<typename MatrixType::Nested>::type&
nestedExpression() { return m_matrix.const_cast_derived(); }
EIGEN_DEVICE_FUNC
typename internal::remove_reference<MatrixTypeNested>::type&
nestedExpression() { return m_matrix; }
/** \internal */
void resize(Index nrows, Index ncols) {
m_matrix.resize(ncols,nrows);
}
protected:
typename MatrixType::Nested m_matrix;
typename internal::ref_selector<MatrixType>::non_const_type m_matrix;
};
namespace internal {
......@@ -97,16 +103,27 @@ struct TransposeImpl_base<MatrixType, false>
} // end namespace internal
// Generic API dispatcher
template<typename XprType, typename StorageKind>
class TransposeImpl
: public internal::generic_xpr_base<Transpose<XprType> >::type
{
public:
typedef typename internal::generic_xpr_base<Transpose<XprType> >::type Base;
};
template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
: public internal::TransposeImpl_base<MatrixType>::type
{
public:
typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
using Base::coeffRef;
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
typedef typename internal::conditional<
internal::is_lvalue<MatrixType>::value,
......@@ -114,64 +131,23 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
const Scalar
>::type ScalarWithConstIfNotLvalue;
inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
inline const Scalar* data() const { return derived().nestedExpression().data(); }
EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
EIGEN_DEVICE_FUNC inline const Scalar* data() const { return derived().nestedExpression().data(); }
inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
// FIXME: shall we keep the const version of coeffRef?
EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index rowId, Index colId) const
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
return derived().nestedExpression().const_cast_derived().coeffRef(col, row);
}
inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
return derived().nestedExpression().const_cast_derived().coeffRef(index);
}
inline const Scalar& coeffRef(Index row, Index col) const
{
return derived().nestedExpression().coeffRef(col, row);
return derived().nestedExpression().coeffRef(colId, rowId);
}
EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index index) const
{
return derived().nestedExpression().coeffRef(index);
}
inline CoeffReturnType coeff(Index row, Index col) const
{
return derived().nestedExpression().coeff(col, row);
}
inline CoeffReturnType coeff(Index index) const
{
return derived().nestedExpression().coeff(index);
}
template<int LoadMode>
inline const PacketScalar packet(Index row, Index col) const
{
return derived().nestedExpression().template packet<LoadMode>(col, row);
}
template<int LoadMode>
inline void writePacket(Index row, Index col, const PacketScalar& x)
{
derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(col, row, x);
}
template<int LoadMode>
inline const PacketScalar packet(Index index) const
{
return derived().nestedExpression().template packet<LoadMode>(index);
}
template<int LoadMode>
inline void writePacket(Index index, const PacketScalar& x)
{
derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(index, x);
}
protected:
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TransposeImpl)
};
/** \returns an expression of the transpose of *this.
......@@ -197,7 +173,7 @@ template<typename Derived>
inline Transpose<Derived>
DenseBase<Derived>::transpose()
{
return derived();
return TransposeReturnType(derived());
}
/** This is the const version of transpose().
......@@ -206,7 +182,7 @@ DenseBase<Derived>::transpose()
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
inline const typename DenseBase<Derived>::ConstTransposeReturnType
inline typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
return ConstTransposeReturnType(derived());
......@@ -235,8 +211,7 @@ template<typename Derived>
inline const typename MatrixBase<Derived>::AdjointReturnType
MatrixBase<Derived>::adjoint() const
{
return this->transpose(); // in the complex case, the .conjugate() is be implicit here
// due to implicit conversion to return type
return AdjointReturnType(this->transpose());
}
/***************************************************************************
......@@ -246,21 +221,41 @@ MatrixBase<Derived>::adjoint() const
namespace internal {
template<typename MatrixType,
bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic>
bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic,
bool MatchPacketSize =
(int(MatrixType::RowsAtCompileTime) == int(internal::packet_traits<typename MatrixType::Scalar>::size))
&& (internal::evaluator<MatrixType>::Flags&PacketAccessBit) >
struct inplace_transpose_selector;
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true> { // square matrix
struct inplace_transpose_selector<MatrixType,true,false> { // square matrix
static void run(MatrixType& m) {
m.template triangularView<StrictlyUpper>().swap(m.transpose());
m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
}
};
// TODO: vectorized path is currently limited to LargestPacketSize x LargestPacketSize cases only.
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,false> { // non square matrix
struct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x PacketSize
static void run(MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
typedef typename internal::packet_traits<typename MatrixType::Scalar>::type Packet;
const Index PacketSize = internal::packet_traits<Scalar>::size;
const Index Alignment = internal::evaluator<MatrixType>::Alignment;
PacketBlock<Packet> A;
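// Gather the PacketSize x PacketSize block into registers, transpose it
// in one shot with ptranspose(), then scatter the transposed packets back.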
for (Index i=0; i<PacketSize; ++i)
A.packet[i] = m.template packetByOuterInner<Alignment>(i,0);
internal::ptranspose(A);
for (Index i=0; i<PacketSize; ++i)
m.template writePacket<Alignment>(m.rowIndexByOuterInner(i,0), m.colIndexByOuterInner(i,0), A.packet[i]);
}
};
template<typename MatrixType,bool MatchPacketSize>
struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non square matrix
static void run(MatrixType& m) {
if (m.rows()==m.cols())
m.template triangularView<StrictlyUpper>().swap(m.transpose());
m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
else
m = m.transpose().eval();
}
......@@ -278,17 +273,20 @@ struct inplace_transpose_selector<MatrixType,false> { // non square matrix
* m = m.transpose().eval();
* \endcode
* and is faster and also safer because in the latter line of code, forgetting the eval() results
* in a bug caused by aliasing.
* in a bug caused by \ref TopicAliasing "aliasing".
*
* Notice however that this method is only useful if you want to replace a matrix by its own transpose.
* If you just need the transpose of a matrix, use transpose().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), adjointInPlace() */
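/* A short sketch of the situations described above (sizes are illustrative):
* \code
* Eigen::Matrix3f a = Eigen::Matrix3f::Random();
* a.transposeInPlace();                    // fine: square, swaps the two strict triangles
*
* Eigen::MatrixXf b(2, 5);
* b.setRandom();
* b.transposeInPlace();                    // fine: b is resizable and becomes 5 x 2
*
* // Eigen::Matrix<float,2,5> c; c.transposeInPlace();  // would assert: non-square and not resizable
* \endcode
*/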
template<typename Derived>
inline void DenseBase<Derived>::transposeInPlace()
{
eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
&& "transposeInPlace() called on a non-square non-resizable matrix");
internal::inplace_transpose_selector<Derived>::run(derived());
}
......@@ -312,6 +310,7 @@ inline void DenseBase<Derived>::transposeInPlace()
* If you just need the adjoint of a matrix, use adjoint().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), transposeInPlace() */
template<typename Derived>
......@@ -326,14 +325,6 @@ inline void MatrixBase<Derived>::adjointInPlace()
namespace internal {
template<typename BinOp,typename NestedXpr,typename Rhs>
struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
: blas_traits<NestedXpr>
{
typedef SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> XprType;
static inline const XprType extract(const XprType& x) { return x; }
};
template<bool DestIsTransposed, typename OtherDerived>
struct check_transpose_aliasing_compile_time_selector
{
......@@ -385,7 +376,7 @@ struct checkTransposeAliasing_impl
eigen_assert((!check_transpose_aliasing_run_time_selector
<typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
::run(extract_data(dst), other))
&& "aliasing detected during tranposition, use transposeInPlace() "
&& "aliasing detected during transposition, use transposeInPlace() "
"or evaluate the rhs into a temporary using .eval()");
}
......@@ -399,15 +390,15 @@ struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
}
};
} // end namespace internal
template<typename Derived>
template<typename OtherDerived>
void DenseBase<Derived>::checkTransposeAliasing(const OtherDerived& other) const
template<typename Dst, typename Src>
void check_for_aliasing(const Dst &dst, const Src &src)
{
internal::checkTransposeAliasing_impl<Derived, OtherDerived>::run(derived(), other);
internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
}
#endif
} // end namespace internal
#endif // EIGEN_NO_DEBUG
} // end namespace Eigen
......
......@@ -12,39 +12,6 @@
namespace Eigen {
/** \class Transpositions
* \ingroup Core_Module
*
* \brief Represents a sequence of transpositions (row/column interchange)
*
* \param SizeAtCompileTime the number of transpositions, or Dynamic
* \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
*
* This class represents a permutation transformation as a sequence of \em n transpositions
* \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
* Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
* the rows \c i and \c indices[i] of the matrix \c M.
* A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
*
* Compared to the class PermutationMatrix, such a sequence of transpositions is what is
* computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.
*
* To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
* \code
* Transpositions tr;
* MatrixXf mat;
* mat = tr * mat;
* \endcode
* In this example, we detect that the matrix appears on both side, and so the transpositions
* are applied in-place without any temporary or extra copy.
*
* \sa class PermutationMatrix
*/
namespace internal {
template<typename TranspositionType, typename MatrixType, int Side, bool Transposed=false> struct transposition_matrix_product_retval;
}
template<typename Derived>
class TranspositionsBase
{
......@@ -53,7 +20,8 @@ class TranspositionsBase
public:
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar Index;
typedef typename IndicesType::Scalar StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
......@@ -66,32 +34,25 @@ class TranspositionsBase
return derived();
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is a special case of the templated operator=. Its purpose is to
* prevent a default operator= from hiding the templated operator=.
*/
Derived& operator=(const TranspositionsBase& other)
{
indices() = other.indices();
return derived();
}
#endif
/** \returns the number of transpositions */
inline Index size() const { return indices().size(); }
Index size() const { return indices().size(); }
/** \returns the number of rows of the equivalent permutation matrix */
Index rows() const { return indices().size(); }
/** \returns the number of columns of the equivalent permutation matrix */
Index cols() const { return indices().size(); }
/** Direct access to the underlying index vector */
inline const Index& coeff(Index i) const { return indices().coeff(i); }
inline const StorageIndex& coeff(Index i) const { return indices().coeff(i); }
/** Direct access to the underlying index vector */
inline Index& coeffRef(Index i) { return indices().coeffRef(i); }
inline StorageIndex& coeffRef(Index i) { return indices().coeffRef(i); }
/** Direct access to the underlying index vector */
inline const Index& operator()(Index i) const { return indices()(i); }
inline const StorageIndex& operator()(Index i) const { return indices()(i); }
/** Direct access to the underlying index vector */
inline Index& operator()(Index i) { return indices()(i); }
inline StorageIndex& operator()(Index i) { return indices()(i); }
/** Direct access to the underlying index vector */
inline const Index& operator[](Index i) const { return indices()(i); }
inline const StorageIndex& operator[](Index i) const { return indices()(i); }
/** Direct access to the underlying index vector */
inline Index& operator[](Index i) { return indices()(i); }
inline StorageIndex& operator[](Index i) { return indices()(i); }
/** const version of indices(). */
const IndicesType& indices() const { return derived().indices(); }
......@@ -99,15 +60,15 @@ class TranspositionsBase
IndicesType& indices() { return derived().indices(); }
/** Resizes to given size. */
inline void resize(int size)
inline void resize(Index newSize)
{
indices().resize(size);
indices().resize(newSize);
}
/** Sets \c *this to represents an identity transformation */
void setIdentity()
{
for(int i = 0; i < indices().size(); ++i)
for(StorageIndex i = 0; i < indices().size(); ++i)
coeffRef(i) = i;
}
......@@ -144,23 +105,53 @@ class TranspositionsBase
};
namespace internal {
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>
struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >
: traits<PermutationMatrix<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >
{
typedef IndexType Index;
typedef Matrix<Index, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
typedef TranspositionsStorage StorageKind;
};
}
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
/** \class Transpositions
* \ingroup Core_Module
*
* \brief Represents a sequence of transpositions (row/column interchange)
*
* \tparam SizeAtCompileTime the number of transpositions, or Dynamic
* \tparam MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
*
* This class represents a permutation transformation as a sequence of \em n transpositions
* \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
* Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
* the rows \c i and \c indices[i] of the matrix \c M.
* A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
*
* Compared to the class PermutationMatrix, such a sequence of transpositions is what is
* computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.
*
* To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
* \code
* Transpositions<Dynamic> tr;
* MatrixXf mat;
* mat = tr * mat;
* \endcode
* In this example, we detect that the matrix appears on both sides, and so the transpositions
* are applied in-place without any temporary or extra copy.
*
* \sa class PermutationMatrix
*/
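/* A more concrete sketch (the size and index values below are purely illustrative):
* \code
* Eigen::Transpositions<Eigen::Dynamic> tr(3);
* tr[0] = 1;   // T_0 interchanges rows 0 and 1
* tr[1] = 1;   // T_1 leaves row 1 where it is
* tr[2] = 2;   // T_2 leaves row 2 where it is
* Eigen::MatrixXf mat = Eigen::MatrixXf::Random(3, 3);
* mat = tr * mat;   // applies the row interchanges in-place
* \endcode
*/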
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>
class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >
{
typedef internal::traits<Transpositions> Traits;
public:
typedef TranspositionsBase<Transpositions> Base;
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar Index;
typedef typename IndicesType::Scalar StorageIndex;
inline Transpositions() {}
......@@ -169,12 +160,6 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
inline Transpositions(const TranspositionsBase<OtherDerived>& other)
: m_indices(other.indices()) {}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Standard copy constructor. Defined only to prevent a default copy constructor
* from hiding the other templated constructor */
inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
#endif
/** Generic constructor from expression of the transposition indices. */
template<typename Other>
explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
......@@ -187,17 +172,6 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
return Base::operator=(other);
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is a special case of the templated operator=. Its purpose is to
* prevent a default operator= from hiding the templated operator=.
*/
Transpositions& operator=(const Transpositions& other)
{
m_indices = other.m_indices;
return *this;
}
#endif
/** Constructs an uninitialized permutation matrix of given size.
*/
inline Transpositions(Index size) : m_indices(size)
......@@ -215,31 +189,33 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
namespace internal {
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,_PacketAccess> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>
struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,_PacketAccess> >
: traits<PermutationMatrix<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >
{
typedef IndexType Index;
typedef Map<const Matrix<Index,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
typedef Map<const Matrix<_StorageIndex,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
typedef _StorageIndex StorageIndex;
typedef TranspositionsStorage StorageKind;
};
}
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int PacketAccess>
class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess>
: public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int PacketAccess>
class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,PacketAccess>
: public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,PacketAccess> >
{
typedef internal::traits<Map> Traits;
public:
typedef TranspositionsBase<Map> Base;
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar Index;
typedef typename IndicesType::Scalar StorageIndex;
inline Map(const Index* indices)
: m_indices(indices)
explicit inline Map(const StorageIndex* indicesPtr)
: m_indices(indicesPtr)
{}
inline Map(const Index* indices, Index size)
: m_indices(indices,size)
inline Map(const StorageIndex* indicesPtr, Index size)
: m_indices(indicesPtr,size)
{}
/** Copies the \a other transpositions into \c *this */
......@@ -274,9 +250,9 @@ class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,Packe
namespace internal {
template<typename _IndicesType>
struct traits<TranspositionsWrapper<_IndicesType> >
: traits<PermutationWrapper<_IndicesType> >
{
typedef typename _IndicesType::Scalar Index;
typedef _IndicesType IndicesType;
typedef TranspositionsStorage StorageKind;
};
}
......@@ -289,9 +265,9 @@ class TranspositionsWrapper
typedef TranspositionsBase<TranspositionsWrapper> Base;
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar Index;
typedef typename IndicesType::Scalar StorageIndex;
inline TranspositionsWrapper(IndicesType& indices)
explicit inline TranspositionsWrapper(IndicesType& indices)
: m_indices(indices)
{}
......@@ -302,17 +278,6 @@ class TranspositionsWrapper
return Base::operator=(other);
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is a special case of the templated operator=. Its purpose is to
* prevent a default operator= from hiding the templated operator=.
*/
TranspositionsWrapper& operator=(const TranspositionsWrapper& other)
{
m_indices = other.m_indices;
return *this;
}
#endif
/** const version of indices(). */
const IndicesType& indices() const { return m_indices; }
......@@ -321,83 +286,46 @@ class TranspositionsWrapper
protected:
const typename IndicesType::Nested m_indices;
typename IndicesType::Nested m_indices;
};
/** \returns the \a matrix with the \a transpositions applied to the columns.
*/
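/* Both orderings are available; a quick sketch (names and sizes are illustrative):
* \code
* Eigen::Transpositions<Eigen::Dynamic> tr(2);
* tr[0] = 1; tr[1] = 1;
* Eigen::MatrixXf m = Eigen::MatrixXf::Random(2, 2);
* Eigen::MatrixXf byCols = m * tr;   // interchanges columns 0 and 1
* Eigen::MatrixXf byRows = tr * m;   // interchanges rows 0 and 1 (see the overload below)
* \endcode
*/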
template<typename Derived, typename TranspositionsDerived>
inline const internal::transposition_matrix_product_retval<TranspositionsDerived, Derived, OnTheRight>
operator*(const MatrixBase<Derived>& matrix,
const TranspositionsBase<TranspositionsDerived> &transpositions)
template<typename MatrixDerived, typename TranspositionsDerived>
EIGEN_DEVICE_FUNC
const Product<MatrixDerived, TranspositionsDerived, AliasFreeProduct>
operator*(const MatrixBase<MatrixDerived> &matrix,
const TranspositionsBase<TranspositionsDerived>& transpositions)
{
return internal::transposition_matrix_product_retval
<TranspositionsDerived, Derived, OnTheRight>
(transpositions.derived(), matrix.derived());
return Product<MatrixDerived, TranspositionsDerived, AliasFreeProduct>
(matrix.derived(), transpositions.derived());
}
/** \returns the \a matrix with the \a transpositions applied to the rows.
*/
template<typename Derived, typename TranspositionDerived>
inline const internal::transposition_matrix_product_retval
<TranspositionDerived, Derived, OnTheLeft>
operator*(const TranspositionsBase<TranspositionDerived> &transpositions,
const MatrixBase<Derived>& matrix)
template<typename TranspositionsDerived, typename MatrixDerived>
EIGEN_DEVICE_FUNC
const Product<TranspositionsDerived, MatrixDerived, AliasFreeProduct>
operator*(const TranspositionsBase<TranspositionsDerived> &transpositions,
const MatrixBase<MatrixDerived>& matrix)
{
return internal::transposition_matrix_product_retval
<TranspositionDerived, Derived, OnTheLeft>
(transpositions.derived(), matrix.derived());
return Product<TranspositionsDerived, MatrixDerived, AliasFreeProduct>
(transpositions.derived(), matrix.derived());
}
namespace internal {
template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
struct traits<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
{
typedef typename MatrixType::PlainObject ReturnType;
};
template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
struct transposition_matrix_product_retval
: public ReturnByValue<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
{
typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
typedef typename TranspositionType::Index Index;
// Template partial specialization for transposed/inverse transpositions
transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
: m_transpositions(tr), m_matrix(matrix)
{}
inline int rows() const { return m_matrix.rows(); }
inline int cols() const { return m_matrix.cols(); }
template<typename Dest> inline void evalTo(Dest& dst) const
{
const int size = m_transpositions.size();
Index j = 0;
if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
dst = m_matrix;
for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
if((j=m_transpositions.coeff(k))!=k)
{
if(Side==OnTheLeft)
dst.row(k).swap(dst.row(j));
else if(Side==OnTheRight)
dst.col(k).swap(dst.col(j));
}
}
namespace internal {
protected:
const TranspositionType& m_transpositions;
typename MatrixType::Nested m_matrix;
};
template<typename Derived>
struct traits<Transpose<TranspositionsBase<Derived> > >
: traits<Derived>
{};
} // end namespace internal
/* Template partial specialization for transposed/inverse transpositions */
template<typename TranspositionsDerived>
class Transpose<TranspositionsBase<TranspositionsDerived> >
{
......@@ -405,27 +333,31 @@ class Transpose<TranspositionsBase<TranspositionsDerived> >
typedef typename TranspositionType::IndicesType IndicesType;
public:
Transpose(const TranspositionType& t) : m_transpositions(t) {}
explicit Transpose(const TranspositionType& t) : m_transpositions(t) {}
inline int size() const { return m_transpositions.size(); }
Index size() const { return m_transpositions.size(); }
Index rows() const { return m_transpositions.size(); }
Index cols() const { return m_transpositions.size(); }
/** \returns the \a matrix with the inverse transpositions applied to the columns.
*/
template<typename Derived> friend
inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>
operator*(const MatrixBase<Derived>& matrix, const Transpose& trt)
template<typename OtherDerived> friend
const Product<OtherDerived, Transpose, AliasFreeProduct>
operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trt)
{
return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>(trt.m_transpositions, matrix.derived());
return Product<OtherDerived, Transpose, AliasFreeProduct>(matrix.derived(), trt);
}
/** \returns the \a matrix with the inverse transpositions applied to the rows.
*/
template<typename Derived>
inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>
operator*(const MatrixBase<Derived>& matrix) const
template<typename OtherDerived>
const Product<Transpose, OtherDerived, AliasFreeProduct>
operator*(const MatrixBase<OtherDerived>& matrix) const
{
return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>(m_transpositions, matrix.derived());
return Product<Transpose, OtherDerived, AliasFreeProduct>(*this, matrix.derived());
}
const TranspositionType& nestedExpression() const { return m_transpositions; }
protected:
const TranspositionType& m_transpositions;
......
......@@ -19,9 +19,7 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
}
/** \internal
*
* \class TriangularBase
/** \class TriangularBase
* \ingroup Core_Module
*
* \brief Base class for triangular part in a matrix
......@@ -32,41 +30,69 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
enum {
Mode = internal::traits<Derived>::Mode,
CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
/**< This is equal to the number of coefficients, i.e. the number of
* rows times the number of columns, or to \a Dynamic if this is not
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime>::ret)
};
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::DenseMatrixType DenseMatrixType;
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::traits<Derived>::FullMatrixType DenseMatrixType;
typedef DenseMatrixType DenseType;
typedef Derived const& Nested;
EIGEN_DEVICE_FUNC
inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
EIGEN_DEVICE_FUNC
inline Index rows() const { return derived().rows(); }
EIGEN_DEVICE_FUNC
inline Index cols() const { return derived().cols(); }
EIGEN_DEVICE_FUNC
inline Index outerStride() const { return derived().outerStride(); }
EIGEN_DEVICE_FUNC
inline Index innerStride() const { return derived().innerStride(); }
// dummy resize function
void resize(Index rows, Index cols)
{
EIGEN_UNUSED_VARIABLE(rows);
EIGEN_UNUSED_VARIABLE(cols);
eigen_assert(rows==this->rows() && cols==this->cols());
}
EIGEN_DEVICE_FUNC
inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); }
EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
/** \see MatrixBase::copyCoeff(row,col)
*/
template<typename Other>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
{
derived().coeffRef(row, col) = other.coeff(row, col);
}
EIGEN_DEVICE_FUNC
inline Scalar operator()(Index row, Index col) const
{
check_coordinates(row, col);
return coeff(row,col);
}
EIGEN_DEVICE_FUNC
inline Scalar& operator()(Index row, Index col)
{
check_coordinates(row, col);
......@@ -74,15 +100,20 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_DEVICE_FUNC
inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
EIGEN_DEVICE_FUNC
inline Derived& derived() { return *static_cast<Derived*>(this); }
#endif // not EIGEN_PARSED_BY_DOXYGEN
template<typename DenseDerived>
EIGEN_DEVICE_FUNC
void evalTo(MatrixBase<DenseDerived> &other) const;
template<typename DenseDerived>
EIGEN_DEVICE_FUNC
void evalToLazy(MatrixBase<DenseDerived> &other) const;
EIGEN_DEVICE_FUNC
DenseMatrixType toDenseMatrix() const
{
DenseMatrixType res(rows(), cols());
......@@ -119,17 +150,17 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
/** \class TriangularView
* \ingroup Core_Module
*
* \brief Base class for triangular part in a matrix
* \brief Expression of a triangular part in a matrix
*
* \param MatrixType the type of the object in which we are taking the triangular part
* \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
* #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
* This is in fact a bit field; it must have either #Upper or #Lower,
* and additionnaly it may have #UnitDiag or #ZeroDiag or neither.
* and additionally it may have #UnitDiag or #ZeroDiag or neither.
*
* This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
* matrices one should speak of "trapezoid" parts. This class is the return type
* of MatrixBase::triangularView() and most of the time this is the only way it is used.
* of MatrixBase::triangularView() and SparseMatrixBase::triangularView(), and most of the time this is the only way it is used.
*
* \sa MatrixBase::triangularView()
*/
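/* A minimal usage sketch (matrix contents are illustrative):
* \code
* Eigen::Matrix3f m = Eigen::Matrix3f::Random();
* // Copy the upper triangular part of m; entries below the diagonal come out as zero.
* Eigen::Matrix3f u = m.triangularView<Eigen::Upper>();
* // Views can also be written to, e.g. to zero the strictly lower part of m:
* m.triangularView<Eigen::StrictlyLower>().setZero();
* \endcode
*/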
......@@ -137,490 +168,407 @@ namespace internal {
template<typename MatrixType, unsigned int _Mode>
struct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType>
{
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
typedef typename MatrixType::PlainObject FullMatrixType;
typedef MatrixType ExpressionType;
typedef typename MatrixType::PlainObject DenseMatrixType;
enum {
Mode = _Mode,
Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits | FlagsLvalueBit) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)))
};
};
}
template<int Mode, bool LhsIsTriangular,
typename Lhs, bool LhsIsVector,
typename Rhs, bool RhsIsVector>
struct TriangularProduct;
template<typename _MatrixType, unsigned int _Mode, typename StorageKind> class TriangularViewImpl;
template<typename _MatrixType, unsigned int _Mode> class TriangularView
: public TriangularBase<TriangularView<_MatrixType, _Mode> >
: public TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind >
{
public:
typedef TriangularBase<TriangularView> Base;
typedef TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind > Base;
typedef typename internal::traits<TriangularView>::Scalar Scalar;
typedef _MatrixType MatrixType;
typedef typename internal::traits<TriangularView>::DenseMatrixType DenseMatrixType;
typedef DenseMatrixType PlainObject;
protected:
typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested;
typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
public:
using Base::evalToLazy;
typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
typedef typename internal::traits<TriangularView>::Index Index;
typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned NestedExpression;
enum {
Mode = _Mode,
Flags = internal::traits<TriangularView>::Flags,
TransposeMode = (Mode & Upper ? Lower : 0)
| (Mode & Lower ? Upper : 0)
| (Mode & (UnitDiag))
| (Mode & (ZeroDiag))
| (Mode & (ZeroDiag)),
IsVectorAtCompileTime = false
};
inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
EIGEN_DEVICE_FUNC
explicit inline TriangularView(MatrixType& matrix) : m_matrix(matrix)
{}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TriangularView)
/** \copydoc EigenBase::rows() */
EIGEN_DEVICE_FUNC
inline Index rows() const { return m_matrix.rows(); }
/** \copydoc EigenBase::cols() */
EIGEN_DEVICE_FUNC
inline Index cols() const { return m_matrix.cols(); }
inline Index outerStride() const { return m_matrix.outerStride(); }
inline Index innerStride() const { return m_matrix.innerStride(); }
/** \returns a const reference to the nested expression */
EIGEN_DEVICE_FUNC
const NestedExpression& nestedExpression() const { return m_matrix; }
/** \returns a reference to the nested expression */
EIGEN_DEVICE_FUNC
NestedExpression& nestedExpression() { return m_matrix; }
typedef TriangularView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;
/** \sa MatrixBase::conjugate() const */
EIGEN_DEVICE_FUNC
inline const ConjugateReturnType conjugate() const
{ return ConjugateReturnType(m_matrix.conjugate()); }
typedef TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;
/** \sa MatrixBase::adjoint() const */
EIGEN_DEVICE_FUNC
inline const AdjointReturnType adjoint() const
{ return AdjointReturnType(m_matrix.adjoint()); }
typedef TriangularView<typename MatrixType::TransposeReturnType,TransposeMode> TransposeReturnType;
/** \sa MatrixBase::transpose() */
EIGEN_DEVICE_FUNC
inline TransposeReturnType transpose()
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
typename MatrixType::TransposeReturnType tmp(m_matrix);
return TransposeReturnType(tmp);
}
typedef TriangularView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType;
/** \sa MatrixBase::transpose() const */
EIGEN_DEVICE_FUNC
inline const ConstTransposeReturnType transpose() const
{
return ConstTransposeReturnType(m_matrix.transpose());
}
template<typename Other>
EIGEN_DEVICE_FUNC
inline const Solve<TriangularView, Other>
solve(const MatrixBase<Other>& other) const
{ return Solve<TriangularView, Other>(*this, other.derived()); }
// workaround MSVC ICE
#if EIGEN_COMP_MSVC
template<int Side, typename Other>
EIGEN_DEVICE_FUNC
inline const internal::triangular_solve_retval<Side,TriangularView, Other>
solve(const MatrixBase<Other>& other) const
{ return Base::template solve<Side>(other); }
#else
using Base::solve;
#endif
/** \returns a selfadjoint view of the referenced triangular part which must be either \c #Upper or \c #Lower.
*
* This is a shortcut for \code this->nestedExpression().selfadjointView<(*this)::Mode>() \endcode
* \sa MatrixBase::selfadjointView() */
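/* For example (illustrative):
* \code
* Eigen::Matrix3d m = Eigen::Matrix3d::Random();
* // Interpret the upper triangle of m as a symmetric (self-adjoint) matrix:
* Eigen::Matrix3d s = m.triangularView<Eigen::Upper>().selfadjointView();
* \endcode
*/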
EIGEN_DEVICE_FUNC
SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()
{
EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR);
return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
}
/** This is the const version of selfadjointView() */
EIGEN_DEVICE_FUNC
const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const
{
EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR);
return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
}
/** \returns the determinant of the triangular matrix
* \sa MatrixBase::determinant() */
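/* For a triangular view this reduces to the product of the diagonal entries
* (or to 1 / 0 for UnitDiag / ZeroDiag views), e.g.
* \code
* Eigen::Matrix3d m = Eigen::Matrix3d::Random();
* double d = m.triangularView<Eigen::Lower>().determinant();   // == m.diagonal().prod()
* \endcode
*/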
EIGEN_DEVICE_FUNC
Scalar determinant() const
{
if (Mode & UnitDiag)
return 1;
else if (Mode & ZeroDiag)
return 0;
else
return m_matrix.diagonal().prod();
}
protected:
MatrixTypeNested m_matrix;
};
/** \ingroup Core_Module
*
* \brief Base class for a triangular part in a \b dense matrix
*
* This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.
* It extends class TriangularView with additional methods which are available for dense expressions only.
*
* \sa class TriangularView, MatrixBase::triangularView()
*/
template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_MatrixType,_Mode,Dense>
: public TriangularBase<TriangularView<_MatrixType, _Mode> >
{
public:
typedef TriangularView<_MatrixType, _Mode> TriangularViewType;
typedef TriangularBase<TriangularViewType> Base;
typedef typename internal::traits<TriangularViewType>::Scalar Scalar;
typedef _MatrixType MatrixType;
typedef typename MatrixType::PlainObject DenseMatrixType;
typedef DenseMatrixType PlainObject;
public:
using Base::evalToLazy;
using Base::derived;
typedef typename internal::traits<TriangularViewType>::StorageKind StorageKind;
enum {
Mode = _Mode,
Flags = internal::traits<TriangularViewType>::Flags
};
/** \returns the outer-stride of the underlying dense matrix
* \sa DenseCoeffsBase::outerStride() */
EIGEN_DEVICE_FUNC
inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
/** \returns the inner-stride of the underlying dense matrix
* \sa DenseCoeffsBase::innerStride() */
EIGEN_DEVICE_FUNC
inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
/** \sa MatrixBase::operator+=() */
template<typename Other> TriangularView& operator+=(const DenseBase<Other>& other) { return *this = m_matrix + other.derived(); }
template<typename Other>
EIGEN_DEVICE_FUNC
TriangularViewType& operator+=(const DenseBase<Other>& other) {
internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename Other::Scalar>());
return derived();
}
/** \sa MatrixBase::operator-=() */
template<typename Other> TriangularView& operator-=(const DenseBase<Other>& other) { return *this = m_matrix - other.derived(); }
template<typename Other>
EIGEN_DEVICE_FUNC
TriangularViewType& operator-=(const DenseBase<Other>& other) {
internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>());
return derived();
}
/** \sa MatrixBase::operator*=() */
TriangularView& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix * other; }
/** \sa MatrixBase::operator/=() */
TriangularView& operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix / other; }
EIGEN_DEVICE_FUNC
TriangularViewType& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; }
/** \sa DenseBase::operator/=() */
EIGEN_DEVICE_FUNC
TriangularViewType& operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() / other; }
/** \sa MatrixBase::fill() */
EIGEN_DEVICE_FUNC
void fill(const Scalar& value) { setConstant(value); }
/** \sa MatrixBase::setConstant() */
TriangularView& setConstant(const Scalar& value)
{ return *this = MatrixType::Constant(rows(), cols(), value); }
EIGEN_DEVICE_FUNC
TriangularViewType& setConstant(const Scalar& value)
{ return *this = MatrixType::Constant(derived().rows(), derived().cols(), value); }
/** \sa MatrixBase::setZero() */
TriangularView& setZero() { return setConstant(Scalar(0)); }
EIGEN_DEVICE_FUNC
TriangularViewType& setZero() { return setConstant(Scalar(0)); }
/** \sa MatrixBase::setOnes() */
TriangularView& setOnes() { return setConstant(Scalar(1)); }
EIGEN_DEVICE_FUNC
TriangularViewType& setOnes() { return setConstant(Scalar(1)); }
/** \sa MatrixBase::coeff()
* \warning the coordinates must fit into the referenced triangular part
*/
EIGEN_DEVICE_FUNC
inline Scalar coeff(Index row, Index col) const
{
Base::check_coordinates_internal(row, col);
return m_matrix.coeff(row, col);
return derived().nestedExpression().coeff(row, col);
}
/** \sa MatrixBase::coeffRef()
* \warning the coordinates must fit into the referenced triangular part
*/
EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index row, Index col)
{
EIGEN_STATIC_ASSERT_LVALUE(TriangularViewType);
Base::check_coordinates_internal(row, col);
return m_matrix.const_cast_derived().coeffRef(row, col);
return derived().nestedExpression().coeffRef(row, col);
}
const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
/** Assigns a triangular matrix to a triangular part of a dense matrix */
template<typename OtherDerived>
TriangularView& operator=(const TriangularBase<OtherDerived>& other);
EIGEN_DEVICE_FUNC
TriangularViewType& operator=(const TriangularBase<OtherDerived>& other);
/** Shortcut for \code *this = other.triangularView<(*this)::Mode>() \endcode */
template<typename OtherDerived>
TriangularView& operator=(const MatrixBase<OtherDerived>& other);
EIGEN_DEVICE_FUNC
TriangularViewType& operator=(const MatrixBase<OtherDerived>& other);
TriangularView& operator=(const TriangularView& other)
{ return *this = other.nestedExpression(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_DEVICE_FUNC
TriangularViewType& operator=(const TriangularViewImpl& other)
{ return *this = other.derived().nestedExpression(); }
/** \deprecated */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void lazyAssign(const TriangularBase<OtherDerived>& other);
/** \deprecated */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void lazyAssign(const MatrixBase<OtherDerived>& other);
/** \sa MatrixBase::conjugate() */
inline TriangularView<MatrixConjugateReturnType,Mode> conjugate()
{ return m_matrix.conjugate(); }
/** \sa MatrixBase::conjugate() const */
inline const TriangularView<MatrixConjugateReturnType,Mode> conjugate() const
{ return m_matrix.conjugate(); }
/** \sa MatrixBase::adjoint() const */
inline const TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> adjoint() const
{ return m_matrix.adjoint(); }
/** \sa MatrixBase::transpose() */
inline TriangularView<Transpose<MatrixType>,TransposeMode> transpose()
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
return m_matrix.const_cast_derived().transpose();
}
/** \sa MatrixBase::transpose() const */
inline const TriangularView<Transpose<MatrixType>,TransposeMode> transpose() const
{
return m_matrix.transpose();
}
#endif
/** Efficient triangular matrix times vector/matrix product */
template<typename OtherDerived>
TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime>
EIGEN_DEVICE_FUNC
const Product<TriangularViewType,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return TriangularProduct
<Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
(m_matrix, rhs.derived());
return Product<TriangularViewType,OtherDerived>(derived(), rhs.derived());
}
/** Efficient vector/matrix times triangular matrix product */
template<typename OtherDerived> friend
TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
{
return TriangularProduct
<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
(lhs.derived(),rhs.m_matrix);
}
#ifdef EIGEN2_SUPPORT
template<typename OtherDerived>
struct eigen2_product_return_type
{
typedef typename TriangularView<MatrixType,Mode>::DenseMatrixType DenseMatrixType;
typedef typename OtherDerived::PlainObject::DenseType OtherPlainObject;
typedef typename ProductReturnType<DenseMatrixType, OtherPlainObject>::Type ProdRetType;
typedef typename ProdRetType::PlainObject type;
};
template<typename OtherDerived>
const typename eigen2_product_return_type<OtherDerived>::type
operator*(const EigenBase<OtherDerived>& rhs) const
{
typename OtherDerived::PlainObject::DenseType rhsPlainObject;
rhs.evalTo(rhsPlainObject);
return this->toDenseMatrix() * rhsPlainObject;
}
template<typename OtherMatrixType>
bool isApprox(const TriangularView<OtherMatrixType, Mode>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
{
return this->toDenseMatrix().isApprox(other.toDenseMatrix(), precision);
}
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
{
return this->toDenseMatrix().isApprox(other, precision);
}
#endif // EIGEN2_SUPPORT
EIGEN_DEVICE_FUNC
const Product<OtherDerived,TriangularViewType>
operator*(const MatrixBase<OtherDerived>& lhs, const TriangularViewImpl& rhs)
{
return Product<OtherDerived,TriangularViewType>(lhs.derived(),rhs.derived());
}
/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular.
*
* This function computes the inverse-matrix matrix product inverse(\c *this) * \a other if
* \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
* \a Side==OnTheRight.
*
* Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
* diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this
* is an upper (resp. lower) triangular matrix.
*
* Example: \include Triangular_solve.cpp
* Output: \verbinclude Triangular_solve.out
*
* This function returns an expression of the inverse-multiply and can work in-place if it is assigned
* to the same matrix or vector \a other.
*
* For users coming from BLAS, this function (and more specifically solveInPlace()) offers
* all the operations supported by the \c *TRSV and \c *TRSM BLAS routines.
*
* \sa TriangularView::solveInPlace()
*/
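/* A typical forward-substitution sketch (sizes are illustrative):
* \code
* Eigen::MatrixXd L = Eigen::MatrixXd::Random(4, 4);
* L.diagonal().array() += 5.0;                        // keep the diagonal safely non-zero
* Eigen::VectorXd b = Eigen::VectorXd::Random(4);
* Eigen::VectorXd x = L.triangularView<Eigen::Lower>().solve(b);   // solves L*x = b
* // the Side template parameter defaults to OnTheLeft; OnTheRight would solve x*L = b instead
* \endcode
*/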
template<int Side, typename Other>
inline const internal::triangular_solve_retval<Side,TriangularView, Other>
EIGEN_DEVICE_FUNC
inline const internal::triangular_solve_retval<Side,TriangularViewType, Other>
solve(const MatrixBase<Other>& other) const;
/** "in-place" version of TriangularView::solve() where the result is written in \a other
*
* \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
* This function will const_cast it, so constness isn't honored here.
*
* Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* See TriangularView::solve() for the details.
*/
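/* In-place counterpart of the solve() sketch above (illustrative):
* \code
* Eigen::MatrixXd L = Eigen::MatrixXd::Random(4, 4);
* L.diagonal().array() += 5.0;
* Eigen::VectorXd b = Eigen::VectorXd::Random(4);
* L.triangularView<Eigen::Lower>().solveInPlace(b);   // b now holds the solution of L*x = b
* \endcode
*/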
template<int Side, typename OtherDerived>
EIGEN_DEVICE_FUNC
void solveInPlace(const MatrixBase<OtherDerived>& other) const;
template<typename Other>
inline const internal::triangular_solve_retval<OnTheLeft,TriangularView, Other>
solve(const MatrixBase<Other>& other) const
{ return solve<OnTheLeft>(other); }
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void solveInPlace(const MatrixBase<OtherDerived>& other) const
{ return solveInPlace<OnTheLeft>(other); }
const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const
{
EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
}
SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()
{
EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
}
/** Swaps the coefficients of the common triangular parts of two matrices */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
#ifdef EIGEN_PARSED_BY_DOXYGEN
void swap(TriangularBase<OtherDerived> &other)
#else
void swap(TriangularBase<OtherDerived> const & other)
#endif
{
TriangularView<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
EIGEN_STATIC_ASSERT_LVALUE(OtherDerived);
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
}
/** \deprecated
* Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void swap(MatrixBase<OtherDerived> const & other)
{
SwapWrapper<MatrixType> swaper(const_cast<MatrixType&>(m_matrix));
TriangularView<SwapWrapper<MatrixType>,Mode>(swaper).lazyAssign(other.derived());
EIGEN_STATIC_ASSERT_LVALUE(OtherDerived);
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
}
Scalar determinant() const
{
if (Mode & UnitDiag)
return 1;
else if (Mode & ZeroDiag)
return 0;
else
return m_matrix.diagonal().prod();
}
// TODO simplify the following:
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
setZero();
return assignProduct(other,1);
}
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
return assignProduct(other,1);
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
if(!internal::is_same_dense(dst,rhs))
dst = rhs;
this->solveInPlace(dst);
}
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
return assignProduct(other,-1);
}
template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
{
setZero();
return assignProduct(other,other.alpha());
}
template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
{
return assignProduct(other,other.alpha());
}
template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
{
return assignProduct(other,-other.alpha());
}
template<typename ProductType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TriangularViewType& _assignProduct(const ProductType& prod, const Scalar& alpha, bool beta);
protected:
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
EIGEN_DEFAULT_COPY_CONSTRUCTOR(TriangularViewImpl)
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TriangularViewImpl)
MatrixTypeNested m_matrix;
};
/***************************************************************************
* Implementation of triangular evaluation/assignment
***************************************************************************/
namespace internal {
template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount, bool ClearOpposite>
struct triangular_assignment_selector
{
enum {
col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
row = (UnrollCount-1) % Derived1::RowsAtCompileTime
};
typedef typename Derived1::Scalar Scalar;
static inline void run(Derived1 &dst, const Derived2 &src)
{
triangular_assignment_selector<Derived1, Derived2, Mode, UnrollCount-1, ClearOpposite>::run(dst, src);
eigen_assert( Mode == Upper || Mode == Lower
|| Mode == StrictlyUpper || Mode == StrictlyLower
|| Mode == UnitUpper || Mode == UnitLower);
if((Mode == Upper && row <= col)
|| (Mode == Lower && row >= col)
|| (Mode == StrictlyUpper && row < col)
|| (Mode == StrictlyLower && row > col)
|| (Mode == UnitUpper && row < col)
|| (Mode == UnitLower && row > col))
dst.copyCoeff(row, col, src);
else if(ClearOpposite)
{
if (Mode&UnitDiag && row==col)
dst.coeffRef(row, col) = Scalar(1);
else
dst.coeffRef(row, col) = Scalar(0);
}
}
};
// prevent buggy user code from causing an infinite recursion
template<typename Derived1, typename Derived2, unsigned int Mode, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, Mode, 0, ClearOpposite>
{
static inline void run(Derived1 &, const Derived2 &) {}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
typedef typename Derived1::Scalar Scalar;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
{
Index maxi = (std::min)(j, dst.rows()-1);
for(Index i = 0; i <= maxi; ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
for(Index i = maxi+1; i < dst.rows(); ++i)
dst.coeffRef(i, j) = Scalar(0);
}
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
{
for(Index i = j; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src);
Index maxi = (std::min)(j, dst.rows());
if (ClearOpposite)
for(Index i = 0; i < maxi; ++i)
dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
}
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
typedef typename Derived1::Scalar Scalar;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
{
Index maxi = (std::min)(j, dst.rows());
for(Index i = 0; i < maxi; ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
for(Index i = maxi; i < dst.rows(); ++i)
dst.coeffRef(i, j) = Scalar(0);
}
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
{
for(Index i = j+1; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src);
Index maxi = (std::min)(j, dst.rows()-1);
if (ClearOpposite)
for(Index i = 0; i <= maxi; ++i)
dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
}
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
{
Index maxi = (std::min)(j, dst.rows());
for(Index i = 0; i < maxi; ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
{
for(Index i = maxi+1; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0;
}
}
dst.diagonal().setOnes();
}
};
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
{
Index maxi = (std::min)(j, dst.rows());
for(Index i = maxi+1; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
{
for(Index i = 0; i < maxi; ++i)
dst.coeffRef(i, j) = 0;
}
}
dst.diagonal().setOnes();
}
};
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
// FIXME should we keep that possibility
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
inline TriangularView<MatrixType, Mode>&
TriangularView<MatrixType, Mode>::operator=(const MatrixBase<OtherDerived>& other)
TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other)
{
if(OtherDerived::Flags & EvalBeforeAssigningBit)
{
typename internal::plain_matrix_type<OtherDerived>::type other_evaluated(other.rows(), other.cols());
other_evaluated.template triangularView<Mode>().lazyAssign(other.derived());
lazyAssign(other_evaluated);
}
else
lazyAssign(other.derived());
return *this;
internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
// FIXME should we keep that possibility
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
void TriangularView<MatrixType, Mode>::lazyAssign(const MatrixBase<OtherDerived>& other)
void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
{
enum {
unroll = MatrixType::SizeAtCompileTime != Dynamic
&& internal::traits<OtherDerived>::CoeffReadCost != Dynamic
&& MatrixType::SizeAtCompileTime*internal::traits<OtherDerived>::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT
};
eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
internal::triangular_assignment_selector
<MatrixType, OtherDerived, int(Mode),
unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
false // do not change the opposite triangular part
>::run(m_matrix.const_cast_derived(), other.derived());
internal::call_assignment_no_alias(derived(), other.template triangularView<Mode>());
}
......@@ -628,38 +576,21 @@ void TriangularView<MatrixType, Mode>::lazyAssign(const MatrixBase<OtherDerived>
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
inline TriangularView<MatrixType, Mode>&
TriangularView<MatrixType, Mode>::operator=(const TriangularBase<OtherDerived>& other)
TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<OtherDerived>& other)
{
eigen_assert(Mode == int(OtherDerived::Mode));
if(internal::traits<OtherDerived>::Flags & EvalBeforeAssigningBit)
{
typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols());
other_evaluated.template triangularView<Mode>().lazyAssign(other.derived().nestedExpression());
lazyAssign(other_evaluated);
}
else
lazyAssign(other.derived().nestedExpression());
return *this;
internal::call_assignment(derived(), other.derived());
return derived();
}
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
void TriangularView<MatrixType, Mode>::lazyAssign(const TriangularBase<OtherDerived>& other)
void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other)
{
enum {
unroll = MatrixType::SizeAtCompileTime != Dynamic
&& internal::traits<OtherDerived>::CoeffReadCost != Dynamic
&& MatrixType::SizeAtCompileTime * internal::traits<OtherDerived>::CoeffReadCost / 2
<= EIGEN_UNROLLING_LIMIT
};
eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
internal::triangular_assignment_selector
<MatrixType, OtherDerived, int(Mode),
unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
false // preserve the opposite triangular part
>::run(m_matrix.const_cast_derived(), other.derived().nestedExpression());
eigen_assert(Mode == int(OtherDerived::Mode));
internal::call_assignment_no_alias(derived(), other.derived());
}
#endif
/***************************************************************************
* Implementation of TriangularBase methods
......@@ -671,35 +602,7 @@ template<typename Derived>
template<typename DenseDerived>
void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
{
if(internal::traits<Derived>::Flags & EvalBeforeAssigningBit)
{
typename internal::plain_matrix_type<Derived>::type other_evaluated(rows(), cols());
evalToLazy(other_evaluated);
other.derived().swap(other_evaluated);
}
else
evalToLazy(other.derived());
}
/** Assigns a triangular or selfadjoint matrix to a dense matrix.
* If the matrix is triangular, the opposite part is set to zero. */
template<typename Derived>
template<typename DenseDerived>
void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
{
enum {
unroll = DenseDerived::SizeAtCompileTime != Dynamic
&& internal::traits<Derived>::CoeffReadCost != Dynamic
&& DenseDerived::SizeAtCompileTime * internal::traits<Derived>::CoeffReadCost / 2
<= EIGEN_UNROLLING_LIMIT
};
other.derived().resize(this->rows(), this->cols());
internal::triangular_assignment_selector
<DenseDerived, typename internal::traits<Derived>::MatrixTypeNestedCleaned, Derived::Mode,
unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic,
true // clear the opposite triangular part
>::run(other.derived(), derived().nestedExpression());
evalToLazy(other.derived());
}
/***************************************************************************
......@@ -710,49 +613,14 @@ void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
* Implementation of MatrixBase methods
***************************************************************************/
#ifdef EIGEN2_SUPPORT
// implementation of part<>(), including the SelfAdjoint case.
namespace internal {
template<typename MatrixType, unsigned int Mode>
struct eigen2_part_return_type
{
typedef TriangularView<MatrixType, Mode> type;
};
template<typename MatrixType>
struct eigen2_part_return_type<MatrixType, SelfAdjoint>
{
typedef SelfAdjointView<MatrixType, Upper> type;
};
}
/** \deprecated use MatrixBase::triangularView() */
template<typename Derived>
template<unsigned int Mode>
const typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part() const
{
return derived();
}
/** \deprecated use MatrixBase::triangularView() */
template<typename Derived>
template<unsigned int Mode>
typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part()
{
return derived();
}
#endif
/**
* \returns an expression of a triangular view extracted from the current matrix
*
* The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
* \c #Lower, \c #StrictlyLower, \c #UnitLower.
*
* Example: \include MatrixBase_extract.cpp
* Output: \verbinclude MatrixBase_extract.out
* Example: \include MatrixBase_triangularView.cpp
* Output: \verbinclude MatrixBase_triangularView.out
*
* \sa class TriangularView
*/
......@@ -761,7 +629,7 @@ template<unsigned int Mode>
typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
MatrixBase<Derived>::triangularView()
{
return derived();
return typename TriangularViewReturnType<Mode>::Type(derived());
}
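// Illustrative usage (editorial sketch, not part of this header). It assumes <Eigen/Dense>
// is available and shows how the Mode template argument selects the referenced part:
//
//   #include <Eigen/Dense>
//   #include <iostream>
//   int main() {
//     Eigen::Matrix3d m;
//     m << 1, 2, 3,
//          4, 5, 6,
//          7, 8, 9;
//     // Copying a triangular view into a dense matrix zeroes the opposite part.
//     Eigen::Matrix3d u = m.triangularView<Eigen::StrictlyUpper>();
//     std::cout << u << "\n";   // prints 0 2 3 / 0 0 6 / 0 0 0
//     return 0;
//   }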
/** This is the const version of MatrixBase::triangularView() */
......@@ -770,7 +638,7 @@ template<unsigned int Mode>
typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
MatrixBase<Derived>::triangularView() const
{
return derived();
return typename ConstTriangularViewReturnType<Mode>::Type(derived());
}
/** \returns true if *this is approximately equal to an upper triangular matrix,
......@@ -779,22 +647,22 @@ MatrixBase<Derived>::triangularView() const
* \sa isLowerTriangular()
*/
template<typename Derived>
bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const
{
RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
for(Index j = 0; j < cols(); ++j)
{
Index maxi = (std::min)(j, rows()-1);
Index maxi = numext::mini(j, rows()-1);
for(Index i = 0; i <= maxi; ++i)
{
RealScalar absValue = internal::abs(coeff(i,j));
RealScalar absValue = numext::abs(coeff(i,j));
if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
}
}
RealScalar threshold = maxAbsOnUpperPart * prec;
for(Index j = 0; j < cols(); ++j)
for(Index i = j+1; i < rows(); ++i)
if(internal::abs(coeff(i, j)) > threshold) return false;
if(numext::abs(coeff(i, j)) > threshold) return false;
return true;
}
......@@ -804,25 +672,314 @@ bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
* \sa isUpperTriangular()
*/
template<typename Derived>
bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const
{
RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
for(Index j = 0; j < cols(); ++j)
for(Index i = j; i < rows(); ++i)
{
RealScalar absValue = internal::abs(coeff(i,j));
RealScalar absValue = numext::abs(coeff(i,j));
if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
}
RealScalar threshold = maxAbsOnLowerPart * prec;
for(Index j = 1; j < cols(); ++j)
{
Index maxi = (std::min)(j, rows()-1);
Index maxi = numext::mini(j, rows()-1);
for(Index i = 0; i < maxi; ++i)
if(internal::abs(coeff(i, j)) > threshold) return false;
if(numext::abs(coeff(i, j)) > threshold) return false;
}
return true;
}
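// Illustrative usage (editorial sketch, not part of this header): both predicates compare the
// "wrong" triangle against the largest entry of the expected triangle scaled by prec, so exact
// zeros are not required. Assuming <Eigen/Dense>:
//
//   Eigen::Matrix3d a = Eigen::Matrix3d::Identity();
//   a(0,2) = 0.5;
//   bool up  = a.isUpperTriangular();   // true: nonzeros only on or above the diagonal
//   bool low = a.isLowerTriangular();   // false: a(0,2) is well above the threshold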
/***************************************************************************
****************************************************************************
* Evaluators and Assignment of triangular expressions
***************************************************************************
***************************************************************************/
namespace internal {
// TODO currently a triangular expression has the form TriangularView<.,.>
// in the future triangular-ness should be defined by the expression traits
// such that Transpose<TriangularView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<TriangularView<MatrixType,Mode> >
{
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef typename glue_shapes<typename evaluator_traits<MatrixType>::Shape, TriangularShape>::type Shape;
};
template<typename MatrixType, unsigned int Mode>
struct unary_evaluator<TriangularView<MatrixType,Mode>, IndexBased>
: evaluator<typename internal::remove_all<MatrixType>::type>
{
typedef TriangularView<MatrixType,Mode> XprType;
typedef evaluator<typename internal::remove_all<MatrixType>::type> Base;
unary_evaluator(const XprType &xpr) : Base(xpr.nestedExpression()) {}
};
// Additional assignment kinds:
struct Triangular2Triangular {};
struct Triangular2Dense {};
struct Dense2Triangular {};
template<typename Kernel, unsigned int Mode, int UnrollCount, bool ClearOpposite> struct triangular_assignment_loop;
/** \internal Specialization of the dense assignment kernel for triangular matrices.
* The main difference is that the triangular, diagonal, and opposite parts are processed through three different functions.
* \tparam UpLo must be either Lower or Upper
* \tparam Mode must be either 0, UnitDiag, ZeroDiag, or SelfAdjoint
*/
template<int UpLo, int Mode, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized>
class triangular_dense_assignment_kernel : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version>
{
protected:
typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base;
typedef typename Base::DstXprType DstXprType;
typedef typename Base::SrcXprType SrcXprType;
using Base::m_dst;
using Base::m_src;
using Base::m_functor;
public:
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::AssignmentTraits AssignmentTraits;
EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
#ifdef EIGEN_INTERNAL_DEBUGGING
EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)
{
eigen_internal_assert(row!=col);
Base::assignCoeff(row,col);
}
#else
using Base::assignCoeff;
#endif
EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)
{
if(Mode==UnitDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(1));
else if(Mode==ZeroDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(0));
else if(Mode==0) Base::assignCoeff(id,id);
}
EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index row, Index col)
{
eigen_internal_assert(row!=col);
if(SetOpposite)
m_functor.assignCoeff(m_dst.coeffRef(row,col), Scalar(0));
}
};
template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType, typename Functor>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func)
{
typedef evaluator<DstXprType> DstEvaluatorType;
typedef evaluator<SrcXprType> SrcEvaluatorType;
SrcEvaluatorType srcEvaluator(src);
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
DstEvaluatorType dstEvaluator(dst);
typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite,
DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
enum {
unroll = DstXprType::SizeAtCompileTime != Dynamic
&& SrcEvaluatorType::CoeffReadCost < HugeCost
&& DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT
};
triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
}
template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src)
{
call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
}
template<> struct AssignmentKind<TriangularShape,TriangularShape> { typedef Triangular2Triangular Kind; };
template<> struct AssignmentKind<DenseShape,TriangularShape> { typedef Triangular2Dense Kind; };
template<> struct AssignmentKind<TriangularShape,DenseShape> { typedef Dense2Triangular Kind; };
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Triangular>
{
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
eigen_assert(int(DstXprType::Mode) == int(SrcXprType::Mode));
call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
}
};
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Dense>
{
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
call_triangular_assignment_loop<SrcXprType::Mode, (SrcXprType::Mode&SelfAdjoint)==0>(dst, src, func);
}
};
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Dense2Triangular>
{
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
}
};
template<typename Kernel, unsigned int Mode, int UnrollCount, bool SetOpposite>
struct triangular_assignment_loop
{
// FIXME: this is not very clean, perhaps this information should be provided by the kernel?
typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
typedef typename DstEvaluatorType::XprType DstXprType;
enum {
col = (UnrollCount-1) / DstXprType::RowsAtCompileTime,
row = (UnrollCount-1) % DstXprType::RowsAtCompileTime
};
typedef typename Kernel::Scalar Scalar;
EIGEN_DEVICE_FUNC
static inline void run(Kernel &kernel)
{
triangular_assignment_loop<Kernel, Mode, UnrollCount-1, SetOpposite>::run(kernel);
if(row==col)
kernel.assignDiagonalCoeff(row);
else if( ((Mode&Lower) && row>col) || ((Mode&Upper) && row<col) )
kernel.assignCoeff(row,col);
else if(SetOpposite)
kernel.assignOppositeCoeff(row,col);
}
};
// prevent buggy user code from causing an infinite recursion
template<typename Kernel, unsigned int Mode, bool SetOpposite>
struct triangular_assignment_loop<Kernel, Mode, 0, SetOpposite>
{
EIGEN_DEVICE_FUNC
static inline void run(Kernel &) {}
};
// TODO: experiment with a recursive assignment procedure splitting the current
// triangular part into one rectangular and two triangular parts.
template<typename Kernel, unsigned int Mode, bool SetOpposite>
struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
{
typedef typename Kernel::Scalar Scalar;
EIGEN_DEVICE_FUNC
static inline void run(Kernel &kernel)
{
for(Index j = 0; j < kernel.cols(); ++j)
{
Index maxi = numext::mini(j, kernel.rows());
Index i = 0;
if (((Mode&Lower) && SetOpposite) || (Mode&Upper))
{
for(; i < maxi; ++i)
if(Mode&Upper) kernel.assignCoeff(i, j);
else kernel.assignOppositeCoeff(i, j);
}
else
i = maxi;
if(i<kernel.rows()) // then i==j
kernel.assignDiagonalCoeff(i++);
if (((Mode&Upper) && SetOpposite) || (Mode&Lower))
{
for(; i < kernel.rows(); ++i)
if(Mode&Lower) kernel.assignCoeff(i, j);
else kernel.assignOppositeCoeff(i, j);
}
}
}
};
} // end namespace internal
/** Assigns a triangular or selfadjoint matrix to a dense matrix.
* If the matrix is triangular, the opposite part is set to zero. */
template<typename Derived>
template<typename DenseDerived>
void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
{
other.derived().resize(this->rows(), this->cols());
internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
}
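// Editorial sketch (not part of this header), assuming <Eigen/Dense>: evalToLazy() is what
// ultimately runs when a triangular or selfadjoint view is assigned to a dense matrix.
//
//   Eigen::Matrix2d m;
//   m << 1, 2,
//        9, 4;                                                  // the 9 is never read below
//   Eigen::Matrix2d t = m.triangularView<Eigen::Upper>();       // [[1,2],[0,4]]  opposite part zeroed
//   Eigen::Matrix2d s = m.selfadjointView<Eigen::Upper>();      // [[1,2],[2,4]]  opposite part mirrored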
namespace internal {
// Triangular = Product
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
{
typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
dst._assignProduct(src, 1, 0);
}
};
// Triangular += Product
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
{
typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)
{
dst._assignProduct(src, 1, 1);
}
};
// Triangular -= Product
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
{
typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)
{
dst._assignProduct(src, -1, 1);
}
};
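// Editorial sketch (not part of this header), assuming <Eigen/Dense>: these three
// specializations are what make triangular-targeted product updates work, e.g. updating only
// the lower triangle of a Gram matrix:
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);
//   Eigen::MatrixXd C = Eigen::MatrixXd::Zero(4, 4);
//   C.triangularView<Eigen::Lower>() += A * A.transpose();   // only the lower part is written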
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULARMATRIX_H
......@@ -13,13 +13,23 @@
namespace Eigen {
namespace internal {
template<typename VectorType, int Size>
struct traits<VectorBlock<VectorType, Size> >
: public traits<Block<VectorType,
traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
{
};
}
/** \class VectorBlock
* \ingroup Core_Module
*
* \brief Expression of a fixed-size or dynamic-size sub-vector
*
* \param VectorType the type of the object in which we are taking a sub-vector
* \param Size size of the sub-vector we are taking at compile time (optional)
* \tparam VectorType the type of the object in which we are taking a sub-vector
* \tparam Size size of the sub-vector we are taking at compile time (optional)
*
* This class represents an expression of either a fixed-size or dynamic-size sub-vector.
* It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
......@@ -43,17 +53,6 @@ namespace Eigen {
*
* \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
*/
namespace internal {
template<typename VectorType, int Size>
struct traits<VectorBlock<VectorType, Size> >
: public traits<Block<VectorType,
traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
{
};
}
template<typename VectorType, int Size> class VectorBlock
: public Block<VectorType,
internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
......@@ -72,6 +71,7 @@ template<typename VectorType, int Size> class VectorBlock
/** Dynamic-size constructor
*/
EIGEN_DEVICE_FUNC
inline VectorBlock(VectorType& vector, Index start, Index size)
: Base(vector,
IsColVector ? start : 0, IsColVector ? 0 : start,
......@@ -82,6 +82,7 @@ template<typename VectorType, int Size> class VectorBlock
/** Fixed-size constructor
*/
EIGEN_DEVICE_FUNC
inline VectorBlock(VectorType& vector, Index start)
: Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
{
......@@ -90,195 +91,6 @@ template<typename VectorType, int Size> class VectorBlock
};
/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
*
* \only_for_vectors
*
* \param start the first coefficient in the segment
* \param size the number of coefficients in the segment
*
* Example: \include MatrixBase_segment_int_int.cpp
* Output: \verbinclude MatrixBase_segment_int_int.out
*
* \note Even though the returned expression has dynamic size, in the case
* when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
* \sa class Block, segment(Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::SegmentReturnType
DenseBase<Derived>::segment(Index start, Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), start, size);
}
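// Illustrative usage (editorial sketch, not part of this header), assuming <Eigen/Dense>:
//
//   Eigen::VectorXd v(5);
//   v << 1, 2, 3, 4, 5;
//   Eigen::VectorXd mid = v.segment(1, 3);   // (2, 3, 4)
//   v.segment(1, 3).setZero();               // writable: v becomes (1, 0, 0, 0, 5)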
/** This is the const version of segment(Index,Index).*/
template<typename Derived>
inline typename DenseBase<Derived>::ConstSegmentReturnType
DenseBase<Derived>::segment(Index start, Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), start, size);
}
/** \returns a dynamic-size expression of the first coefficients of *this.
*
* \only_for_vectors
*
* \param size the number of coefficients in the block
*
* Example: \include MatrixBase_start_int.cpp
* Output: \verbinclude MatrixBase_start_int.out
*
* \note Even though the returned expression has dynamic size, in the case
* when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
* \sa class Block, block(Index,Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::SegmentReturnType
DenseBase<Derived>::head(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), 0, size);
}
/** This is the const version of head(Index).*/
template<typename Derived>
inline typename DenseBase<Derived>::ConstSegmentReturnType
DenseBase<Derived>::head(Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), 0, size);
}
/** \returns a dynamic-size expression of the last coefficients of *this.
*
* \only_for_vectors
*
* \param size the number of coefficients in the block
*
* Example: \include MatrixBase_end_int.cpp
* Output: \verbinclude MatrixBase_end_int.out
*
* \note Even though the returned expression has dynamic size, in the case
* when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
* \sa class Block, block(Index,Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::SegmentReturnType
DenseBase<Derived>::tail(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), this->size() - size, size);
}
/** This is the const version of tail(Index).*/
template<typename Derived>
inline typename DenseBase<Derived>::ConstSegmentReturnType
DenseBase<Derived>::tail(Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), this->size() - size, size);
}
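// Illustrative usage of the dynamic-size head()/tail() variants (editorial sketch, not part of
// this header), assuming <Eigen/Dense>:
//
//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 1.0, 5.0);  // 1 2 3 4 5
//   Eigen::VectorXd h = v.head(2);   // (1, 2)
//   Eigen::VectorXd t = v.tail(2);   // (4, 5)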
/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
*
* \only_for_vectors
*
* The template parameter \a Size is the number of coefficients in the block
*
* \param start the index of the first element of the sub-vector
*
* Example: \include MatrixBase_template_int_segment.cpp
* Output: \verbinclude MatrixBase_template_int_segment.out
*
* \sa class Block
*/
template<typename Derived>
template<int Size>
inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
DenseBase<Derived>::segment(Index start)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<Size>::Type(derived(), start);
}
/** This is the const version of segment<int>(Index).*/
template<typename Derived>
template<int Size>
inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
DenseBase<Derived>::segment(Index start) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
}
/** \returns a fixed-size expression of the first coefficients of *this.
*
* \only_for_vectors
*
* The template parameter \a Size is the number of coefficients in the block
*
* Example: \include MatrixBase_template_int_start.cpp
* Output: \verbinclude MatrixBase_template_int_start.out
*
* \sa class Block
*/
template<typename Derived>
template<int Size>
inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
DenseBase<Derived>::head()
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
}
/** This is the const version of head<int>().*/
template<typename Derived>
template<int Size>
inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
DenseBase<Derived>::head() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
}
/** \returns a fixed-size expression of the last coefficients of *this.
*
* \only_for_vectors
*
* The template parameter \a Size is the number of coefficients in the block
*
* Example: \include MatrixBase_template_int_end.cpp
* Output: \verbinclude MatrixBase_template_int_end.out
*
* \sa class Block
*/
template<typename Derived>
template<int Size>
inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
DenseBase<Derived>::tail()
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
}
/** This is the const version of tail<int>().*/
template<typename Derived>
template<int Size>
inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
DenseBase<Derived>::tail() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
}
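// Illustrative usage of the fixed-size variants (editorial sketch, not part of this header):
// the Size template argument is a compile-time constant, so the returned block has a fixed
// size and no runtime size parameter is needed.
//
//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 1.0, 6.0);  // 1 2 3 4 5 6
//   Eigen::Vector3d a = v.segment<3>(1);   // (2, 3, 4)
//   Eigen::Vector2d b = v.head<2>();       // (1, 2)
//   Eigen::Vector2d c = v.tail<2>();       // (5, 6)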
} // end namespace Eigen
#endif // EIGEN_VECTORBLOCK_H
......@@ -11,7 +11,7 @@
#ifndef EIGEN_PARTIAL_REDUX_H
#define EIGEN_PARTIAL_REDUX_H
namespace Eigen {
/** \class PartialReduxExpr
* \ingroup Core_Module
......@@ -41,75 +41,56 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename traits<MatrixType>::XprKind XprKind;
typedef typename MatrixType::Scalar InputScalar;
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
enum {
RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime,
ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0),
TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
};
#if EIGEN_GNUC_AT_LEAST(3,4)
typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
#else
typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
#endif
enum {
CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
Flags = RowsAtCompileTime == 1 ? RowMajorBit : 0,
TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime
};
};
}
template< typename MatrixType, typename MemberOp, int Direction>
class PartialReduxExpr : internal::no_assignment_operator,
public internal::dense_xpr_base< PartialReduxExpr<MatrixType, MemberOp, Direction> >::type
class PartialReduxExpr : public internal::dense_xpr_base< PartialReduxExpr<MatrixType, MemberOp, Direction> >::type,
internal::no_assignment_operator
{
public:
typedef typename internal::dense_xpr_base<PartialReduxExpr>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr)
typedef typename internal::traits<PartialReduxExpr>::MatrixTypeNested MatrixTypeNested;
typedef typename internal::traits<PartialReduxExpr>::_MatrixTypeNested _MatrixTypeNested;
PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
EIGEN_DEVICE_FUNC
explicit PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
: m_matrix(mat), m_functor(func) {}
EIGEN_DEVICE_FUNC
Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
EIGEN_DEVICE_FUNC
Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const
{
if (Direction==Vertical)
return m_functor(m_matrix.col(j));
else
return m_functor(m_matrix.row(i));
}
EIGEN_DEVICE_FUNC
typename MatrixType::Nested nestedExpression() const { return m_matrix; }
const Scalar coeff(Index index) const
{
if (Direction==Vertical)
return m_functor(m_matrix.col(index));
else
return m_functor(m_matrix.row(index));
}
EIGEN_DEVICE_FUNC
const MemberOp& functor() const { return m_functor; }
protected:
MatrixTypeNested m_matrix;
typename MatrixType::Nested m_matrix;
const MemberOp m_functor;
};
#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
template <typename ResultType> \
struct member_##MEMBER { \
EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
struct member_##MEMBER { \
EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
typedef ResultType result_type; \
template<typename Scalar, int Size> struct Cost \
{ enum { value = COST }; }; \
template<typename XprType> \
EIGEN_STRONG_INLINE ResultType operator()(const XprType& mat) const \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
ResultType operator()(const XprType& mat) const \
{ return mat.MEMBER(); } \
}
......@@ -129,17 +110,27 @@ EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost);
template <int p, typename ResultType>
struct member_lpnorm {
typedef ResultType result_type;
template<typename Scalar, int Size> struct Cost
{ enum { value = (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost }; };
EIGEN_DEVICE_FUNC member_lpnorm() {}
template<typename XprType>
EIGEN_DEVICE_FUNC inline ResultType operator()(const XprType& mat) const
{ return mat.template lpNorm<p>(); }
};
template <typename BinaryOp, typename Scalar>
struct member_redux {
typedef typename result_of<
BinaryOp(Scalar)
BinaryOp(const Scalar&,const Scalar&)
>::type result_type;
template<typename _Scalar, int Size> struct Cost
{ enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; };
member_redux(const BinaryOp func) : m_functor(func) {}
EIGEN_DEVICE_FUNC explicit member_redux(const BinaryOp func) : m_functor(func) {}
template<typename Derived>
inline result_type operator()(const DenseBase<Derived>& mat) const
EIGEN_DEVICE_FUNC inline result_type operator()(const DenseBase<Derived>& mat) const
{ return mat.redux(m_functor); }
const BinaryOp m_functor;
};
......@@ -150,8 +141,8 @@ struct member_redux {
*
* \brief Pseudo expression providing partial reduction operations
*
* \param ExpressionType the type of the object on which to do partial reductions
* \param Direction indicates the direction of the redux (#Vertical or #Horizontal)
* \tparam ExpressionType the type of the object on which to do partial reductions
* \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)
*
* This class represents a pseudo expression with partial reduction features.
* It is the return type of DenseBase::colwise() and DenseBase::rowwise()
......@@ -168,16 +159,15 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
typedef typename ExpressionType::Scalar Scalar;
typedef typename ExpressionType::RealScalar RealScalar;
typedef typename ExpressionType::Index Index;
typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
ExpressionType, ExpressionType&>::type ExpressionTypeNested;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef typename internal::ref_selector<ExpressionType>::non_const_type ExpressionTypeNested;
typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned;
template<template<typename _Scalar> class Functor,
typename Scalar=typename internal::traits<ExpressionType>::Scalar> struct ReturnType
typename Scalar_=Scalar> struct ReturnType
{
typedef PartialReduxExpr<ExpressionType,
Functor<Scalar>,
Functor<Scalar_>,
Direction
> Type;
};
......@@ -185,23 +175,24 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
template<typename BinaryOp> struct ReduxReturnType
{
typedef PartialReduxExpr<ExpressionType,
internal::member_redux<BinaryOp,typename internal::traits<ExpressionType>::Scalar>,
internal::member_redux<BinaryOp,Scalar>,
Direction
> Type;
};
enum {
IsVertical = (Direction==Vertical) ? 1 : 0,
IsHorizontal = (Direction==Horizontal) ? 1 : 0
isVertical = (Direction==Vertical) ? 1 : 0,
isHorizontal = (Direction==Horizontal) ? 1 : 0
};
protected:
/** \internal
* \returns the i-th subvector according to the \c Direction */
typedef typename internal::conditional<Direction==Vertical,
typedef typename internal::conditional<isVertical,
typename ExpressionType::ColXpr,
typename ExpressionType::RowXpr>::type SubVector;
/** \internal
* \returns the i-th subvector according to the \c Direction */
EIGEN_DEVICE_FUNC
SubVector subVector(Index i)
{
return SubVector(m_matrix.derived(),i);
......@@ -209,36 +200,62 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** \internal
* \returns the number of subvectors in the direction \c Direction */
EIGEN_DEVICE_FUNC
Index subVectors() const
{ return Direction==Vertical?m_matrix.cols():m_matrix.rows(); }
{ return isVertical?m_matrix.cols():m_matrix.rows(); }
template<typename OtherDerived> struct ExtendedType {
typedef Replicate<OtherDerived,
Direction==Vertical ? 1 : ExpressionType::RowsAtCompileTime,
Direction==Horizontal ? 1 : ExpressionType::ColsAtCompileTime> Type;
isVertical ? 1 : ExpressionType::RowsAtCompileTime,
isHorizontal ? 1 : ExpressionType::ColsAtCompileTime> Type;
};
/** \internal
* Replicates a vector to match the size of \c *this */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
typename ExtendedType<OtherDerived>::Type
extendedTo(const DenseBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Vertical, OtherDerived::MaxColsAtCompileTime==1),
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxColsAtCompileTime==1),
YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Horizontal, OtherDerived::MaxRowsAtCompileTime==1),
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxRowsAtCompileTime==1),
YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
return typename ExtendedType<OtherDerived>::Type
(other.derived(),
Direction==Vertical ? 1 : m_matrix.rows(),
Direction==Horizontal ? 1 : m_matrix.cols());
isVertical ? 1 : m_matrix.rows(),
isHorizontal ? 1 : m_matrix.cols());
}
public:
template<typename OtherDerived> struct OppositeExtendedType {
typedef Replicate<OtherDerived,
isHorizontal ? 1 : ExpressionType::RowsAtCompileTime,
isVertical ? 1 : ExpressionType::ColsAtCompileTime> Type;
};
inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {}
/** \internal
* Replicates a vector in the opposite direction to match the size of \c *this */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
typename OppositeExtendedType<OtherDerived>::Type
extendedToOpposite(const DenseBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxColsAtCompileTime==1),
YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxRowsAtCompileTime==1),
YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
return typename OppositeExtendedType<OtherDerived>::Type
(other.derived(),
isHorizontal ? 1 : m_matrix.rows(),
isVertical ? 1 : m_matrix.cols());
}
public:
EIGEN_DEVICE_FUNC
explicit inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {}
/** \internal */
EIGEN_DEVICE_FUNC
inline const ExpressionType& _expression() const { return m_matrix; }
/** \returns a row or column vector expression of \c *this reduxed by \a func
......@@ -249,76 +266,126 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise()
*/
template<typename BinaryOp>
EIGEN_DEVICE_FUNC
const typename ReduxReturnType<BinaryOp>::Type
redux(const BinaryOp& func = BinaryOp()) const
{ return typename ReduxReturnType<BinaryOp>::Type(_expression(), func); }
{ return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func)); }
typedef typename ReturnType<internal::member_minCoeff>::Type MinCoeffReturnType;
typedef typename ReturnType<internal::member_maxCoeff>::Type MaxCoeffReturnType;
typedef typename ReturnType<internal::member_squaredNorm,RealScalar>::Type SquaredNormReturnType;
typedef typename ReturnType<internal::member_norm,RealScalar>::Type NormReturnType;
typedef typename ReturnType<internal::member_blueNorm,RealScalar>::Type BlueNormReturnType;
typedef typename ReturnType<internal::member_stableNorm,RealScalar>::Type StableNormReturnType;
typedef typename ReturnType<internal::member_hypotNorm,RealScalar>::Type HypotNormReturnType;
typedef typename ReturnType<internal::member_sum>::Type SumReturnType;
typedef typename ReturnType<internal::member_mean>::Type MeanReturnType;
typedef typename ReturnType<internal::member_all>::Type AllReturnType;
typedef typename ReturnType<internal::member_any>::Type AnyReturnType;
typedef PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> CountReturnType;
typedef typename ReturnType<internal::member_prod>::Type ProdReturnType;
typedef Reverse<const ExpressionType, Direction> ConstReverseReturnType;
typedef Reverse<ExpressionType, Direction> ReverseReturnType;
template<int p> struct LpNormReturnType {
typedef PartialReduxExpr<ExpressionType, internal::member_lpnorm<p,RealScalar>,Direction> Type;
};
/** \returns a row (or column) vector expression of the smallest coefficient
* of each column (or row) of the referenced expression.
*
* \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_minCoeff.cpp
* Output: \verbinclude PartialRedux_minCoeff.out
*
* \sa DenseBase::minCoeff() */
const typename ReturnType<internal::member_minCoeff>::Type minCoeff() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const MinCoeffReturnType minCoeff() const
{ return MinCoeffReturnType(_expression()); }
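// Illustrative usage of minCoeff()/maxCoeff() (editorial sketch, not part of this header),
// assuming <Eigen/Dense>:
//
//   Eigen::Matrix2d m;
//   m << 3, 1,
//        2, 4;
//   Eigen::RowVector2d colMin = m.colwise().minCoeff();   // (2, 1)
//   Eigen::Vector2d    rowMax = m.rowwise().maxCoeff();   // (3, 4)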
/** \returns a row (or column) vector expression of the largest coefficient
* of each column (or row) of the referenced expression.
*
* \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_maxCoeff.cpp
* Output: \verbinclude PartialRedux_maxCoeff.out
*
* \sa DenseBase::maxCoeff() */
const typename ReturnType<internal::member_maxCoeff>::Type maxCoeff() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const MaxCoeffReturnType maxCoeff() const
{ return MaxCoeffReturnType(_expression()); }
/** \returns a row (or column) vector expression of the squared norm
* of each column (or row) of the referenced expression.
* This is a vector with real entries, even if the original matrix has complex entries.
*
* Example: \include PartialRedux_squaredNorm.cpp
* Output: \verbinclude PartialRedux_squaredNorm.out
*
* \sa DenseBase::squaredNorm() */
const typename ReturnType<internal::member_squaredNorm,RealScalar>::Type squaredNorm() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const SquaredNormReturnType squaredNorm() const
{ return SquaredNormReturnType(_expression()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression.
* This is a vector with real entries, even if the original matrix has complex entries.
*
* Example: \include PartialRedux_norm.cpp
* Output: \verbinclude PartialRedux_norm.out
*
* \sa DenseBase::norm() */
const typename ReturnType<internal::member_norm,RealScalar>::Type norm() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const NormReturnType norm() const
{ return NormReturnType(_expression()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression.
* This is a vector with real entries, even if the original matrix has complex entries.
*
* Example: \include PartialRedux_norm.cpp
* Output: \verbinclude PartialRedux_norm.out
*
* \sa DenseBase::norm() */
template<int p>
EIGEN_DEVICE_FUNC
const typename LpNormReturnType<p>::Type lpNorm() const
{ return typename LpNormReturnType<p>::Type(_expression()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression, using
* blue's algorithm.
* Blue's algorithm.
* This is a vector with real entries, even if the original matrix has complex entries.
*
* \sa DenseBase::blueNorm() */
const typename ReturnType<internal::member_blueNorm,RealScalar>::Type blueNorm() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const BlueNormReturnType blueNorm() const
{ return BlueNormReturnType(_expression()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression, avoiding
* underflow and overflow.
* This is a vector with real entries, even if the original matrix has complex entries.
*
* \sa DenseBase::stableNorm() */
const typename ReturnType<internal::member_stableNorm,RealScalar>::Type stableNorm() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const StableNormReturnType stableNorm() const
{ return StableNormReturnType(_expression()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression, avoiding
* underflow and overflow using a concatenation of hypot() calls.
* This is a vector with real entries, even if the original matrix has complex entries.
*
* \sa DenseBase::hypotNorm() */
const typename ReturnType<internal::member_hypotNorm,RealScalar>::Type hypotNorm() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const HypotNormReturnType hypotNorm() const
{ return HypotNormReturnType(_expression()); }
/** \returns a row (or column) vector expression of the sum
* of each column (or row) of the referenced expression.
......@@ -327,39 +394,48 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* Output: \verbinclude PartialRedux_sum.out
*
* \sa DenseBase::sum() */
const typename ReturnType<internal::member_sum>::Type sum() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const SumReturnType sum() const
{ return SumReturnType(_expression()); }
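// Illustrative usage (editorial sketch, not part of this header): the partial reductions above
// all follow the same pattern, e.g. summing each column versus each row:
//
//   Eigen::Matrix2d m;
//   m << 1, 2,
//        3, 4;
//   Eigen::RowVector2d cs = m.colwise().sum();   // (4, 6)
//   Eigen::Vector2d    rs = m.rowwise().sum();   // (3, 7)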
/** \returns a row (or column) vector expression of the mean
* of each column (or row) of the referenced expression.
*
* \sa DenseBase::mean() */
const typename ReturnType<internal::member_mean>::Type mean() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const MeanReturnType mean() const
{ return MeanReturnType(_expression()); }
/** \returns a row (or column) vector expression representing
* whether \b all coefficients of each respective column (or row) are \c true.
* This expression can be assigned to a vector with entries of type \c bool.
*
* \sa DenseBase::all() */
const typename ReturnType<internal::member_all>::Type all() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const AllReturnType all() const
{ return AllReturnType(_expression()); }
/** \returns a row (or column) vector expression representing
* whether \b at \b least one coefficient of each respective column (or row) is \c true.
* This expression can be assigned to a vector with entries of type \c bool.
*
* \sa DenseBase::any() */
const typename ReturnType<internal::member_any>::Type any() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const AnyReturnType any() const
{ return AnyReturnType(_expression()); }
/** \returns a row (or column) vector expression representing
* the number of \c true coefficients of each respective column (or row).
* This expression can be assigned to a vector whose entries have the same type as is used to
* index entries of the original matrix; for dense matrices, this is \c std::ptrdiff_t .
*
* Example: \include PartialRedux_count.cpp
* Output: \verbinclude PartialRedux_count.out
*
* \sa DenseBase::count() */
const PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> count() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const CountReturnType count() const
{ return CountReturnType(_expression()); }
/** \returns a row (or column) vector expression of the product
* of each column (or row) of the referenced expression.
......@@ -368,8 +444,9 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* Output: \verbinclude PartialRedux_prod.out
*
* \sa DenseBase::prod() */
const typename ReturnType<internal::member_prod>::Type prod() const
{ return _expression(); }
EIGEN_DEVICE_FUNC
const ProdReturnType prod() const
{ return ProdReturnType(_expression()); }
/** \returns a matrix expression
......@@ -379,10 +456,20 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* Output: \verbinclude Vectorwise_reverse.out
*
* \sa DenseBase::reverse() */
const Reverse<ExpressionType, Direction> reverse() const
{ return Reverse<ExpressionType, Direction>( _expression() ); }
EIGEN_DEVICE_FUNC
const ConstReverseReturnType reverse() const
{ return ConstReverseReturnType( _expression() ); }
/** \returns a writable matrix expression
* where each column (or row) are reversed.
*
* \sa reverse() const */
EIGEN_DEVICE_FUNC
ReverseReturnType reverse()
{ return ReverseReturnType( _expression() ); }
typedef Replicate<ExpressionType,Direction==Vertical?Dynamic:1,Direction==Horizontal?Dynamic:1> ReplicateReturnType;
typedef Replicate<ExpressionType,(isVertical?Dynamic:1),(isHorizontal?Dynamic:1)> ReplicateReturnType;
EIGEN_DEVICE_FUNC
const ReplicateReturnType replicate(Index factor) const;
/**
......@@ -394,17 +481,20 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate
*/
// NOTE implemented here because of sunstudio's compilation errors
template<int Factor> const Replicate<ExpressionType,(IsVertical?Factor:1),(IsHorizontal?Factor:1)>
// isVertical*Factor+isHorizontal instead of (isVertical?Factor:1) to handle CUDA bug with ternary operator
template<int Factor> const Replicate<ExpressionType,isVertical*Factor+isHorizontal,isHorizontal*Factor+isVertical>
EIGEN_DEVICE_FUNC
replicate(Index factor = Factor) const
{
return Replicate<ExpressionType,Direction==Vertical?Factor:1,Direction==Horizontal?Factor:1>
(_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
return Replicate<ExpressionType,(isVertical?Factor:1),(isHorizontal?Factor:1)>
(_expression(),isVertical?factor:1,isHorizontal?factor:1);
}
/////////// Arithmetic operators ///////////
/** Copies the vector \a other to each subvector of \c *this */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
ExpressionType& operator=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
......@@ -415,6 +505,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Adds the vector \a other to each subvector of \c *this */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
ExpressionType& operator+=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
......@@ -424,6 +515,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Subtracts the vector \a other from each subvector of \c *this */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
ExpressionType& operator-=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
......@@ -433,6 +525,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Multiplies each subvector of \c *this by the vector \a other */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
ExpressionType& operator*=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
......@@ -444,6 +537,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Divides each subvector of \c *this by the vector \a other */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
ExpressionType& operator/=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
......@@ -454,8 +548,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
}
/** Returns the expression of the sum of the vector \a other and each subvector of \c *this */
template<typename OtherDerived> EIGEN_STRONG_INLINE
CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
CwiseBinaryOp<internal::scalar_sum_op<Scalar,typename OtherDerived::Scalar>,
const ExpressionTypeNestedCleaned,
const typename ExtendedType<OtherDerived>::Type>
operator+(const DenseBase<OtherDerived>& other) const
......@@ -467,7 +561,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Returns the expression of the difference between each subvector of \c *this and the vector \a other */
template<typename OtherDerived>
CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
EIGEN_DEVICE_FUNC
CwiseBinaryOp<internal::scalar_difference_op<Scalar,typename OtherDerived::Scalar>,
const ExpressionTypeNestedCleaned,
const typename ExtendedType<OtherDerived>::Type>
operator-(const DenseBase<OtherDerived>& other) const
......@@ -479,10 +574,11 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Returns the expression where each subvector is the product of the vector \a other
* and the corresponding subvector of \c *this */
template<typename OtherDerived> EIGEN_STRONG_INLINE
template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
CwiseBinaryOp<internal::scalar_product_op<Scalar>,
const ExpressionTypeNestedCleaned,
const typename ExtendedType<OtherDerived>::Type>
EIGEN_DEVICE_FUNC
operator*(const DenseBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
......@@ -494,6 +590,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** Returns the expression where each subvector is the quotient of the corresponding
* subvector of \c *this by the vector \a other */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
const ExpressionTypeNestedCleaned,
const typename ExtendedType<OtherDerived>::Type>
......@@ -505,14 +602,35 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
return m_matrix / extendedTo(other.derived());
}
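// Illustrative broadcasting usage (editorial sketch, not part of this header): the operators
// above combine every column (or row) with a single vector of matching size:
//
//   Eigen::MatrixXd m(2, 3);
//   m << 1, 2, 3,
//        4, 5, 6;
//   Eigen::Vector2d v(10, 20);
//   m.colwise() += v;                                                // adds v to each column
//   Eigen::MatrixXd d = m.rowwise() - Eigen::RowVector3d(1, 2, 3);   // subtracts from each row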
/** \returns an expression where each column (or row) of the referenced matrix is normalized.
* The referenced matrix is \b not modified.
* \sa MatrixBase::normalized(), normalize()
*/
EIGEN_DEVICE_FUNC
CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
const ExpressionTypeNestedCleaned,
const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type>
normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); }
/** Normalizes each row (or column) of the referenced matrix in place.
* \sa MatrixBase::normalize(), normalized()
*/
EIGEN_DEVICE_FUNC void normalize() {
m_matrix = this->normalized();
}
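// Illustrative usage (editorial sketch, not part of this header): normalizing every column to
// unit Euclidean norm, either as an expression or in place:
//
//   Eigen::MatrixXd m(2, 2);
//   m << 3, 0,
//        4, 2;
//   Eigen::MatrixXd n = m.colwise().normalized();   // columns (0.6, 0.8) and (0, 1)
//   m.colwise().normalize();                        // same result, written back into m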
EIGEN_DEVICE_FUNC inline void reverseInPlace();
/////////// Geometry module ///////////
#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
Homogeneous<ExpressionType,Direction> homogeneous() const;
#endif
typedef Homogeneous<ExpressionType,Direction> HomogeneousReturnType;
EIGEN_DEVICE_FUNC
HomogeneousReturnType homogeneous() const;
typedef typename ExpressionType::PlainObject CrossReturnType;
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const;
enum {
......@@ -537,25 +655,15 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
Direction==Horizontal ? HNormalized_SizeMinusOne : 1> >
HNormalizedReturnType;
EIGEN_DEVICE_FUNC
const HNormalizedReturnType hnormalized() const;
protected:
ExpressionTypeNested m_matrix;
};
/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
*
* Example: \include MatrixBase_colwise.cpp
* Output: \verbinclude MatrixBase_colwise.out
*
* \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
*/
template<typename Derived>
inline const typename DenseBase<Derived>::ConstColwiseReturnType
DenseBase<Derived>::colwise() const
{
return derived();
}
//const colwise moved to DenseBase.h due to CUDA compiler bug
/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
*
......@@ -565,22 +673,11 @@ template<typename Derived>
inline typename DenseBase<Derived>::ColwiseReturnType
DenseBase<Derived>::colwise()
{
return derived();
return ColwiseReturnType(derived());
}
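A brief sketch of how the writable colwise() wrapper is typically used (illustrative only; colwise_demo is a made-up name):

// Illustrative sketch, not part of the patched header.
#include <Eigen/Dense>
#include <iostream>

void colwise_demo()
{
  Eigen::MatrixXd M(2, 3);
  M << 1, 2, 3,
       4, 5, 6;

  // Partial reduction over each column: a 1x3 row vector of sums.
  std::cout << M.colwise().sum() << "\n";

  // The writable wrapper also supports in-place broadcasting updates.
  M.colwise() += Eigen::Vector2d(10, 20);
}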
/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
*
* Example: \include MatrixBase_rowwise.cpp
* Output: \verbinclude MatrixBase_rowwise.out
*
* \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
*/
template<typename Derived>
inline const typename DenseBase<Derived>::ConstRowwiseReturnType
DenseBase<Derived>::rowwise() const
{
return derived();
}
//const rowwise moved to DenseBase.h due to CUDA compiler bug
/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
*
......@@ -590,7 +687,7 @@ template<typename Derived>
inline typename DenseBase<Derived>::RowwiseReturnType
DenseBase<Derived>::rowwise()
{
return derived();
return RowwiseReturnType(derived());
}
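And the row-wise counterpart, again as an illustrative sketch rather than part of the patch:

// Illustrative sketch, not part of the patched header.
#include <Eigen/Dense>
#include <iostream>

void rowwise_demo()
{
  Eigen::MatrixXd M(2, 3);
  M << 1, 2, 3,
       4, 5, 6;

  // Partial reduction over each row: a 2x1 column vector of maxima.
  std::cout << M.rowwise().maxCoeff() << "\n";
}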
} // end namespace Eigen
......
......@@ -22,6 +22,7 @@ struct visitor_impl
row = (UnrollCount-1) % Derived::RowsAtCompileTime
};
EIGEN_DEVICE_FUNC
static inline void run(const Derived &mat, Visitor& visitor)
{
visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
......@@ -32,6 +33,7 @@ struct visitor_impl
template<typename Visitor, typename Derived>
struct visitor_impl<Visitor, Derived, 1>
{
EIGEN_DEVICE_FUNC
static inline void run(const Derived &mat, Visitor& visitor)
{
return visitor.init(mat.coeff(0, 0), 0, 0);
......@@ -41,7 +43,7 @@ struct visitor_impl<Visitor, Derived, 1>
template<typename Visitor, typename Derived>
struct visitor_impl<Visitor, Derived, Dynamic>
{
typedef typename Derived::Index Index;
EIGEN_DEVICE_FUNC
static inline void run(const Derived& mat, Visitor& visitor)
{
visitor.init(mat.coeff(0,0), 0, 0);
......@@ -53,6 +55,33 @@ struct visitor_impl<Visitor, Derived, Dynamic>
}
};
// evaluator adaptor
template<typename XprType>
class visitor_evaluator
{
public:
EIGEN_DEVICE_FUNC
explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
RowsAtCompileTime = XprType::RowsAtCompileTime,
CoeffReadCost = internal::evaluator<XprType>::CoeffReadCost
};
EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const
{ return m_evaluator.coeff(row, col); }
protected:
internal::evaluator<XprType> m_evaluator;
const XprType &m_xpr;
};
} // end namespace internal
/** Applies the visitor \a visitor to all the coefficients of the matrix or vector.
......@@ -74,16 +103,17 @@ struct visitor_impl<Visitor, Derived, Dynamic>
*/
template<typename Derived>
template<typename Visitor>
EIGEN_DEVICE_FUNC
void DenseBase<Derived>::visit(Visitor& visitor) const
{
enum { unroll = SizeAtCompileTime != Dynamic
&& CoeffReadCost != Dynamic
&& (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
&& SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
<= EIGEN_UNROLLING_LIMIT };
return internal::visitor_impl<Visitor, Derived,
unroll ? int(SizeAtCompileTime) : Dynamic
>::run(derived(), visitor);
typedef typename internal::visitor_evaluator<Derived> ThisEvaluator;
ThisEvaluator thisEval(derived());
enum {
unroll = SizeAtCompileTime != Dynamic
&& SizeAtCompileTime * ThisEvaluator::CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost <= EIGEN_UNROLLING_LIMIT
};
return internal::visitor_impl<Visitor, ThisEvaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(thisEval, visitor);
}
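A sketch of the visitor protocol that visit() expects (the member names init and operator() follow the documented interface; everything else, including CountAboveVisitor, is made up for illustration):

// Illustrative sketch, not part of the patched header.
#include <Eigen/Dense>

// Counts coefficients strictly greater than a threshold.
struct CountAboveVisitor
{
  float threshold;
  int count;

  // Called once, for the first coefficient (0,0).
  void init(const float& value, Eigen::Index, Eigen::Index)
  { count = (value > threshold) ? 1 : 0; }

  // Called for every remaining coefficient.
  void operator()(const float& value, Eigen::Index, Eigen::Index)
  { if (value > threshold) ++count; }
};

int count_above(const Eigen::MatrixXf& M, float t)
{
  CountAboveVisitor v{t, 0};
  M.visit(v);
  return v.count;
}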
namespace internal {
......@@ -94,10 +124,10 @@ namespace internal {
template <typename Derived>
struct coeff_visitor
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
Index row, col;
Scalar res;
EIGEN_DEVICE_FUNC
inline void init(const Scalar& value, Index i, Index j)
{
res = value;
......@@ -114,8 +144,8 @@ struct coeff_visitor
template <typename Derived>
struct min_coeff_visitor : coeff_visitor<Derived>
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
EIGEN_DEVICE_FUNC
void operator() (const Scalar& value, Index i, Index j)
{
if(value < this->res)
......@@ -142,8 +172,8 @@ struct functor_traits<min_coeff_visitor<Scalar> > {
template <typename Derived>
struct max_coeff_visitor : coeff_visitor<Derived>
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
EIGEN_DEVICE_FUNC
void operator() (const Scalar& value, Index i, Index j)
{
if(value > this->res)
......@@ -164,64 +194,70 @@ struct functor_traits<max_coeff_visitor<Scalar> > {
} // end namespace internal
/** \returns the minimum of all coefficients of *this
* and puts in *row and *col its location.
/** \fn DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
* \returns the minimum of all coefficients of *this and puts in *row and *col its location.
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
* \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()
*/
template<typename Derived>
template<typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(IndexType* row, IndexType* col) const
DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
{
internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*row = minVisitor.row;
if (col) *col = minVisitor.col;
*rowId = minVisitor.row;
if (colId) *colId = minVisitor.col;
return minVisitor.res;
}
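A minimal usage sketch of the two-index overload above (illustrative only):

// Illustrative sketch, not part of the patched header.
#include <Eigen/Dense>

void min_location_demo()
{
  Eigen::MatrixXf M = Eigen::MatrixXf::Random(4, 4);
  Eigen::Index r, c;
  float smallest = M.minCoeff(&r, &c);  // smallest == M(r, c)
  (void)smallest;
}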
/** \returns the minimum of all coefficients of *this
* and puts in *index its location.
/** \returns the minimum of all coefficients of *this and puts in *index its location.
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::minCoeff()
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff()
*/
template<typename Derived>
template<typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(IndexType* index) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
*index = IndexType((RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row);
return minVisitor.res;
}
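And the single-index overload, which is restricted to vectors by the static assertion above (illustrative only):

// Illustrative sketch, not part of the patched header.
#include <Eigen/Dense>

void min_index_demo()
{
  Eigen::VectorXf v = Eigen::VectorXf::Random(10);
  Eigen::Index i;
  float smallest = v.minCoeff(&i);      // smallest == v(i)
  (void)smallest;
}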
/** \returns the maximum of all coefficients of *this
* and puts in *row and *col its location.
/** \fn DenseBase<Derived>::maxCoeff(IndexType* rowId, IndexType* colId) const
* \returns the maximum of all coefficients of *this and puts in *row and *col its location.
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
*/
template<typename Derived>
template<typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(IndexType* row, IndexType* col) const
DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
{
internal::max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
*row = maxVisitor.row;
if (col) *col = maxVisitor.col;
*rowPtr = maxVisitor.row;
if (colPtr) *colPtr = maxVisitor.col;
return maxVisitor.res;
}
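The maximum-locating overload mirrors the minimum one; a short illustrative sketch:

// Illustrative sketch, not part of the patched header.
#include <Eigen/Dense>

void max_location_demo()
{
  Eigen::MatrixXf M = Eigen::MatrixXf::Random(4, 4);
  Eigen::Index r, c;
  float largest = M.maxCoeff(&r, &c);   // largest == M(r, c)
  (void)largest;
}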
/** \returns the maximum of all coefficients of *this
* and puts in *index its location.
/** \returns the maximum of all coefficients of *this and puts in *index its location.
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
*/
template<typename Derived>
template<typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(IndexType* index) const
{
......