"test/vscode:/vscode.git/clone" did not exist on "b34bc7d33d07fce4b62070eeb58c5eee89b2b7a9"
Unverified commit 4c4419e0 authored by Lucas Beyer, committed by GitHub

Merge pull request #108 from STulling/master

Fix Windows MSVC install by updating Eigen Library
parents 4d5343c3 13b115ab
@@ -13,47 +13,14 @@
namespace Eigen {
/** \class Block
* \ingroup Core_Module
*
* \brief Expression of a fixed-size or dynamic-size block
*
* \param XprType the type of the expression in which we are taking a block
* \param BlockRows the number of rows of the block we are taking at compile time (optional)
* \param BlockCols the number of columns of the block we are taking at compile time (optional)
* \param _DirectAccessStatus \internal used for partial specialization
*
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return
* type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
* most of the time this is the only way it is used.
*
  * However, if you want to directly manipulate block expressions,
* for instance if you want to write a function returning such an expression, you
* will need to use this class.
*
* Here is an example illustrating the dynamic case:
* \include class_Block.cpp
* Output: \verbinclude class_Block.out
*
* \note Even though this expression has dynamic size, in the case where \a XprType
* has fixed size, this expression inherits a fixed maximal size which means that evaluating
* it does not cause a dynamic memory allocation.
*
* Here is an example illustrating the fixed-size case:
* \include class_FixedBlock.cpp
* Output: \verbinclude class_FixedBlock.out
*
* \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
*/
namespace internal {
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
-struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> > : traits<XprType>
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType>
{
  typedef typename traits<XprType>::Scalar Scalar;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::XprKind XprKind;
-  typedef typename nested<XprType>::type XprTypeNested;
+  typedef typename ref_selector<XprType>::type XprTypeNested;
  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
  enum{
    MatrixRows = traits<XprType>::RowsAtCompileTime,
@@ -66,6 +33,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess>
    MaxColsAtCompileTime = BlockCols==0 ? 0
                         : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
                         : int(traits<XprType>::MaxColsAtCompileTime),
    XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
    IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
               : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
@@ -78,44 +46,76 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess>
    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType
                             ? int(outer_stride_at_compile_time<XprType>::ret)
                             : int(inner_stride_at_compile_time<XprType>::ret),

-    MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
-                       && (InnerStrideAtCompileTime == 1)
-                        ? PacketAccessBit : 0,
-    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
-    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+    // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further
    FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
-    Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
-                                        DirectAccessBit |
-                                        MaskPacketAccessBit |
-                                        MaskAlignedBit),
-    Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
+    Flags = (traits<XprType>::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit,
+    // FIXME DirectAccessBit should not be handled by expressions
+    //
+    // Alignment is needed by MapBase's assertions
+    // We can safely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator
+    Alignment = 0
  };
};
}
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class Block
-  : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> >::type
-{
-  public:
-    typedef typename internal::dense_xpr_base<Block>::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Block)
-    class InnerIterator;
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
+         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense;
+
+} // end namespace internal
+
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl;
/** \class Block
* \ingroup Core_Module
*
* \brief Expression of a fixed-size or dynamic-size block
*
* \tparam XprType the type of the expression in which we are taking a block
* \tparam BlockRows the number of rows of the block we are taking at compile time (optional)
* \tparam BlockCols the number of columns of the block we are taking at compile time (optional)
  * \tparam InnerPanel is true if the block maps to a set of rows of a row-major matrix or
  * to a set of columns of a column-major matrix (optional). The parameter makes it possible to determine
  * at compile time whether aligned access is possible on the block expression.
*
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return
* type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
* most of the time this is the only way it is used.
*
  * However, if you want to directly manipulate block expressions,
* for instance if you want to write a function returning such an expression, you
* will need to use this class.
*
* Here is an example illustrating the dynamic case:
* \include class_Block.cpp
* Output: \verbinclude class_Block.out
*
* \note Even though this expression has dynamic size, in the case where \a XprType
* has fixed size, this expression inherits a fixed maximal size which means that evaluating
* it does not cause a dynamic memory allocation.
*
* Here is an example illustrating the fixed-size case:
* \include class_FixedBlock.cpp
* Output: \verbinclude class_FixedBlock.out
*
* \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
*/
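For illustration only (not part of this header), here is a minimal sketch of the two usages described above; the matrix and the block sizes are made up:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  // Dynamic-size block built directly through the class documented above.
  Eigen::Block<Eigen::MatrixXd> b(A, 1, 1, 2, 3);
  // Fixed-size block through the usual API; evaluating it needs no dynamic allocation.
  A.block<2, 3>(1, 1).setZero();     // blocks are lvalues, so this writes into A
  std::cout << b << "\n";            // b views the same coefficients, now zero
  return 0;
}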
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block
: public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind>
{
typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;
public:
//typedef typename Impl::Base Base;
typedef Impl Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
typedef typename internal::remove_all<XprType>::type NestedExpression;
    /** Column or Row constructor
      */
-    inline Block(XprType& xpr, Index i)
-      : m_xpr(xpr),
-        // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
-        // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
-        // all other cases are invalid.
-        // The case a 1x1 matrix seems ambiguous, but the result is the same anyway.
-        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
-        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
-        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
-        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+    EIGEN_DEVICE_FUNC
+    inline Block(XprType& xpr, Index i) : Impl(xpr,i)
    {
      eigen_assert( (i>=0) && (
          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
@@ -124,86 +124,151 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
    /** Fixed-size constructor
      */
+    EIGEN_DEVICE_FUNC
    inline Block(XprType& xpr, Index startRow, Index startCol)
-      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
-        m_blockRows(BlockRows), m_blockCols(BlockCols)
+      : Impl(xpr, startRow, startCol)
    {
      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
-      eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
-             && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
+      eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows()
+             && startCol >= 0 && BlockCols >= 0 && startCol + BlockCols <= xpr.cols());
    }
    /** Dynamic-size constructor
      */
+    EIGEN_DEVICE_FUNC
    inline Block(XprType& xpr,
          Index startRow, Index startCol,
          Index blockRows, Index blockCols)
-      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
-        m_blockRows(blockRows), m_blockCols(blockCols)
+      : Impl(xpr, startRow, startCol, blockRows, blockCols)
    {
      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
          && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
-      eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
-          && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
+      eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows
+          && startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols);
    }
};
// The generic default implementation for dense blocks simply forwards to the internal::BlockImpl_dense
// that must be specialized for direct and non-direct access...
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
: public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>
{
typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
typedef typename XprType::StorageIndex StorageIndex;
public:
typedef Impl Base;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {}
EIGEN_DEVICE_FUNC
inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Impl(xpr, startRow, startCol, blockRows, blockCols) {}
};
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) namespace internal {
/** \internal Internal implementation of dense Blocks in the general case. */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class BlockImpl_dense
: public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type
{
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
public:
typedef typename internal::dense_xpr_base<BlockType>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
inline Index rows() const { return m_blockRows.value(); } // class InnerIterator; // FIXME apparently never used
inline Index cols() const { return m_blockCols.value(); }
inline Scalar& coeffRef(Index row, Index col) /** Column or Row constructor
*/
EIGEN_DEVICE_FUNC
inline BlockImpl_dense(XprType& xpr, Index i)
: m_xpr(xpr),
// It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
// and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
// all other cases are invalid.
// The case a 1x1 matrix seems ambiguous, but the result is the same anyway.
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Fixed-size constructor
*/
EIGEN_DEVICE_FUNC
inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
m_blockRows(BlockRows), m_blockCols(BlockCols)
{}
/** Dynamic-size constructor
*/
EIGEN_DEVICE_FUNC
inline BlockImpl_dense(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
m_blockRows(blockRows), m_blockCols(blockCols)
{}
EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); }
EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index rowId, Index colId)
{ {
EIGEN_STATIC_ASSERT_LVALUE(XprType) EIGEN_STATIC_ASSERT_LVALUE(XprType)
return m_xpr.const_cast_derived() return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
.coeffRef(row + m_startRow.value(), col + m_startCol.value());
} }
inline const Scalar& coeffRef(Index row, Index col) const EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index rowId, Index colId) const
{ {
return m_xpr.derived() return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
.coeffRef(row + m_startRow.value(), col + m_startCol.value());
} }
EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const
{ {
return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value()); return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
} }
EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index index) inline Scalar& coeffRef(Index index)
{ {
EIGEN_STATIC_ASSERT_LVALUE(XprType) EIGEN_STATIC_ASSERT_LVALUE(XprType)
return m_xpr.const_cast_derived() return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
} }
EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index index) const inline const Scalar& coeffRef(Index index) const
{ {
return m_xpr.const_cast_derived() return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
} }
EIGEN_DEVICE_FUNC
inline const CoeffReturnType coeff(Index index) const inline const CoeffReturnType coeff(Index index) const
{ {
return m_xpr return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
} }
template<int LoadMode> template<int LoadMode>
inline PacketScalar packet(Index row, Index col) const inline PacketScalar packet(Index rowId, Index colId) const
{ {
return m_xpr.template packet<Unaligned> return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value());
(row + m_startRow.value(), col + m_startCol.value());
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(Index row, Index col, const PacketScalar& x) inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{ {
m_xpr.const_cast_derived().template writePacket<Unaligned> m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val);
(row + m_startRow.value(), col + m_startCol.value(), x);
} }
template<int LoadMode> template<int LoadMode>
@@ -215,116 +280,140 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(Index index, const PacketScalar& x) inline void writePacket(Index index, const PacketScalar& val)
{ {
m_xpr.const_cast_derived().template writePacket<Unaligned> m_xpr.template writePacket<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x); m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
} }
#ifdef EIGEN_PARSED_BY_DOXYGEN #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \sa MapBase::data() */ /** \sa MapBase::data() */
inline const Scalar* data() const; EIGEN_DEVICE_FUNC inline const Scalar* data() const;
inline Index innerStride() const; EIGEN_DEVICE_FUNC inline Index innerStride() const;
inline Index outerStride() const; EIGEN_DEVICE_FUNC inline Index outerStride() const;
#endif #endif
const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const EIGEN_DEVICE_FUNC
const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
{ {
return m_xpr; return m_xpr;
} }
EIGEN_DEVICE_FUNC
XprType& nestedExpression() { return m_xpr; }
Index startRow() const EIGEN_DEVICE_FUNC
StorageIndex startRow() const
{ {
return m_startRow.value(); return m_startRow.value();
} }
Index startCol() const EIGEN_DEVICE_FUNC
StorageIndex startCol() const
{ {
return m_startCol.value(); return m_startCol.value();
} }
protected: protected:
const typename XprType::Nested m_xpr; XprTypeNested m_xpr;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow; const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol; const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows; const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols; const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;
}; };
/** \internal */ /** \internal Internal implementation of dense Blocks in the direct access case.*/
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class Block<XprType,BlockRows,BlockCols, InnerPanel,true> class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
: public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel, true> > : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> >
{ {
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
enum {
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0
};
public: public:
typedef MapBase<Block> Base; typedef MapBase<BlockType> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Block) EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
/** Column or Row constructor /** Column or Row constructor
*/ */
inline Block(XprType& xpr, Index i) EIGEN_DEVICE_FUNC
: Base(internal::const_cast_ptr(&xpr.coeffRef( inline BlockImpl_dense(XprType& xpr, Index i)
(BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0, : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor))
(BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)), || ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()),
BlockRows==1 ? 1 : xpr.rows(), BlockRows==1 ? 1 : xpr.rows(),
BlockCols==1 ? 1 : xpr.cols()), BlockCols==1 ? 1 : xpr.cols()),
m_xpr(xpr) m_xpr(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)
{ {
eigen_assert( (i>=0) && (
((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
init(); init();
} }
/** Fixed-size constructor /** Fixed-size constructor
*/ */
inline Block(XprType& xpr, Index startRow, Index startCol) EIGEN_DEVICE_FUNC
: Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr) inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)),
m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)
{ {
eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
&& startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
init(); init();
} }
/** Dynamic-size constructor /** Dynamic-size constructor
*/ */
inline Block(XprType& xpr, EIGEN_DEVICE_FUNC
inline BlockImpl_dense(XprType& xpr,
Index startRow, Index startCol, Index startRow, Index startCol,
Index blockRows, Index blockCols) Index blockRows, Index blockCols)
: Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols), : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols),
m_xpr(xpr) m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)
{ {
eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
&& (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
&& startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
init(); init();
} }
const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const EIGEN_DEVICE_FUNC
const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
{ {
return m_xpr; return m_xpr;
} }
EIGEN_DEVICE_FUNC
XprType& nestedExpression() { return m_xpr; }
/** \sa MapBase::innerStride() */ /** \sa MapBase::innerStride() */
EIGEN_DEVICE_FUNC
inline Index innerStride() const inline Index innerStride() const
{ {
return internal::traits<Block>::HasSameStorageOrderAsXprType return internal::traits<BlockType>::HasSameStorageOrderAsXprType
? m_xpr.innerStride() ? m_xpr.innerStride()
: m_xpr.outerStride(); : m_xpr.outerStride();
} }
/** \sa MapBase::outerStride() */ /** \sa MapBase::outerStride() */
EIGEN_DEVICE_FUNC
inline Index outerStride() const inline Index outerStride() const
{ {
return m_outerStride; return m_outerStride;
} }
EIGEN_DEVICE_FUNC
StorageIndex startRow() const
{
return m_startRow.value();
}
EIGEN_DEVICE_FUNC
StorageIndex startCol() const
{
return m_startCol.value();
}
#ifndef __SUNPRO_CC #ifndef __SUNPRO_CC
// FIXME sunstudio is not friendly with the above friend... // FIXME sunstudio is not friendly with the above friend...
// META-FIXME there is no 'friend' keyword around here. Is this obsolete? // META-FIXME there is no 'friend' keyword around here. Is this obsolete?
@@ -333,7 +422,8 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal used by allowAligned() */ /** \internal used by allowAligned() */
inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) EIGEN_DEVICE_FUNC
inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
: Base(data, blockRows, blockCols), m_xpr(xpr) : Base(data, blockRows, blockCols), m_xpr(xpr)
{ {
init(); init();
@@ -341,17 +431,22 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
#endif #endif
protected: protected:
EIGEN_DEVICE_FUNC
void init() void init()
{ {
m_outerStride = internal::traits<Block>::HasSameStorageOrderAsXprType m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType
? m_xpr.outerStride() ? m_xpr.outerStride()
: m_xpr.innerStride(); : m_xpr.innerStride();
} }
typename XprType::Nested m_xpr; XprTypeNested m_xpr;
const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
Index m_outerStride; Index m_outerStride;
}; };
} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_BLOCK_H
@@ -17,9 +17,10 @@ namespace internal {
template<typename Derived, int UnrollCount>
struct all_unroller
{
+  typedef typename Derived::ExpressionTraits Traits;
  enum {
-    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
-    row = (UnrollCount-1) % Derived::RowsAtCompileTime
+    col = (UnrollCount-1) / Traits::RowsAtCompileTime,
+    row = (UnrollCount-1) % Traits::RowsAtCompileTime
  };

  static inline bool run(const Derived &mat)
@@ -29,9 +30,9 @@ struct all_unroller
};

template<typename Derived>
-struct all_unroller<Derived, 1>
+struct all_unroller<Derived, 0>
{
-  static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
+  static inline bool run(const Derived &/*mat*/) { return true; }
};
template<typename Derived>
@@ -43,11 +44,12 @@ struct all_unroller<Derived, Dynamic>
template<typename Derived, int UnrollCount>
struct any_unroller
{
+  typedef typename Derived::ExpressionTraits Traits;
  enum {
-    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
-    row = (UnrollCount-1) % Derived::RowsAtCompileTime
+    col = (UnrollCount-1) / Traits::RowsAtCompileTime,
+    row = (UnrollCount-1) % Traits::RowsAtCompileTime
  };

  static inline bool run(const Derived &mat)
  {
    return any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col);
@@ -55,9 +57,9 @@ struct any_unroller
};

template<typename Derived>
-struct any_unroller<Derived, 1>
+struct any_unroller<Derived, 0>
{
-  static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
+  static inline bool run(const Derived & /*mat*/) { return false; }
};
template<typename Derived>
@@ -78,21 +80,19 @@ struct any_unroller<Derived, Dynamic>
template<typename Derived>
inline bool DenseBase<Derived>::all() const
{
+  typedef internal::evaluator<Derived> Evaluator;
  enum {
    unroll = SizeAtCompileTime != Dynamic
-          && CoeffReadCost != Dynamic
-          && NumTraits<Scalar>::AddCost != Dynamic
-          && SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+          && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
  };
+  Evaluator evaluator(derived());
  if(unroll)
-    return internal::all_unroller<Derived,
-                                  unroll ? int(SizeAtCompileTime) : Dynamic
-                                 >::run(derived());
+    return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(evaluator);
  else
  {
    for(Index j = 0; j < cols(); ++j)
      for(Index i = 0; i < rows(); ++i)
-        if (!coeff(i, j)) return false;
+        if (!evaluator.coeff(i, j)) return false;
    return true;
  }
}
@@ -104,21 +104,19 @@ inline bool DenseBase<Derived>::all() const
template<typename Derived>
inline bool DenseBase<Derived>::any() const
{
+  typedef internal::evaluator<Derived> Evaluator;
  enum {
    unroll = SizeAtCompileTime != Dynamic
-          && CoeffReadCost != Dynamic
-          && NumTraits<Scalar>::AddCost != Dynamic
-          && SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+          && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
  };
+  Evaluator evaluator(derived());
  if(unroll)
-    return internal::any_unroller<Derived,
-                                  unroll ? int(SizeAtCompileTime) : Dynamic
-                                 >::run(derived());
+    return internal::any_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(evaluator);
  else
  {
    for(Index j = 0; j < cols(); ++j)
      for(Index i = 0; i < rows(); ++i)
-        if (coeff(i, j)) return true;
+        if (evaluator.coeff(i, j)) return true;
    return false;
  }
}
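A small illustrative use of all() and any() (not part of this file): a fixed-size expression takes the unrolled path above, a dynamic-size one falls back to the loops.

#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::Array3d a(1.0, 2.0, 3.0);
  assert((a > 0.0).all());          // fixed size: unrolled at compile time
  assert((a > 2.5).any());
  Eigen::ArrayXd b = Eigen::ArrayXd::LinSpaced(10, 0.0, 9.0);
  assert(!(b < 0.0).any());         // dynamic size: plain coefficient loops
  return 0;
}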
@@ -128,11 +126,39 @@ inline bool DenseBase<Derived>::any() const
  * \sa all(), any()
  */
template<typename Derived>
-inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
+inline Eigen::Index DenseBase<Derived>::count() const
{
  return derived().template cast<bool>().template cast<Index>().sum();
}
/** \returns true if \c *this contains at least one Not A Number (NaN).
*
* \sa allFinite()
*/
template<typename Derived>
inline bool DenseBase<Derived>::hasNaN() const
{
#if EIGEN_COMP_MSVC || (defined __FAST_MATH__)
return derived().array().isNaN().any();
#else
return !((derived().array()==derived().array()).all());
#endif
}
/** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values.
*
* \sa hasNaN()
*/
template<typename Derived>
inline bool DenseBase<Derived>::allFinite() const
{
#if EIGEN_COMP_MSVC || (defined __FAST_MATH__)
return derived().array().isFinite().all();
#else
return !((derived()-derived()).hasNaN());
#endif
}
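A small illustrative check of the two predicates above (the values are made up). Note that the non-MSVC branch relies on the IEEE rule that NaN != NaN, which is why MSVC and fast-math builds use the isNaN()/isFinite() path instead.

#include <Eigen/Dense>
#include <limits>
#include <cassert>

int main() {
  Eigen::Matrix2d m;
  m << 1.0, 2.0,
       3.0, 4.0;
  assert(!m.hasNaN() && m.allFinite());
  m(0, 1) = std::numeric_limits<double>::quiet_NaN();
  assert(m.hasNaN() && !m.allFinite());
  m(0, 1) = std::numeric_limits<double>::infinity();
  assert(!m.hasNaN() && !m.allFinite());   // infinite but not NaN
  return 0;
}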
} // end namespace Eigen

#endif // EIGEN_ALLANDANY_H
@@ -22,14 +22,14 @@ namespace Eigen {
  * the return type of MatrixBase::operator<<, and most of the time this is the only
  * way it is used.
  *
-  * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
+  * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
  */
template<typename XprType>
struct CommaInitializer
{
  typedef typename XprType::Scalar Scalar;
-  typedef typename XprType::Index Index;

+  EIGEN_DEVICE_FUNC
  inline CommaInitializer(XprType& xpr, const Scalar& s)
    : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
  {
@@ -37,13 +37,27 @@ struct CommaInitializer
  }

  template<typename OtherDerived>
+  EIGEN_DEVICE_FUNC
  inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
    : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
  {
    m_xpr.block(0, 0, other.rows(), other.cols()) = other;
  }
/* Copy/Move constructor which transfers ownership. This is crucial in
* absence of return value optimization to avoid assertions during destruction. */
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
EIGEN_DEVICE_FUNC
inline CommaInitializer(const CommaInitializer& o)
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
// Mark original object as finished. In absence of R-value references we need to const_cast:
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
}
  /* inserts a scalar value in the target matrix */
+  EIGEN_DEVICE_FUNC
  CommaInitializer& operator,(const Scalar& s)
  {
    if (m_col==m_xpr.cols())
@@ -63,11 +77,10 @@ struct CommaInitializer
  /* inserts a matrix expression in the target matrix */
  template<typename OtherDerived>
+  EIGEN_DEVICE_FUNC
  CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
  {
-    if(other.cols()==0 || other.rows()==0)
-      return *this;
-    if (m_col==m_xpr.cols())
+    if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows))
    {
      m_row+=m_currentBlockRows;
      m_col = 0;
@@ -75,24 +88,22 @@ struct CommaInitializer
      eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
        && "Too many rows passed to comma initializer (operator<<)");
    }
-    eigen_assert(m_col<m_xpr.cols()
+    eigen_assert((m_col + other.cols() <= m_xpr.cols())
      && "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows==other.rows());
-    if (OtherDerived::SizeAtCompileTime != Dynamic)
-      m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
-                           OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
-                  (m_row, m_col) = other;
-    else
-      m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
+    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>
+                  (m_row, m_col, other.rows(), other.cols()) = other;
    m_col += other.cols();
    return *this;
  }
+  EIGEN_DEVICE_FUNC
  inline ~CommaInitializer()
+#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
+  EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)
+#endif
  {
-    eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
-         && m_col == m_xpr.cols()
-         && "Too few coefficients passed to comma initializer (operator<<)");
+    finished();
  }
/** \returns the built matrix once all its coefficients have been set. /** \returns the built matrix once all its coefficients have been set.
@@ -102,9 +113,15 @@ struct CommaInitializer
    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
    * \endcode
    */
-  inline XprType& finished() { return m_xpr; }
+  EIGEN_DEVICE_FUNC
+  inline XprType& finished() {
+    eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0)
+         && m_col == m_xpr.cols()
+         && "Too few coefficients passed to comma initializer (operator<<)");
+    return m_xpr;
+  }

  XprType& m_xpr;           // target expression
  Index m_row;              // current row id
  Index m_col;              // current col id
  Index m_currentBlockRows; // current block height
@@ -118,6 +135,8 @@ struct CommaInitializer
  *
  * Example: \include MatrixBase_set.cpp
  * Output: \verbinclude MatrixBase_set.out
  *
+  * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
  *
  * \sa CommaInitializer::finished(), class CommaInitializer
  */
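For illustration (not part of this header), the comma initializer is typically used as below; finished() is what allows building a matrix inline inside a larger expression:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;                       // operator<< and operator, fill m row by row
  Eigen::Vector3d a(1, 0, 0), b(0, 1, 0), c(0, 0, 1);
  // finished() returns the fully built matrix so it can be used in place:
  Eigen::Matrix3d basis = (Eigen::Matrix3d() << a, b, c).finished();
  std::cout << m * basis << "\n";
  return 0;
}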
......
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONDITIONESTIMATOR_H
#define EIGEN_CONDITIONESTIMATOR_H
namespace Eigen {
namespace internal {
template <typename Vector, typename RealVector, bool IsComplex>
struct rcond_compute_sign {
static inline Vector run(const Vector& v) {
const RealVector v_abs = v.cwiseAbs();
return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))
.select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
}
};
// Partial specialization to avoid elementwise division for real vectors.
template <typename Vector>
struct rcond_compute_sign<Vector, Vector, false> {
static inline Vector run(const Vector& v) {
return (v.array() < static_cast<typename Vector::RealScalar>(0))
.select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
}
};
/**
* \returns an estimate of ||inv(matrix)||_1 given a decomposition of
* \a matrix that implements .solve() and .adjoint().solve() methods.
*
* This function implements Algorithms 4.1 and 5.1 from
* http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
* which also forms the basis for the condition number estimators in
* LAPACK. Since at most 10 calls to the solve method of dec are
* performed, the total cost is O(dims^2), as opposed to O(dims^3)
* needed to compute the inverse matrix explicitly.
*
* The most common usage is in estimating the condition number
* ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
* computed directly in O(n^2) operations.
*
* Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
* LLT.
*
* \sa FullPivLU, PartialPivLU, LDLT, LLT.
*/
template <typename Decomposition>
typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec)
{
typedef typename Decomposition::MatrixType MatrixType;
typedef typename Decomposition::Scalar Scalar;
typedef typename Decomposition::RealScalar RealScalar;
typedef typename internal::plain_col_type<MatrixType>::type Vector;
typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
eigen_assert(dec.rows() == dec.cols());
const Index n = dec.rows();
if (n == 0)
return 0;
// Disable Index to float conversion warning
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning ( disable : 2259 )
#endif
Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
// lower_bound is a lower bound on
// ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
// and is the objective maximized by the ("super-") gradient ascent
// algorithm below.
RealScalar lower_bound = v.template lpNorm<1>();
if (n == 1)
return lower_bound;
// Gradient ascent algorithm follows: We know that the optimum is achieved at
// one of the simplices v = e_i, so in each iteration we follow a
// super-gradient to move towards the optimal one.
RealScalar old_lower_bound = lower_bound;
Vector sign_vector(n);
Vector old_sign_vector;
Index v_max_abs_index = -1;
Index old_v_max_abs_index = v_max_abs_index;
for (int k = 0; k < 4; ++k)
{
sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
// Break if the solution stagnated.
break;
}
// v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
v = dec.adjoint().solve(sign_vector);
v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
if (v_max_abs_index == old_v_max_abs_index) {
// Break if the solution stagnated.
break;
}
// Move to the new simplex e_j, where j = v_max_abs_index.
v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j.
lower_bound = v.template lpNorm<1>();
if (lower_bound <= old_lower_bound) {
// Break if the gradient step did not increase the lower_bound.
break;
}
if (!is_complex) {
old_sign_vector = sign_vector;
}
old_v_max_abs_index = v_max_abs_index;
old_lower_bound = lower_bound;
}
// The following calculates an independent estimate of ||matrix||_1 by
// multiplying matrix by a vector with entries of slowly increasing
// magnitude and alternating sign:
// v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
// This improvement to Hager's algorithm above is due to Higham. It was
// added to make the algorithm more robust in certain corner cases where
// large elements in the matrix might otherwise escape detection due to
// exact cancellation (especially when op and op_adjoint correspond to a
// sequence of backsubstitutions and permutations), which could cause
// Hager's algorithm to vastly underestimate ||matrix||_1.
Scalar alternating_sign(RealScalar(1));
for (Index i = 0; i < n; ++i) {
    // The static_cast is needed when Scalar is a complex type and RealScalar implements expression templates
v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
alternating_sign = -alternating_sign;
}
v = dec.solve(v);
const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
return numext::maxi(lower_bound, alternate_lower_bound);
}
/** \brief Reciprocal condition number estimator.
*
* Computing a decomposition of a dense matrix takes O(n^3) operations, while
* this method estimates the condition number quickly and reliably in O(n^2)
* operations.
*
* \returns an estimate of the reciprocal condition number
* (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
* its decomposition. Supports the following decompositions: FullPivLU,
* PartialPivLU, LDLT, and LLT.
*
* \sa FullPivLU, PartialPivLU, LDLT, LLT.
*/
template <typename Decomposition>
typename Decomposition::RealScalar
rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec)
{
typedef typename Decomposition::RealScalar RealScalar;
eigen_assert(dec.rows() == dec.cols());
if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
if (matrix_norm == RealScalar(0)) return RealScalar(0);
if (dec.rows() == 1) return RealScalar(1);
const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0)
: (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
}
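As a usage sketch (assuming the public rcond() members that these internal helpers back in Eigen 3.3's LU and Cholesky decompositions; the matrix here is made up): the estimate costs only a handful of solves on top of the decomposition, i.e. O(n^2) instead of the O(n^3) needed to form the inverse.

#include <Eigen/Dense>
#include <iostream>

int main() {
  const int n = 100;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  // A well-conditioned symmetric positive definite matrix for the demo.
  Eigen::MatrixXd S = A * A.transpose() + n * Eigen::MatrixXd::Identity(n, n);
  Eigen::PartialPivLU<Eigen::MatrixXd> lu(S);
  std::cout << "estimated reciprocal condition number: " << lu.rcond() << "\n";
  return 0;
}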
} // namespace internal
} // namespace Eigen
#endif
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COREEVALUATORS_H
#define EIGEN_COREEVALUATORS_H
namespace Eigen {
namespace internal {
// This class returns the evaluator kind from the expression storage kind.
// Default assumes index based accessors
template<typename StorageKind>
struct storage_kind_to_evaluator_kind {
typedef IndexBased Kind;
};
// This class returns the evaluator shape from the expression storage kind.
// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
template<typename StorageKind> struct storage_kind_to_shape;
template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; };
template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; };
template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; };
template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; };
// Evaluators have to be specialized with respect to various criteria such as:
// - storage/structure/shape
// - scalar type
// - etc.
// Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators.
// We currently distinguish the following kind of evaluators:
// - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
// - binary_evaluator for expression taking two arguments (CwiseBinaryOp)
// - ternary_evaluator for expression taking three arguments (CwiseTernaryOp)
// - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
// - mapbase_evaluator for Map, Block, Ref
// - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
template< typename T,
typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind,
typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind,
typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind,
typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
template< typename T,
typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
template< typename T,
typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind,
typename Scalar = typename T::Scalar> struct unary_evaluator;
// evaluator_traits<T> contains traits for evaluator<T>
template<typename T>
struct evaluator_traits_base
{
// by default, get evaluator kind and shape from storage
typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
};
// Default evaluator traits
template<typename T>
struct evaluator_traits : public evaluator_traits_base<T>
{
};
template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
struct evaluator_assume_aliasing {
static const bool value = false;
};
// By default, we assume a unary expression:
template<typename T>
struct evaluator : public unary_evaluator<T>
{
typedef unary_evaluator<T> Base;
EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
};
// TODO: Think about const-correctness
template<typename T>
struct evaluator<const T>
: evaluator<T>
{
EIGEN_DEVICE_FUNC
explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};
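A rough sketch of how these evaluators are consumed (this mirrors what Eigen's own assignment loops do; internal::evaluator is not public API, so treat it purely as an illustration): one constructs an evaluator over a lazy expression and reads coefficients through it, without materializing a temporary.

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);
  auto expr = a + 2.0 * b;                          // lazy coefficient-wise expression
  Eigen::internal::evaluator<decltype(expr)> ev(expr);
  // Coefficients are computed on demand through the evaluator.
  std::cout << ev.coeff(0, 0) << " == " << a(0, 0) + 2.0 * b(0, 0) << "\n";
  return 0;
}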
// ---------- base class for all evaluators ----------
template<typename ExpressionType>
struct evaluator_base : public noncopyable
{
// TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
typedef traits<ExpressionType> ExpressionTraits;
enum {
Alignment = 0
};
};
// -------------------- Matrix and Array --------------------
//
// evaluator<PlainObjectBase> is a common base class for the
// Matrix and Array evaluators.
// Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
// so no need for more sophisticated dispatching.
template<typename Derived>
struct evaluator<PlainObjectBase<Derived> >
: evaluator_base<Derived>
{
typedef PlainObjectBase<Derived> PlainObjectType;
typedef typename PlainObjectType::Scalar Scalar;
typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
enum {
IsRowMajor = PlainObjectType::IsRowMajor,
IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
Flags = traits<Derived>::EvaluatorFlags,
Alignment = traits<Derived>::Alignment
};
EIGEN_DEVICE_FUNC evaluator()
: m_data(0),
m_outerStride(IsVectorAtCompileTime ? 0
: int(IsRowMajor) ? ColsAtCompileTime
: RowsAtCompileTime)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
: m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
if (IsRowMajor)
return m_data[row * m_outerStride.value() + col];
else
return m_data[row + col * m_outerStride.value()];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_data[index];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
if (IsRowMajor)
return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col];
else
return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
return const_cast<Scalar*>(m_data)[index];
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
if (IsRowMajor)
return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col);
else
return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value());
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return ploadt<PacketType, LoadMode>(m_data + index);
}
template<int StoreMode,typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
{
if (IsRowMajor)
return pstoret<Scalar, PacketType, StoreMode>
(const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x);
else
return pstoret<Scalar, PacketType, StoreMode>
(const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
}
protected:
const Scalar *m_data;
// We do not need to know the outer stride for vectors
variable_if_dynamic<Index, IsVectorAtCompileTime ? 0
: int(IsRowMajor) ? ColsAtCompileTime
: RowsAtCompileTime> m_outerStride;
};
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
: evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
EIGEN_DEVICE_FUNC evaluator() {}
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
: evaluator<PlainObjectBase<XprType> >(m)
{ }
};
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
: evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
EIGEN_DEVICE_FUNC evaluator() {}
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
: evaluator<PlainObjectBase<XprType> >(m)
{ }
};
// -------------------- Transpose --------------------
template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IndexBased>
: evaluator_base<Transpose<ArgType> >
{
typedef Transpose<ArgType> XprType;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
Alignment = evaluator<ArgType>::Alignment
};
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_argImpl.coeff(col, row);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_argImpl.coeff(index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
return m_argImpl.coeffRef(col, row);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename XprType::Scalar& coeffRef(Index index)
{
return m_argImpl.coeffRef(index);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
return m_argImpl.template packet<LoadMode,PacketType>(col, row);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return m_argImpl.template packet<LoadMode,PacketType>(index);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
{
m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
}
protected:
evaluator<ArgType> m_argImpl;
};
// -------------------- CwiseNullaryOp --------------------
// Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
// Likewise, there is no need for more sophisticated dispatching here.
template<typename Scalar,typename NullaryOp,
bool has_nullary = has_nullary_operator<NullaryOp>::value,
bool has_unary = has_unary_operator<NullaryOp>::value,
bool has_binary = has_binary_operator<NullaryOp>::value>
struct nullary_wrapper
{
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
{
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
};
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
{
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
};
// We need the following specialization for vector-only functors assigned to a runtime vector,
// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
// In this case, i==0 and j is used for the actual iteration.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
{
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
eigen_assert(i==0 || j==0);
return op(i+j);
}
template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
eigen_assert(i==0 || j==0);
return op.template packetOp<T>(i+j);
}
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
template <typename T, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};
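// A small sketch of that situation (our own example, assuming <Eigen/Dense> is included):
//   Eigen::MatrixXd M(3, 4);
//   M.row(1) = Eigen::RowVectorXd::LinSpaced(4, 0.0, 1.0);
// The linspaced functor is one-dimensional, the destination row is visited with i==0 and a
// running j, and the specialization above folds both indices into a single call op(i+j).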
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
#if 0 && EIGEN_COMP_MSVC>0
// Disable this ugly workaround. This is now handled in traits<Ref>::match,
// but this piece of code might still come in handy if some other weird compilation
// errors pop up again.
// MSVC exhibits a weird compilation error when
// compiling:
// Eigen::MatrixXf A = MatrixXf::Random(3,3);
// Ref<const MatrixXf> R = 2.f*A;
// while has_*ary_operator<scalar_constant_op<float>> has not been instantiated yet.
// The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
// and at that time has_*ary_operator<T> returns true regardless of T.
// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
// The trick is thus to defer the proper instantiation of nullary_wrapper until coeff()
// and packet() are really instantiated, as implemented below:
// This is a simple wrapper around Index to enforce the re-instantiation of
// has_*ary_operator when needed.
template<typename T> struct nullary_wrapper_workaround_msvc {
nullary_wrapper_workaround_msvc(const T&);
operator T()const;
};
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
{
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
}
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
}
template <typename T, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
}
template <typename T, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
}
};
#endif // MSVC workaround
template<typename NullaryOp, typename PlainObjectType>
struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
: evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
{
typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
enum {
CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,
Flags = (evaluator<PlainObjectTypeCleaned>::Flags
& ( HereditaryBits
| (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
| (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
| (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
Alignment = AlignedMax
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
: m_functor(n.functor()), m_wrapper()
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(IndexType row, IndexType col) const
{
return m_wrapper(m_functor, row, col);
}
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(IndexType index) const
{
return m_wrapper(m_functor,index);
}
template<int LoadMode, typename PacketType, typename IndexType>
EIGEN_STRONG_INLINE
PacketType packet(IndexType row, IndexType col) const
{
return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
}
template<int LoadMode, typename PacketType, typename IndexType>
EIGEN_STRONG_INLINE
PacketType packet(IndexType index) const
{
return m_wrapper.template packetOp<PacketType>(m_functor, index);
}
protected:
const NullaryOp m_functor;
const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
};
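// Typical expressions handled by this evaluator (informal examples, not an exhaustive list):
//   Eigen::MatrixXd C = Eigen::MatrixXd::Constant(2, 2, 3.14);    // nullary functor, op()
//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);  // 1D functor, op(i)
// Custom functors can be plugged in through DenseBase::NullaryExpr in the same way.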
// -------------------- CwiseUnaryOp --------------------
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
: evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
{
typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
Flags = evaluator<ArgType>::Flags
& (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
Alignment = evaluator<ArgType>::Alignment
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit unary_evaluator(const XprType& op)
: m_functor(op.functor()),
m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_functor(m_argImpl.coeff(row, col));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_functor(m_argImpl.coeff(index));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(row, col));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(index));
}
protected:
const UnaryOp m_functor;
evaluator<ArgType> m_argImpl;
};
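// Informal example: in
//   Eigen::ArrayXd a = Eigen::ArrayXd::Random(8);
//   Eigen::ArrayXd b = a.abs();
// the evaluator above applies the abs functor to each coefficient (or packet) of a.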
// -------------------- CwiseTernaryOp --------------------
// this is a ternary expression
template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
: public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
: evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
enum {
CoeffReadCost = evaluator<Arg1>::CoeffReadCost + evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost,
Arg1Flags = evaluator<Arg1>::Flags,
Arg2Flags = evaluator<Arg2>::Flags,
Arg3Flags = evaluator<Arg3>::Flags,
SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
HereditaryBits
| (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
( (StorageOrdersAgree ? LinearAccessBit : 0)
| (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
)
)
),
Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
Alignment = EIGEN_PLAIN_ENUM_MIN(
EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
evaluator<Arg3>::Alignment)
};
EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_arg1Impl(xpr.arg1()),
m_arg2Impl(xpr.arg2()),
m_arg3Impl(xpr.arg3())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(row, col),
m_arg2Impl.template packet<LoadMode,PacketType>(row, col),
m_arg3Impl.template packet<LoadMode,PacketType>(row, col));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(index),
m_arg2Impl.template packet<LoadMode,PacketType>(index),
m_arg3Impl.template packet<LoadMode,PacketType>(index));
}
protected:
const TernaryOp m_functor;
evaluator<Arg1> m_arg1Impl;
evaluator<Arg2> m_arg2Impl;
evaluator<Arg3> m_arg3Impl;
};
// -------------------- CwiseBinaryOp --------------------
// this is a binary expression
template<typename BinaryOp, typename Lhs, typename Rhs>
struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
: public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
LhsFlags = evaluator<Lhs>::Flags,
RhsFlags = evaluator<Rhs>::Flags,
SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
HereditaryBits
| (int(LhsFlags) & int(RhsFlags) &
( (StorageOrdersAgree ? LinearAccessBit : 0)
| (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
)
)
),
Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
};
EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(row, col),
m_rhsImpl.template packet<LoadMode,PacketType>(row, col));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(index),
m_rhsImpl.template packet<LoadMode,PacketType>(index));
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
};
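// Informal example: for
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3), B = Eigen::MatrixXd::Random(3, 3);
//   Eigen::MatrixXd C = A + B;
// the sum is evaluated coefficient-by-coefficient (or packet-by-packet) through this evaluator;
// as encoded in Flags above, packet access is kept only if both operands agree on storage order
// and scalar type.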
// -------------------- CwiseUnaryView --------------------
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
: evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
{
typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
};
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
: m_unaryOp(op.functor()),
m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_unaryOp(m_argImpl.coeff(row, col));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_unaryOp(m_argImpl.coeff(index));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
return m_unaryOp(m_argImpl.coeffRef(row, col));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
return m_unaryOp(m_argImpl.coeffRef(index));
}
protected:
const UnaryOp m_unaryOp;
evaluator<ArgType> m_argImpl;
};
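// Informal example (our own, assuming <Eigen/Dense>): the writable view returned by real()
//   Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(2, 2);
//   A.real()(0, 0) = 1.0;   // goes through coeffRef() above, exposing the real part as an lvalue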
// -------------------- Map --------------------
// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
// but that might complicate template specialization
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator;
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator : evaluator_base<Derived>
{
typedef Derived XprType;
typedef typename XprType::PointerType PointerType;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
IsRowMajor = XprType::RowsAtCompileTime,
ColsAtCompileTime = XprType::ColsAtCompileTime,
CoeffReadCost = NumTraits<Scalar>::ReadCost
};
EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
: m_data(const_cast<PointerType>(map.data())),
m_innerStride(map.innerStride()),
m_outerStride(map.outerStride())
{
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_data[col * colStride() + row * rowStride()];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_data[index * m_innerStride.value()];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
return m_data[col * colStride() + row * rowStride()];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
return m_data[index * m_innerStride.value()];
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
PointerType ptr = m_data + row * rowStride() + col * colStride();
return internal::ploadt<PacketType, LoadMode>(ptr);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
{
PointerType ptr = m_data + row * rowStride() + col * colStride();
return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
}
protected:
EIGEN_DEVICE_FUNC
inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
EIGEN_DEVICE_FUNC
inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }
PointerType m_data;
const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
};
template<typename PlainObjectType, int MapOptions, typename StrideType>
struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
: public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
{
typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
typedef typename XprType::Scalar Scalar;
// TODO: should check for smaller packet types once we can handle multi-sized packet types
typedef typename packet_traits<Scalar>::type PacketScalar;
enum {
InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
? int(PlainObjectType::InnerStrideAtCompileTime)
: int(StrideType::InnerStrideAtCompileTime),
OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
? int(PlainObjectType::OuterStrideAtCompileTime)
: int(StrideType::OuterStrideAtCompileTime),
HasNoInnerStride = InnerStrideAtCompileTime == 1,
HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
HasNoStride = HasNoInnerStride && HasNoOuterStride,
IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
Alignment = int(MapOptions)&int(AlignedMask)
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
: mapbase_evaluator<XprType, PlainObjectType>(map)
{ }
};
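// Minimal sketch of the strided case handled above (buffer, sizes and stride are our own choice):
//   double data[12] = {0};
//   Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<> > M(data, 3, 2, Eigen::OuterStride<>(6));
//   M(1, 1) = 42.0;   // column-major: writes data[1 * rowStride() + 1 * colStride()] == data[7]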
// -------------------- Ref --------------------
template<typename PlainObjectType, int RefOptions, typename StrideType>
struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
: public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
{
typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
enum {
Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
: mapbase_evaluator<XprType, PlainObjectType>(ref)
{ }
};
// -------------------- Block --------------------
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
: block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
typedef typename XprType::Scalar Scalar;
// TODO: should check for smaller packet types once we can handle multi-sized packet types
typedef typename packet_traits<Scalar>::type PacketScalar;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
: ArgTypeIsRowMajor,
HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
? int(inner_stride_at_compile_time<ArgType>::ret)
: int(outer_stride_at_compile_time<ArgType>::ret),
OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
? int(outer_stride_at_compile_time<ArgType>::ret)
: int(inner_stride_at_compile_time<ArgType>::ret),
MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
FlagsRowMajorBit = XprType::Flags&RowMajorBit,
Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
DirectAccessBit |
MaskPacketAccessBit),
Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
PacketAlignment = unpacket_traits<PacketScalar>::alignment,
Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
&& (OuterStrideAtCompileTime!=0)
&& (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
};
typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
};
// no direct-access => dispatch to a unary evaluator
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
: unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
: unary_evaluator<XprType>(block)
{}
};
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
: evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
: m_argImpl(block.nestedExpression()),
m_startRow(block.startRow()),
m_startCol(block.startCol()),
m_linear_offset(InnerPanel?(XprType::IsRowMajor ? block.startRow()*block.cols() : block.startCol()*block.rows()):0)
{ }
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
RowsAtCompileTime = XprType::RowsAtCompileTime,
ForwardLinearAccess = InnerPanel && bool(evaluator<ArgType>::Flags&LinearAccessBit)
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
if (ForwardLinearAccess)
return m_argImpl.coeff(m_linear_offset.value() + index);
else
return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
if (ForwardLinearAccess)
return m_argImpl.coeffRef(m_linear_offset.value() + index);
else
return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
if (ForwardLinearAccess)
return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
else
return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
RowsAtCompileTime == 1 ? index : 0);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
{
return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
if (ForwardLinearAccess)
return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
else
return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
RowsAtCompileTime == 1 ? index : 0,
x);
}
protected:
evaluator<ArgType> m_argImpl;
const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
const variable_if_dynamic<Index, InnerPanel ? Dynamic : 0> m_linear_offset;
};
// TODO: This evaluator does not actually use the child evaluator;
// all action is via the data() as returned by the Block expression.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
: mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
typedef typename XprType::Scalar Scalar;
EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
: mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
{
// TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
}
};
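// Informal illustration of the dispatch above (our own example):
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4), B = Eigen::MatrixXd::Random(4, 4);
//   auto b1 = A.block(1, 1, 2, 2);        // plain storage: direct access, mapbase_evaluator path
//   auto b2 = (A + B).block(1, 1, 2, 2);  // no direct storage: index-based unary_evaluator path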
// -------------------- Select --------------------
// NOTE shall we introduce a ternary_evaluator?
// TODO enable vectorization for Select
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
: evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
{
typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
enum {
CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
+ EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
evaluator<ElseMatrixType>::CoeffReadCost),
Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)
: m_conditionImpl(select.conditionMatrix()),
m_thenImpl(select.thenMatrix()),
m_elseImpl(select.elseMatrix())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
if (m_conditionImpl.coeff(row, col))
return m_thenImpl.coeff(row, col);
else
return m_elseImpl.coeff(row, col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
if (m_conditionImpl.coeff(index))
return m_thenImpl.coeff(index);
else
return m_elseImpl.coeff(index);
}
protected:
evaluator<ConditionMatrixType> m_conditionImpl;
evaluator<ThenMatrixType> m_thenImpl;
evaluator<ElseMatrixType> m_elseImpl;
};
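// Informal example of the coefficient-wise dispatch above (our own sketch):
//   Eigen::ArrayXXd A = Eigen::ArrayXXd::Random(3, 3);
//   Eigen::ArrayXXd R = (A > 0.0).select(A, -A);   // R(i,j) is A(i,j) if positive, -A(i,j) otherwise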
// -------------------- Replicate --------------------
template<typename ArgType, int RowFactor, int ColFactor>
struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
: evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
{
typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
};
typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
enum {
CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
};
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)
: m_arg(replicate.nestedExpression()),
m_argImpl(m_arg),
m_rows(replicate.nestedExpression().rows()),
m_cols(replicate.nestedExpression().cols())
{}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
// try to avoid using modulo; this is a pure optimization strategy
const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
: RowFactor==1 ? row
: row % m_rows.value();
const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col % m_cols.value();
return m_argImpl.coeff(actual_row, actual_col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
// try to avoid using modulo; this is a pure optimization strategy
const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
? (ColFactor==1 ? index : index%m_cols.value())
: (RowFactor==1 ? index : index%m_rows.value());
return m_argImpl.coeff(actual_index);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
: RowFactor==1 ? row
: row % m_rows.value();
const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col % m_cols.value();
return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
? (ColFactor==1 ? index : index%m_cols.value())
: (RowFactor==1 ? index : index%m_rows.value());
return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
}
protected:
const ArgTypeNested m_arg;
evaluator<ArgTypeNestedCleaned> m_argImpl;
const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
};
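// Informal example of the modulo-based index mapping above:
//   Eigen::Vector3d v(1.0, 2.0, 3.0);
//   Eigen::MatrixXd R = v.replicate(2, 4);   // 6x4 result with R(i,j) == v(i % 3)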
// -------------------- PartialReduxExpr --------------------
template< typename ArgType, typename MemberOp, int Direction>
struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
: evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
{
typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
typedef typename ArgType::Scalar InputScalar;
typedef typename XprType::Scalar Scalar;
enum {
TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
};
typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
enum {
CoeffReadCost = TraversalSize==Dynamic ? HugeCost
: TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),
Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit,
Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
: m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value));
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Scalar coeff(Index i, Index j) const
{
if (Direction==Vertical)
return m_functor(m_arg.col(j));
else
return m_functor(m_arg.row(i));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Scalar coeff(Index index) const
{
if (Direction==Vertical)
return m_functor(m_arg.col(index));
else
return m_functor(m_arg.row(index));
}
protected:
typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;
const MemberOp m_functor;
};
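// Informal example: in
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 4);
//   Eigen::RowVectorXd s = A.colwise().sum();
// coeff(j) above applies the sum functor to the whole column A.col(j).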
// -------------------- MatrixWrapper and ArrayWrapper --------------------
//
// evaluator_wrapper_base<T> is a common base class for the
// MatrixWrapper and ArrayWrapper evaluators.
template<typename XprType>
struct evaluator_wrapper_base
: evaluator_base<XprType>
{
typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = evaluator<ArgType>::Flags,
Alignment = evaluator<ArgType>::Alignment
};
EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
typedef typename ArgType::Scalar Scalar;
typedef typename ArgType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_argImpl.coeff(row, col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_argImpl.coeff(index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
return m_argImpl.coeffRef(row, col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
return m_argImpl.coeffRef(index);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
return m_argImpl.template packet<LoadMode,PacketType>(row, col);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
return m_argImpl.template packet<LoadMode,PacketType>(index);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
{
m_argImpl.template writePacket<StoreMode>(row, col, x);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
m_argImpl.template writePacket<StoreMode>(index, x);
}
protected:
evaluator<ArgType> m_argImpl;
};
template<typename TArgType>
struct unary_evaluator<MatrixWrapper<TArgType> >
: evaluator_wrapper_base<MatrixWrapper<TArgType> >
{
typedef MatrixWrapper<TArgType> XprType;
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
: evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
{ }
};
template<typename TArgType>
struct unary_evaluator<ArrayWrapper<TArgType> >
: evaluator_wrapper_base<ArrayWrapper<TArgType> >
{
typedef ArrayWrapper<TArgType> XprType;
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
: evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
{ }
};
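// Informal example: in
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(2, 2);
//   Eigen::ArrayXXd B = A.array() + 1.0;
// the ArrayWrapper around A adds no logic of its own; coefficients are read straight from A's evaluator.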
// -------------------- Reverse --------------------
// defined in Reverse.h:
template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
template<typename ArgType, int Direction>
struct unary_evaluator<Reverse<ArgType, Direction> >
: evaluator_base<Reverse<ArgType, Direction> >
{
typedef Reverse<ArgType, Direction> XprType;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
IsRowMajor = XprType::IsRowMajor,
IsColMajor = !IsRowMajor,
ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
ReversePacket = (Direction == BothDirections)
|| ((Direction == Vertical) && IsColMajor)
|| ((Direction == Horizontal) && IsRowMajor),
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
// let's enable LinearAccess only with vectorization because of the product overhead
// FIXME enable DirectAccess with negative strides?
Flags0 = evaluator<ArgType>::Flags,
LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
|| ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
? LinearAccessBit : 0,
Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
};
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse)
: m_argImpl(reverse.nestedExpression()),
m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
{ }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
ReverseCol ? m_cols.value() - col - 1 : col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
ReverseCol ? m_cols.value() - col - 1 : col);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index row, Index col) const
{
enum {
PacketSize = unpacket_traits<PacketType>::size,
OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
};
typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
ReverseRow ? m_rows.value() - row - OffsetRow : row,
ReverseCol ? m_cols.value() - col - OffsetCol : col));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
enum { PacketSize = unpacket_traits<PacketType>::size };
return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
{
// FIXME we could factorize some code with packet(i,j)
enum {
PacketSize = unpacket_traits<PacketType>::size,
OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
};
typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
m_argImpl.template writePacket<LoadMode>(
ReverseRow ? m_rows.value() - row - OffsetRow : row,
ReverseCol ? m_cols.value() - col - OffsetCol : col,
reverse_packet::run(x));
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
enum { PacketSize = unpacket_traits<PacketType>::size };
m_argImpl.template writePacket<LoadMode>
(m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
}
protected:
evaluator<ArgType> m_argImpl;
// If we do not reverse rows, then we do not need to know the number of rows; same for columns
// Nonetheless, in this case it is important to set them to 1 so that the coeff(index) method works fine for vectors.
const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
};
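// Informal example of the reversed index mapping above:
//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(4, 0.0, 3.0);
//   Eigen::VectorXd w = v.reverse();   // w(i) == v(3 - i)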
// -------------------- Diagonal --------------------
template<typename ArgType, int DiagIndex>
struct evaluator<Diagonal<ArgType, DiagIndex> >
: evaluator_base<Diagonal<ArgType, DiagIndex> >
{
typedef Diagonal<ArgType, DiagIndex> XprType;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
Alignment = 0
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)
: m_argImpl(diagonal.nestedExpression()),
m_index(diagonal.index())
{ }
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index) const
{
return m_argImpl.coeff(row + rowOffset(), row + colOffset());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
return m_argImpl.coeff(index + rowOffset(), index + colOffset());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index)
{
return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
}
protected:
evaluator<ArgType> m_argImpl;
const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
private:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
};
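// Informal example of the row/column offsets above:
//   Eigen::Matrix3d M = Eigen::Matrix3d::Random();
//   double a = M.diagonal<1>()(0);   // super-diagonal: reads M(0, 1)
//   double b = M.diagonal(-1)(0);    // sub-diagonal:   reads M(1, 0)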
//----------------------------------------------------------------------
// deprecated code
//----------------------------------------------------------------------
// -------------------- EvalToTemp --------------------
// expression class for evaluating a nested expression into a temporary
template<typename ArgType> class EvalToTemp;
template<typename ArgType>
struct traits<EvalToTemp<ArgType> >
: public traits<ArgType>
{ };
template<typename ArgType>
class EvalToTemp
: public dense_xpr_base<EvalToTemp<ArgType> >::type
{
public:
typedef typename dense_xpr_base<EvalToTemp>::type Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
explicit EvalToTemp(const ArgType& arg)
: m_arg(arg)
{ }
const ArgType& arg() const
{
return m_arg;
}
Index rows() const
{
return m_arg.rows();
}
Index cols() const
{
return m_arg.cols();
}
private:
const ArgType& m_arg;
};
template<typename ArgType>
struct evaluator<EvalToTemp<ArgType> >
: public evaluator<typename ArgType::PlainObject>
{
typedef EvalToTemp<ArgType> XprType;
typedef typename ArgType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
: m_result(xpr.arg())
{
::new (static_cast<Base*>(this)) Base(m_result);
}
// This constructor is used when nesting an EvalTo evaluator in another evaluator
EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
: m_result(arg)
{
::new (static_cast<Base*>(this)) Base(m_result);
}
protected:
PlainObject m_result;
};
} // namespace internal
} // end namespace Eigen
#endif // EIGEN_COREEVALUATORS_H
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COREITERATORS_H
#define EIGEN_COREITERATORS_H
namespace Eigen {
/* This file contains the InnerIterator definitions for the expressions defined in Eigen/Core
*/
namespace internal {
template<typename XprType, typename EvaluatorKind>
class inner_iterator_selector;
}
/** \class InnerIterator
* \brief An InnerIterator allows looping over the elements of any matrix expression.
*
* \warning To be used with care, because an evaluator is constructed every time an InnerIterator is constructed.
*
* A usage sketch is given after the class definition below.
*/
template<typename XprType>
class InnerIterator
{
protected:
typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;
typedef internal::evaluator<XprType> EvaluatorType;
typedef typename internal::traits<XprType>::Scalar Scalar;
public:
/** Construct an iterator over the \a outerId -th row or column of \a xpr */
InnerIterator(const XprType &xpr, const Index &outerId)
: m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize())
{}
/// \returns the value of the current coefficient.
EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); }
/** Increment the iterator \c *this to the next non-zero coefficient.
* Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView
*/
EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; }
/// \returns the column or row index of the current coefficient.
EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
/// \returns the row index of the current coefficient.
EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
/// \returns the column index of the current coefficient.
EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
/// \returns \c true if the iterator \c *this still references a valid coefficient.
EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
protected:
EvaluatorType m_eval;
IteratorType m_iter;
private:
// If you get here, then you're not using the right InnerIterator type, e.g.:
// SparseMatrix<double,RowMajor> A;
// SparseMatrix<double>::InnerIterator it(A,0);
template<typename T> InnerIterator(const EigenBase<T>&,Index outer);
};
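// A possible usage sketch for the dense case (our own example; the matrix, the loop bounds and
// the use of std::cout are assumptions, not part of this header):
//   Eigen::MatrixXd mat = Eigen::MatrixXd::Random(3, 3);
//   for (Eigen::Index outer = 0; outer < mat.outerSize(); ++outer)
//     for (Eigen::InnerIterator<Eigen::MatrixXd> it(mat, outer); it; ++it)
//       std::cout << "(" << it.row() << ", " << it.col() << ") = " << it.value() << "\n";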
namespace internal {
// Generic inner iterator implementation for dense objects
template<typename XprType>
class inner_iterator_selector<XprType, IndexBased>
{
protected:
typedef evaluator<XprType> EvaluatorType;
typedef typename traits<XprType>::Scalar Scalar;
enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize)
: m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize)
{}
EIGEN_STRONG_INLINE Scalar value() const
{
return (IsRowMajor) ? m_eval.coeff(m_outer, m_inner)
: m_eval.coeff(m_inner, m_outer);
}
EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; }
EIGEN_STRONG_INLINE Index index() const { return m_inner; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const EvaluatorType& m_eval;
Index m_inner;
const Index m_outer;
const Index m_end;
};
// For iterator-based evaluators, the inner iterator is already implemented as
// evaluator<>::InnerIterator
template<typename XprType>
class inner_iterator_selector<XprType, IteratorBased>
: public evaluator<XprType>::InnerIterator
{
protected:
typedef typename evaluator<XprType>::InnerIterator Base;
typedef evaluator<XprType> EvaluatorType;
public:
EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/)
: Base(eval, outerId)
{}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_COREITERATORS_H
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -13,26 +13,6 @@
namespace Eigen {
/** \class CwiseBinaryOp
* \ingroup Core_Module
*
* \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
*
* \param BinaryOp template functor implementing the operator
* \param Lhs the type of the left-hand side
* \param Rhs the type of the right-hand side
*
* This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
* It is the return type of binary operators, by which we mean only those binary operators where
* both the left-hand side and the right-hand side are Eigen expressions.
* For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
*
* Most of the time, this is the only way that it is used, so you typically don't have to name
* CwiseBinaryOp types explicitly.
*
* \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
*/
namespace internal {
template<typename BinaryOp, typename Lhs, typename Rhs>
struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
@@ -52,85 +32,85 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
// we still want to handle the case when the result type is different.
typedef typename result_of<
BinaryOp(
const typename Lhs::Scalar&,
const typename Rhs::Scalar&
)
>::type Scalar;
typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind,
typename traits<Rhs>::StorageKind,
BinaryOp>::ret StorageKind;
typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
typename traits<Rhs>::StorageIndex>::type StorageIndex;
typedef typename Lhs::Nested LhsNested;
typedef typename Rhs::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
enum {
Flags = cwise_promote_storage_order<typename traits<Lhs>::StorageKind,typename traits<Rhs>::StorageKind,_LhsNested::Flags & RowMajorBit,_RhsNested::Flags & RowMajorBit>::value
};
};
} // end namespace internal
// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
// that would take two operands of different types. If there were such an example, then this check should be
// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
// currently they take only one typename Scalar template parameter.
// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
// add together a float matrix and a double matrix.
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex<BINOP>::ret \
? int(internal::is_same<typename NumTraits<LHS>::Real, typename NumTraits<RHS>::Real>::value) \
: int(internal::is_same<LHS, RHS>::value)), \
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
class CwiseBinaryOpImpl;

/** \class CwiseBinaryOp
  * \ingroup Core_Module
  *
  * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
  *
  * \tparam BinaryOp template functor implementing the operator
  * \tparam LhsType the type of the left-hand side
  * \tparam RhsType the type of the right-hand side
  *
  * This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
  * It is the return type of binary operators, by which we mean only those binary operators where
  * both the left-hand side and the right-hand side are Eigen expressions.
  * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
  *
  * Most of the time, this is the only way that it is used, so you typically don't have to name
  * CwiseBinaryOp types explicitly.
  *
  * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
  */
template<typename BinaryOp, typename LhsType, typename RhsType>
class CwiseBinaryOp :
  public CwiseBinaryOpImpl<
          BinaryOp, LhsType, RhsType,
          typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,
                                                        typename internal::traits<RhsType>::StorageKind,
                                                        BinaryOp>::ret>,
  internal::no_assignment_operator
{
  public:

    typedef typename internal::remove_all<BinaryOp>::type Functor;
    typedef typename internal::remove_all<LhsType>::type Lhs;
    typedef typename internal::remove_all<RhsType>::type Rhs;

    typedef typename CwiseBinaryOpImpl<
        BinaryOp, LhsType, RhsType,
        typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,
                                                      typename internal::traits<RhsType>::StorageKind,
                                                      BinaryOp>::ret>::Base Base;
    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
    typedef typename internal::ref_selector<LhsType>::type LhsNested;
    typedef typename internal::ref_selector<RhsType>::type RhsNested;
    typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
    typedef typename internal::remove_reference<RhsNested>::type _RhsNested;

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
      : m_lhs(aLhs), m_rhs(aRhs), m_functor(func)
    {
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
      // require the sizes to match
      EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
      eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index rows() const {
      // return the fixed size type if available to enable compile time optimizations
      if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
@@ -138,6 +118,7 @@ class CwiseBinaryOp : internal::no_assignment_operator,
      else
        return m_lhs.rows();
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index cols() const {
      // return the fixed size type if available to enable compile time optimizations
      if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
@@ -147,10 +128,13 @@ class CwiseBinaryOp : internal::no_assignment_operator,
    }

    /** \returns the left hand side nested expression */
    EIGEN_DEVICE_FUNC
    const _LhsNested& lhs() const { return m_lhs; }
    /** \returns the right hand side nested expression */
    EIGEN_DEVICE_FUNC
    const _RhsNested& rhs() const { return m_rhs; }
    /** \returns the functor representing the binary operation */
    EIGEN_DEVICE_FUNC
    const BinaryOp& functor() const { return m_functor; }

  protected:
@@ -159,41 +143,13 @@ class CwiseBinaryOp : internal::no_assignment_operator,
    const BinaryOp m_functor;
};
// Generic API dispatcher
template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
class CwiseBinaryOpImpl
  : public internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
{
public:
  typedef typename internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
};

// The previous specialization for the Dense storage kind, with its explicit
// coeff()/packet() accessors, is removed in favor of the dispatcher above:
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
  : public internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
{
    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
  public:

    typedef typename internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
return derived().functor()(derived().lhs().coeff(row, col),
derived().rhs().coeff(row, col));
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col),
derived().rhs().template packet<LoadMode>(row, col));
}
EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
return derived().functor()(derived().lhs().coeff(index),
derived().rhs().coeff(index));
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
derived().rhs().template packet<LoadMode>(index));
}
};
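// A short usage sketch (not part of the Eigen sources): a custom functor passed to
// binaryExpr() yields a CwiseBinaryOp expression, which stays unevaluated until it is
// assigned to a plain matrix. The functor name AbsMaxOp is made up for the example.
#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <iostream>

struct AbsMaxOp {
  double operator()(double a, double b) const { return std::max(std::abs(a), std::abs(b)); }
};

int example_binary_expr()
{
  Eigen::Matrix2d a, b;
  a <<  1, -5,
        3, -4;
  b << -2,  4,
       -6,  1;

  // The right-hand side is a CwiseBinaryOp<AbsMaxOp, ...>; coefficients are
  // computed only when copied into 'm'.
  Eigen::Matrix2d m = a.binaryExpr(b, AbsMaxOp());
  std::cout << m << "\n";   // prints: 2 5
                            //         6 4
  return 0;
}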
/** replaces \c *this by \c *this - \a other.
@@ -205,8 +161,7 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
{
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}
@@ -219,11 +174,11 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
{
  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

} // end namespace Eigen

#endif // EIGEN_CWISE_BINARY_OP_H
@@ -12,13 +12,24 @@
namespace Eigen {
namespace internal {
template<typename NullaryOp, typename PlainObjectType>
struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>
{
enum {
Flags = traits<PlainObjectType>::Flags & RowMajorBit
};
};
} // namespace internal
/** \class CwiseNullaryOp
  * \ingroup Core_Module
  *
  * \brief Generic expression of a matrix where all coefficients are defined by a functor
  *
  * \tparam NullaryOp template functor implementing the operator
  * \tparam PlainObjectType the underlying plain matrix/array type
  *
  * This class represents an expression of a generic nullary operator.
  * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
@@ -27,33 +38,33 @@ namespace Eigen {
  * However, if you want to write a function returning such an expression, you
  * will need to use this class.
  *
  * The functor NullaryOp must expose one of the following methods:
<table class="manual">
<tr ><td>\c operator()() </td><td>if the procedural generation does not depend on the coefficient entries (e.g., random numbers)</td></tr>
<tr class="alt"><td>\c operator()(Index i)</td><td>if the procedural generation makes sense for vectors only and that it depends on the coefficient index \c i (e.g., linspace) </td></tr>
<tr ><td>\c operator()(Index i,Index j)</td><td>if the procedural generation depends on the matrix coordinates \c i, \c j (e.g., to generate a checkerboard with 0 and 1)</td></tr>
</table>
* It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized for vectors.
*
* See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding
* C++11 random number generators.
*
* A nullary expression can also be used to implement custom sophisticated matrix manipulations
* that cannot be covered by the existing set of natively supported matrix manipulations.
* See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations
* on the behavior of CwiseNullaryOp.
*
* \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr
  */
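// A minimal sketch (not part of the Eigen sources) of the third functor form from the
// table above: an operator()(Index,Index) driving NullaryExpr() to build a checkerboard
// of 0s and 1s. The functor name CheckerboardOp is made up for the example.
#include <Eigen/Dense>

struct CheckerboardOp {
  double operator()(Eigen::Index i, Eigen::Index j) const { return double((i + j) % 2); }
};

Eigen::MatrixXd example_checkerboard(Eigen::Index rows, Eigen::Index cols)
{
  // The expression is a CwiseNullaryOp<CheckerboardOp, MatrixXd>; it is
  // materialized on assignment to the returned MatrixXd.
  return Eigen::MatrixXd::NullaryExpr(rows, cols, CheckerboardOp());
}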
namespace internal {
template<typename NullaryOp, typename PlainObjectType>
struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>
{
enum {
Flags = (traits<PlainObjectType>::Flags
& ( HereditaryBits
| (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
| (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
| (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
CoeffReadCost = functor_traits<NullaryOp>::Cost
};
};
}
template<typename NullaryOp, typename PlainObjectType>
class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type, internal::no_assignment_operator
{
  public:

    typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)

    EIGEN_DEVICE_FUNC
    CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
      : m_rows(rows), m_cols(cols), m_functor(func)
    {
@@ -63,32 +74,13 @@ class CwiseNullaryOp : internal::no_assignment_operator,
            && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const
{
return m_functor(rows, cols);
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
return m_functor.packetOp(row, col);
}
EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
return m_functor(index);
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
return m_functor.packetOp(index);
}
/** \returns the functor representing the nullary operation */ /** \returns the functor representing the nullary operation */
EIGEN_DEVICE_FUNC
const NullaryOp& functor() const { return m_functor; } const NullaryOp& functor() const { return m_functor; }
protected: protected:
...@@ -113,10 +105,10 @@ class CwiseNullaryOp : internal::no_assignment_operator, ...@@ -113,10 +105,10 @@ class CwiseNullaryOp : internal::no_assignment_operator,
*/ */
template<typename Derived> template<typename Derived>
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
{ {
return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func); return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);
} }
/** \returns an expression of a matrix defined by a custom functor \a func /** \returns an expression of a matrix defined by a custom functor \a func
...@@ -132,16 +124,19 @@ DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& f ...@@ -132,16 +124,19 @@ DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& f
* *
* The template parameter \a CustomNullaryOp is the type of the functor. * The template parameter \a CustomNullaryOp is the type of the functor.
* *
* Here is an example with C++11 random generators: \include random_cpp11.cpp
* Output: \verbinclude random_cpp11.out
*
* \sa class CwiseNullaryOp * \sa class CwiseNullaryOp
*/ */
template<typename Derived>
template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, PlainObject>(1, size, func);
  else return CwiseNullaryOp<CustomNullaryOp, PlainObject>(size, 1, func);
}
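// A hedged sketch (not part of the Eigen sources) of the C++11 generator binding
// mentioned above: a lambda taking no arguments is a valid nullary functor, so
// NullaryExpr() can wrap any callable random source.
#include <Eigen/Dense>
#include <random>

Eigen::VectorXd example_random_uniform(Eigen::Index size, unsigned seed)
{
  std::mt19937 gen(seed);
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  // Each coefficient is produced by one call to the lambda; the result is
  // evaluated into the returned VectorXd.
  return Eigen::VectorXd::NullaryExpr(size, [&]() { return dist(gen); });
}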
/** \returns an expression of a matrix defined by a custom functor \a func /** \returns an expression of a matrix defined by a custom functor \a func
...@@ -155,10 +150,10 @@ DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func) ...@@ -155,10 +150,10 @@ DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
*/ */
template<typename Derived> template<typename Derived>
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func) DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
{ {
return CwiseNullaryOp<CustomNullaryOp, Derived>(RowsAtCompileTime, ColsAtCompileTime, func); return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);
} }
/** \returns an expression of a constant matrix of value \a value /** \returns an expression of a constant matrix of value \a value
...@@ -197,7 +192,7 @@ DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value) ...@@ -197,7 +192,7 @@ DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
* \sa class CwiseNullaryOp * \sa class CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(Index size, const Scalar& value) DenseBase<Derived>::Constant(Index size, const Scalar& value)
{ {
return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value)); return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
...@@ -213,53 +208,40 @@ DenseBase<Derived>::Constant(Index size, const Scalar& value) ...@@ -213,53 +208,40 @@ DenseBase<Derived>::Constant(Index size, const Scalar& value)
* \sa class CwiseNullaryOp * \sa class CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(const Scalar& value) DenseBase<Derived>::Constant(const Scalar& value)
{ {
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value)); return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));
} }
/** /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
  * \brief Sets a linearly spaced vector.
* *
* The function generates 'size' equally spaced values in the closed interval [low,high]. * \sa LinSpaced(Index,Scalar,Scalar), setLinSpaced(Index,const Scalar&,const Scalar&)
* This particular version of LinSpaced() uses sequential access, i.e. vector access is
* assumed to be a(0), a(1), ..., a(size). This assumption allows for better vectorization
* and yields faster code than the random access version.
*
* When size is set to 1, a vector of length 1 containing 'high' is returned.
*
* \only_for_vectors
*
* Example: \include DenseBase_LinSpaced_seq.cpp
* Output: \verbinclude DenseBase_LinSpaced_seq.out
*
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size)); return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
} }
/** /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
* \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&) *
* Special version for fixed size types which does not require the size parameter. * \sa LinSpaced(Scalar,Scalar)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,false>(low,high,Derived::SizeAtCompileTime)); return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
} }
/** /**
  * \brief Sets a linearly spaced vector.
* *
* The function generates 'size' equally spaced values in the closed interval [low,high]. * The function generates 'size' equally spaced values in the closed interval [low,high].
* When size is set to 1, a vector of length 1 containing 'high' is returned. * When size is set to 1, a vector of length 1 containing 'high' is returned.
...@@ -269,14 +251,24 @@ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig ...@@ -269,14 +251,24 @@ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig
* Example: \include DenseBase_LinSpaced.cpp * Example: \include DenseBase_LinSpaced.cpp
* Output: \verbinclude DenseBase_LinSpaced.out * Output: \verbinclude DenseBase_LinSpaced.out
* *
  * For integer scalar types, an even spacing is possible if and only if the length of the range,
  * i.e., \c high-low, is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the
  * number of values \c high-low+1 (meaning each value can be repeated the same number of times).
  * If neither of these two conditions is satisfied, then \c high is lowered to the largest value
  * satisfying one of them.
* Here are some examples:
*
* Example: \include DenseBase_LinSpacedInt.cpp
* Output: \verbinclude DenseBase_LinSpacedInt.out
*
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
  */
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
}
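// A small sketch (not part of the Eigen sources) of the integer behaviour described
// above: with 4 values requested in [0,11], neither condition holds, so the spacing
// falls back to an even step of 3 and the effective upper bound becomes 9.
#include <Eigen/Dense>
#include <iostream>

int example_integer_linspaced()
{
  Eigen::VectorXi v = Eigen::VectorXi::LinSpaced(4, 0, 11);
  std::cout << v.transpose() << "\n";   // expected: 0 3 6 9
  return 0;
}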
/** /**
...@@ -284,22 +276,23 @@ DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high) ...@@ -284,22 +276,23 @@ DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
* Special version for fixed size types which does not require the size parameter. * Special version for fixed size types which does not require the size parameter.
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high) DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,true>(low,high,Derived::SizeAtCompileTime)); return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
} }
/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ /** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
template<typename Derived> template<typename Derived>
bool DenseBase<Derived>::isApproxToConstant EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant
(const Scalar& value, RealScalar prec) const (const Scalar& val, const RealScalar& prec) const
{ {
typename internal::nested_eval<Derived,1>::type self(derived());
for(Index j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(Index i = 0; i < rows(); ++i) for(Index i = 0; i < rows(); ++i)
if(!internal::isApprox(this->coeff(i, j), value, prec)) if(!internal::isApprox(self.coeff(i, j), val, prec))
return false; return false;
return true; return true;
} }
...@@ -308,33 +301,33 @@ bool DenseBase<Derived>::isApproxToConstant ...@@ -308,33 +301,33 @@ bool DenseBase<Derived>::isApproxToConstant
* *
* \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
template<typename Derived> template<typename Derived>
bool DenseBase<Derived>::isConstant EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant
(const Scalar& value, RealScalar prec) const (const Scalar& val, const RealScalar& prec) const
{ {
return isApproxToConstant(value, prec); return isApproxToConstant(val, prec);
} }
/** Alias for setConstant(): sets all coefficients in this expression to \a value. /** Alias for setConstant(): sets all coefficients in this expression to \a val.
* *
* \sa setConstant(), Constant(), class CwiseNullaryOp * \sa setConstant(), Constant(), class CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& value) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
{ {
setConstant(value); setConstant(val);
} }
/** Sets all coefficients in this expression to \a value. /** Sets all coefficients in this expression to value \a val.
* *
* \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
{ {
return derived() = Constant(rows(), cols(), value); return derived() = Constant(rows(), cols(), val);
} }
/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value. /** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val.
* *
* \only_for_vectors * \only_for_vectors
* *
...@@ -344,18 +337,18 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value ...@@ -344,18 +337,18 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value) PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)
{ {
resize(size); resize(size);
return setConstant(value); return setConstant(val);
} }
/** Resizes to the given size, and sets all coefficients in this expression to the given \a value. /** Resizes to the given size, and sets all coefficients in this expression to the given value \a val.
* *
* \param rows the new number of rows * \param rows the new number of rows
* \param cols the new number of columns * \param cols the new number of columns
* \param value the value to which all coefficients are set * \param val the value to which all coefficients are set
* *
* Example: \include Matrix_setConstant_int_int.cpp * Example: \include Matrix_setConstant_int_int.cpp
* Output: \verbinclude Matrix_setConstant_int_int.out * Output: \verbinclude Matrix_setConstant_int_int.out
...@@ -363,15 +356,15 @@ PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value) ...@@ -363,15 +356,15 @@ PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value)
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& value) PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& val)
{ {
resize(rows, cols); resize(rows, cols);
return setConstant(value); return setConstant(val);
} }
/** /**
  * \brief Sets a linearly spaced vector.
* *
* The function generates 'size' equally spaced values in the closed interval [low,high]. * The function generates 'size' equally spaced values in the closed interval [low,high].
* When size is set to 1, a vector of length 1 containing 'high' is returned. * When size is set to 1, a vector of length 1 containing 'high' is returned.
...@@ -381,27 +374,33 @@ PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& valu ...@@ -381,27 +374,33 @@ PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& valu
* Example: \include DenseBase_setLinSpaced.cpp * Example: \include DenseBase_setLinSpaced.cpp
* Output: \verbinclude DenseBase_setLinSpaced.out * Output: \verbinclude DenseBase_setLinSpaced.out
* *
* \sa CwiseNullaryOp * For integer scalar types, do not miss the explanations on the definition
* of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
*
* \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize));
}
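// A tiny usage sketch (not part of the Eigen sources): setLinSpaced() fills an existing
// vector in place, which is equivalent to assigning a LinSpaced() expression.
#include <Eigen/Dense>

void example_set_lin_spaced()
{
  Eigen::VectorXd v(5);
  v.setLinSpaced(5, 0.0, 1.0);                                   // 0 0.25 0.5 0.75 1
  Eigen::VectorXd w = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);   // same values
  (void)w;
}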
/** /**
  * \brief Sets a linearly spaced vector.
  *
  * The function fills \c *this with equally spaced values in the closed interval [low,high].
* When size is set to 1, a vector of length 1 containing 'high' is returned. * When size is set to 1, a vector of length 1 containing 'high' is returned.
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp * For integer scalar types, do not miss the explanations on the definition
* of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
*
* \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return setLinSpaced(size(), low, high); return setLinSpaced(size(), low, high);
...@@ -424,7 +423,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, ...@@ -424,7 +423,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low,
* \sa Zero(), Zero(Index) * \sa Zero(), Zero(Index)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero(Index rows, Index cols) DenseBase<Derived>::Zero(Index rows, Index cols)
{ {
return Constant(rows, cols, Scalar(0)); return Constant(rows, cols, Scalar(0));
...@@ -447,7 +446,7 @@ DenseBase<Derived>::Zero(Index rows, Index cols) ...@@ -447,7 +446,7 @@ DenseBase<Derived>::Zero(Index rows, Index cols)
* \sa Zero(), Zero(Index,Index) * \sa Zero(), Zero(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero(Index size) DenseBase<Derived>::Zero(Index size)
{ {
return Constant(size, Scalar(0)); return Constant(size, Scalar(0));
...@@ -464,7 +463,7 @@ DenseBase<Derived>::Zero(Index size) ...@@ -464,7 +463,7 @@ DenseBase<Derived>::Zero(Index size)
* \sa Zero(Index), Zero(Index,Index) * \sa Zero(Index), Zero(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero() DenseBase<Derived>::Zero()
{ {
return Constant(Scalar(0)); return Constant(Scalar(0));
...@@ -479,11 +478,12 @@ DenseBase<Derived>::Zero() ...@@ -479,11 +478,12 @@ DenseBase<Derived>::Zero()
* \sa class CwiseNullaryOp, Zero() * \sa class CwiseNullaryOp, Zero()
*/ */
template<typename Derived> template<typename Derived>
bool DenseBase<Derived>::isZero(RealScalar prec) const EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const
{ {
typename internal::nested_eval<Derived,1>::type self(derived());
for(Index j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(Index i = 0; i < rows(); ++i) for(Index i = 0; i < rows(); ++i)
if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec)) if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<Scalar>(1), prec))
return false; return false;
return true; return true;
} }
...@@ -496,7 +496,7 @@ bool DenseBase<Derived>::isZero(RealScalar prec) const ...@@ -496,7 +496,7 @@ bool DenseBase<Derived>::isZero(RealScalar prec) const
* \sa class CwiseNullaryOp, Zero() * \sa class CwiseNullaryOp, Zero()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero() EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
{ {
return setConstant(Scalar(0)); return setConstant(Scalar(0));
} }
...@@ -511,10 +511,10 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero() ...@@ -511,10 +511,10 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
* \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setZero(Index size) PlainObjectBase<Derived>::setZero(Index newSize)
{ {
resize(size); resize(newSize);
return setConstant(Scalar(0)); return setConstant(Scalar(0));
} }
...@@ -529,7 +529,7 @@ PlainObjectBase<Derived>::setZero(Index size) ...@@ -529,7 +529,7 @@ PlainObjectBase<Derived>::setZero(Index size)
* \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setZero(Index rows, Index cols) PlainObjectBase<Derived>::setZero(Index rows, Index cols)
{ {
resize(rows, cols); resize(rows, cols);
...@@ -553,7 +553,7 @@ PlainObjectBase<Derived>::setZero(Index rows, Index cols) ...@@ -553,7 +553,7 @@ PlainObjectBase<Derived>::setZero(Index rows, Index cols)
* \sa Ones(), Ones(Index), isOnes(), class Ones * \sa Ones(), Ones(Index), isOnes(), class Ones
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones(Index rows, Index cols) DenseBase<Derived>::Ones(Index rows, Index cols)
{ {
return Constant(rows, cols, Scalar(1)); return Constant(rows, cols, Scalar(1));
...@@ -561,7 +561,7 @@ DenseBase<Derived>::Ones(Index rows, Index cols) ...@@ -561,7 +561,7 @@ DenseBase<Derived>::Ones(Index rows, Index cols)
/** \returns an expression of a vector where all coefficients equal one. /** \returns an expression of a vector where all coefficients equal one.
* *
* The parameter \a size is the size of the returned vector. * The parameter \a newSize is the size of the returned vector.
* Must be compatible with this MatrixBase type. * Must be compatible with this MatrixBase type.
* *
* \only_for_vectors * \only_for_vectors
...@@ -576,10 +576,10 @@ DenseBase<Derived>::Ones(Index rows, Index cols) ...@@ -576,10 +576,10 @@ DenseBase<Derived>::Ones(Index rows, Index cols)
* \sa Ones(), Ones(Index,Index), isOnes(), class Ones * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones(Index size) DenseBase<Derived>::Ones(Index newSize)
{ {
return Constant(size, Scalar(1)); return Constant(newSize, Scalar(1));
} }
/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. /** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
...@@ -593,7 +593,7 @@ DenseBase<Derived>::Ones(Index size) ...@@ -593,7 +593,7 @@ DenseBase<Derived>::Ones(Index size)
* \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones() DenseBase<Derived>::Ones()
{ {
return Constant(Scalar(1)); return Constant(Scalar(1));
...@@ -608,8 +608,8 @@ DenseBase<Derived>::Ones() ...@@ -608,8 +608,8 @@ DenseBase<Derived>::Ones()
* \sa class CwiseNullaryOp, Ones() * \sa class CwiseNullaryOp, Ones()
*/ */
template<typename Derived> template<typename Derived>
bool DenseBase<Derived>::isOnes EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes
(RealScalar prec) const (const RealScalar& prec) const
{ {
return isApproxToConstant(Scalar(1), prec); return isApproxToConstant(Scalar(1), prec);
} }
...@@ -622,12 +622,12 @@ bool DenseBase<Derived>::isOnes ...@@ -622,12 +622,12 @@ bool DenseBase<Derived>::isOnes
* \sa class CwiseNullaryOp, Ones() * \sa class CwiseNullaryOp, Ones()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes() EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
{ {
return setConstant(Scalar(1)); return setConstant(Scalar(1));
} }
/** Resizes to the given \a size, and sets all coefficients in this expression to one. /** Resizes to the given \a newSize, and sets all coefficients in this expression to one.
* *
* \only_for_vectors * \only_for_vectors
* *
...@@ -637,10 +637,10 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes() ...@@ -637,10 +637,10 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
* \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setOnes(Index size) PlainObjectBase<Derived>::setOnes(Index newSize)
{ {
resize(size); resize(newSize);
return setConstant(Scalar(1)); return setConstant(Scalar(1));
} }
...@@ -655,7 +655,7 @@ PlainObjectBase<Derived>::setOnes(Index size) ...@@ -655,7 +655,7 @@ PlainObjectBase<Derived>::setOnes(Index size)
* \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setOnes(Index rows, Index cols) PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
{ {
resize(rows, cols); resize(rows, cols);
...@@ -679,7 +679,7 @@ PlainObjectBase<Derived>::setOnes(Index rows, Index cols) ...@@ -679,7 +679,7 @@ PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
* \sa Identity(), setIdentity(), isIdentity() * \sa Identity(), setIdentity(), isIdentity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity(Index rows, Index cols) MatrixBase<Derived>::Identity(Index rows, Index cols)
{ {
return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>()); return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
...@@ -696,7 +696,7 @@ MatrixBase<Derived>::Identity(Index rows, Index cols) ...@@ -696,7 +696,7 @@ MatrixBase<Derived>::Identity(Index rows, Index cols)
* \sa Identity(Index,Index), setIdentity(), isIdentity() * \sa Identity(Index,Index), setIdentity(), isIdentity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity() MatrixBase<Derived>::Identity()
{ {
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
...@@ -714,20 +714,21 @@ MatrixBase<Derived>::Identity() ...@@ -714,20 +714,21 @@ MatrixBase<Derived>::Identity()
*/ */
template<typename Derived> template<typename Derived>
bool MatrixBase<Derived>::isIdentity bool MatrixBase<Derived>::isIdentity
(RealScalar prec) const (const RealScalar& prec) const
{ {
typename internal::nested_eval<Derived,1>::type self(derived());
for(Index j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
{ {
for(Index i = 0; i < rows(); ++i) for(Index i = 0; i < rows(); ++i)
{ {
if(i == j) if(i == j)
{ {
if(!internal::isApprox(this->coeff(i, j), static_cast<Scalar>(1), prec)) if(!internal::isApprox(self.coeff(i, j), static_cast<Scalar>(1), prec))
return false; return false;
} }
else else
{ {
if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<RealScalar>(1), prec)) if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<RealScalar>(1), prec))
return false; return false;
} }
} }
...@@ -740,6 +741,7 @@ namespace internal { ...@@ -740,6 +741,7 @@ namespace internal {
template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)> template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>
struct setIdentity_impl struct setIdentity_impl
{ {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Derived& run(Derived& m) static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{ {
return m = Derived::Identity(m.rows(), m.cols()); return m = Derived::Identity(m.rows(), m.cols());
...@@ -749,11 +751,11 @@ struct setIdentity_impl ...@@ -749,11 +751,11 @@ struct setIdentity_impl
template<typename Derived> template<typename Derived>
struct setIdentity_impl<Derived, true> struct setIdentity_impl<Derived, true>
{ {
typedef typename Derived::Index Index; EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Derived& run(Derived& m) static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{ {
m.setZero(); m.setZero();
const Index size = (std::min)(m.rows(), m.cols()); const Index size = numext::mini(m.rows(), m.cols());
for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
return m; return m;
} }
...@@ -769,7 +771,7 @@ struct setIdentity_impl<Derived, true> ...@@ -769,7 +771,7 @@ struct setIdentity_impl<Derived, true>
* \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
{
  return internal::setIdentity_impl<Derived>::run(derived());
}
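// A brief usage sketch (not part of the Eigen sources): setIdentity() keeps the current
// size, while the (rows,cols) overload documented just below resizes first; isIdentity()
// then verifies the result within a tolerance.
#include <Eigen/Dense>
#include <cassert>

void example_set_identity()
{
  Eigen::MatrixXd m(3, 3);
  m.setIdentity();                 // sets *this to the 3x3 identity
  assert(m.isIdentity());

  Eigen::MatrixXd n;
  n.setIdentity(4, 4);             // resizes to 4x4, then sets the identity
  assert(n.isIdentity(1e-12));
}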
...@@ -785,7 +787,7 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity() ...@@ -785,7 +787,7 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
{ {
derived().resize(rows, cols); derived().resize(rows, cols);
return setIdentity(); return setIdentity();
...@@ -798,10 +800,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index ...@@ -798,10 +800,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index
* \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index size, Index i) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(size,size), i); return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i);
} }
/** \returns an expression of the i-th unit (basis) vector. /** \returns an expression of the i-th unit (basis) vector.
...@@ -813,7 +815,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa ...@@ -813,7 +815,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(),i); return BasisReturnType(SquareMatrixType::Identity(),i);
...@@ -826,7 +828,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa ...@@ -826,7 +828,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX() EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
{ return Derived::Unit(0); } { return Derived::Unit(0); }
/** \returns an expression of the Y axis unit vector (0,1{,0}^*) /** \returns an expression of the Y axis unit vector (0,1{,0}^*)
...@@ -836,7 +838,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa ...@@ -836,7 +838,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY() EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
{ return Derived::Unit(1); } { return Derived::Unit(1); }
/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) /** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
...@@ -846,7 +848,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa ...@@ -846,7 +848,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ() EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
{ return Derived::Unit(2); } { return Derived::Unit(2); }
/** \returns an expression of the W axis unit vector (0,0,0,1) /** \returns an expression of the W axis unit vector (0,0,0,1)
...@@ -856,7 +858,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa ...@@ -856,7 +858,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
{ return Derived::Unit(3); }
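// A short sketch (not part of the Eigen sources): the typed shortcuts above are
// equivalent to Unit(i) on fixed-size vectors, and Unit(size,i) builds a basis
// vector of a runtime size.
#include <Eigen/Dense>

void example_unit_vectors()
{
  Eigen::Vector3d ey = Eigen::Vector3d::UnitY();        // (0,1,0)
  Eigen::Vector4d ew = Eigen::Vector4d::Unit(3);        // same as Vector4d::UnitW()
  Eigen::VectorXd e2 = Eigen::VectorXd::Unit(5, 2);     // length-5 vector with a 1 at index 2
  (void)ey; (void)ew; (void)e2;
}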
} // end namespace Eigen
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_TERNARY_OP_H
#define EIGEN_CWISE_TERNARY_OP_H
namespace Eigen {
namespace internal {
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > {
// we must not inherit from traits<Arg1> since it has
// the potential to cause problems with MSVC
typedef typename remove_all<Arg1>::type Ancestor;
typedef typename traits<Ancestor>::XprKind XprKind;
enum {
RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
};
// even though we require Arg1, Arg2, and Arg3 to have the same scalar type
// (see CwiseTernaryOp constructor),
// we still want to handle the case when the result type is different.
typedef typename result_of<TernaryOp(
const typename Arg1::Scalar&, const typename Arg2::Scalar&,
const typename Arg3::Scalar&)>::type Scalar;
typedef typename internal::traits<Arg1>::StorageKind StorageKind;
typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;
typedef typename Arg1::Nested Arg1Nested;
typedef typename Arg2::Nested Arg2Nested;
typedef typename Arg3::Nested Arg3Nested;
typedef typename remove_reference<Arg1Nested>::type _Arg1Nested;
typedef typename remove_reference<Arg2Nested>::type _Arg2Nested;
typedef typename remove_reference<Arg3Nested>::type _Arg3Nested;
enum { Flags = _Arg1Nested::Flags & RowMajorBit };
};
} // end namespace internal
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3,
typename StorageKind>
class CwiseTernaryOpImpl;
/** \class CwiseTernaryOp
* \ingroup Core_Module
*
* \brief Generic expression where a coefficient-wise ternary operator is
* applied to two expressions
*
* \tparam TernaryOp template functor implementing the operator
* \tparam Arg1Type the type of the first argument
* \tparam Arg2Type the type of the second argument
* \tparam Arg3Type the type of the third argument
*
* This class represents an expression where a coefficient-wise ternary
* operator is applied to three expressions.
* It is the return type of ternary operators, by which we mean only those
* ternary operators where
* all three arguments are Eigen expressions.
* For example, the return type of betainc(matrix1, matrix2, matrix3) is a
* CwiseTernaryOp.
*
* Most of the time, this is the only way that it is used, so you typically
* don't have to name
* CwiseTernaryOp types explicitly.
*
* \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const
* MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,
* class CwiseUnaryOp, class CwiseNullaryOp
*/
template <typename TernaryOp, typename Arg1Type, typename Arg2Type,
typename Arg3Type>
class CwiseTernaryOp : public CwiseTernaryOpImpl<
TernaryOp, Arg1Type, Arg2Type, Arg3Type,
typename internal::traits<Arg1Type>::StorageKind>,
internal::no_assignment_operator
{
public:
typedef typename internal::remove_all<Arg1Type>::type Arg1;
typedef typename internal::remove_all<Arg2Type>::type Arg2;
typedef typename internal::remove_all<Arg3Type>::type Arg3;
typedef typename CwiseTernaryOpImpl<
TernaryOp, Arg1Type, Arg2Type, Arg3Type,
typename internal::traits<Arg1Type>::StorageKind>::Base Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)
typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;
typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;
typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;
typedef typename internal::remove_reference<Arg1Nested>::type _Arg1Nested;
typedef typename internal::remove_reference<Arg2Nested>::type _Arg2Nested;
typedef typename internal::remove_reference<Arg3Nested>::type _Arg3Nested;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2,
const Arg3& a3,
const TernaryOp& func = TernaryOp())
: m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {
// require the sizes to match
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)
    // require the storage kinds to match
EIGEN_STATIC_ASSERT((internal::is_same<
typename internal::traits<Arg1Type>::StorageKind,
typename internal::traits<Arg2Type>::StorageKind>::value),
STORAGE_KIND_MUST_MATCH)
EIGEN_STATIC_ASSERT((internal::is_same<
typename internal::traits<Arg1Type>::StorageKind,
typename internal::traits<Arg3Type>::StorageKind>::value),
STORAGE_KIND_MUST_MATCH)
eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() &&
a1.rows() == a3.rows() && a1.cols() == a3.cols());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index rows() const {
    // prefer the row count of an argument whose size is fixed at compile
    // time, if any, to enable compile-time optimizations
if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
RowsAtCompileTime == Dynamic &&
internal::traits<typename internal::remove_all<Arg2Nested>::type>::
RowsAtCompileTime == Dynamic)
return m_arg3.rows();
else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
RowsAtCompileTime == Dynamic &&
internal::traits<typename internal::remove_all<Arg3Nested>::type>::
RowsAtCompileTime == Dynamic)
return m_arg2.rows();
else
return m_arg1.rows();
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index cols() const {
    // prefer the column count of an argument whose size is fixed at compile
    // time, if any, to enable compile-time optimizations
if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
ColsAtCompileTime == Dynamic &&
internal::traits<typename internal::remove_all<Arg2Nested>::type>::
ColsAtCompileTime == Dynamic)
return m_arg3.cols();
else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
ColsAtCompileTime == Dynamic &&
internal::traits<typename internal::remove_all<Arg3Nested>::type>::
ColsAtCompileTime == Dynamic)
return m_arg2.cols();
else
return m_arg1.cols();
}
/** \returns the first argument nested expression */
EIGEN_DEVICE_FUNC
const _Arg1Nested& arg1() const { return m_arg1; }
  /** \returns the second argument nested expression */
EIGEN_DEVICE_FUNC
const _Arg2Nested& arg2() const { return m_arg2; }
/** \returns the third argument nested expression */
EIGEN_DEVICE_FUNC
const _Arg3Nested& arg3() const { return m_arg3; }
/** \returns the functor representing the ternary operation */
EIGEN_DEVICE_FUNC
const TernaryOp& functor() const { return m_functor; }
protected:
Arg1Nested m_arg1;
Arg2Nested m_arg2;
Arg3Nested m_arg3;
const TernaryOp m_functor;
};
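/** Usage sketch (illustrative only; it assumes the Eigen/Dense header and a
  * hypothetical user-defined functor named clamp_op -- any coefficient-wise
  * ternary functor with a matching operator() can be used the same way):
  * \code
  * struct clamp_op {
  *   double operator()(const double& v, const double& lo, const double& hi) const
  *   { return v < lo ? lo : (v > hi ? hi : v); }
  * };
  *
  * Eigen::ArrayXd v(3), lo(3), hi(3);
  * v << -1.0, 0.5, 2.0;
  * lo.setConstant(0.0);
  * hi.setConstant(1.0);
  *
  * // The ternary expression is lazy; it is only evaluated on assignment.
  * Eigen::CwiseTernaryOp<clamp_op, const Eigen::ArrayXd, const Eigen::ArrayXd, const Eigen::ArrayXd>
  *     expr(v, lo, hi, clamp_op());
  * Eigen::ArrayXd clamped = expr;   // 0, 0.5, 1
  * \endcode
  */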
// Generic API dispatcher
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3,
typename StorageKind>
class CwiseTernaryOpImpl
: public internal::generic_xpr_base<
CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type {
public:
typedef typename internal::generic_xpr_base<
CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type Base;
};
} // end namespace Eigen
#endif // EIGEN_CWISE_TERNARY_OP_H
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -13,41 +13,18 @@

namespace Eigen {

-/** \class CwiseUnaryOp
-  * \ingroup Core_Module
-  *
-  * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
-  *
-  * \param UnaryOp template functor implementing the operator
-  * \param XprType the type of the expression to which we are applying the unary operator
-  *
-  * This class represents an expression where a unary operator is applied to an expression.
-  * It is the return type of all operations taking exactly 1 input expression, regardless of the
-  * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
-  * is considered unary, because only the right-hand side is an expression, and its
-  * return type is a specialization of CwiseUnaryOp.
-  *
-  * Most of the time, this is the only way that it is used, so you typically don't have to name
-  * CwiseUnaryOp types explicitly.
-  *
-  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
-  */
namespace internal {
template<typename UnaryOp, typename XprType>
struct traits<CwiseUnaryOp<UnaryOp, XprType> >
 : traits<XprType>
{
  typedef typename result_of<
-                     UnaryOp(typename XprType::Scalar)
+                     UnaryOp(const typename XprType::Scalar&)
                   >::type Scalar;
  typedef typename XprType::Nested XprTypeNested;
  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
  enum {
-    Flags = _XprTypeNested::Flags & (
-      HereditaryBits | LinearAccessBit | AlignedBit
-      | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
-    CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+    Flags = _XprTypeNested::Flags & RowMajorBit
  };
};
}

@@ -55,70 +32,70 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
template<typename UnaryOp, typename XprType, typename StorageKind>
class CwiseUnaryOpImpl;

+/** \class CwiseUnaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
+  *
+  * \tparam UnaryOp template functor implementing the operator
+  * \tparam XprType the type of the expression to which we are applying the unary operator
+  *
+  * This class represents an expression where a unary operator is applied to an expression.
+  * It is the return type of all operations taking exactly 1 input expression, regardless of the
+  * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
+  * is considered unary, because only the right-hand side is an expression, and its
+  * return type is a specialization of CwiseUnaryOp.
+  *
+  * Most of the time, this is the only way that it is used, so you typically don't have to name
+  * CwiseUnaryOp types explicitly.
+  *
+  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
+  */
template<typename UnaryOp, typename XprType>
-class CwiseUnaryOp : internal::no_assignment_operator,
-  public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>
+class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>, internal::no_assignment_operator
{
  public:

    typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base;
    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
+    typedef typename internal::ref_selector<XprType>::type XprTypeNested;
+    typedef typename internal::remove_all<XprType>::type NestedExpression;

-    inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
      : m_xpr(xpr), m_functor(func) {}

-    EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
-    EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    Index rows() const { return m_xpr.rows(); }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    Index cols() const { return m_xpr.cols(); }

    /** \returns the functor representing the unary operation */
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const UnaryOp& functor() const { return m_functor; }

    /** \returns the nested expression */
-    const typename internal::remove_all<typename XprType::Nested>::type&
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    const typename internal::remove_all<XprTypeNested>::type&
    nestedExpression() const { return m_xpr; }

    /** \returns the nested expression */
-    typename internal::remove_all<typename XprType::Nested>::type&
-    nestedExpression() { return m_xpr.const_cast_derived(); }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    typename internal::remove_all<XprTypeNested>::type&
+    nestedExpression() { return m_xpr; }

  protected:
-    typename XprType::Nested m_xpr;
+    XprTypeNested m_xpr;
    const UnaryOp m_functor;
};

-// This is the generic implementation for dense storage.
-// It can be used for any expression types implementing the dense concept.
-template<typename UnaryOp, typename XprType>
-class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
-  : public internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
+// Generic API dispatcher
+template<typename UnaryOp, typename XprType, typename StorageKind>
+class CwiseUnaryOpImpl
+  : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
{
  public:
-    typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
-    typedef typename internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
-    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
-    {
-      return derived().functor()(derived().nestedExpression().coeff(row, col));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
-    {
-      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
-    }
-
-    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
-    {
-      return derived().functor()(derived().nestedExpression().coeff(index));
-    }
-
-    template<int LoadMode>
-    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
-    {
-      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
-    }
+    typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
};

} // end namespace Eigen
......
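/** Usage sketch for CwiseUnaryOp (illustrative only; it assumes Eigen/Dense
  * and a C++11 compiler for the lambda -- both cwiseAbs() and unaryExpr()
  * return lazy CwiseUnaryOp expressions, so the type rarely needs to be
  * spelled out):
  * \code
  * Eigen::MatrixXd m(2,2);
  * m << 1.0, -2.0,
  *     -3.0,  4.0;
  * Eigen::MatrixXd a = m.cwiseAbs();                                // |m|
  * Eigen::MatrixXd s = m.unaryExpr([](double x) { return x * x; }); // squared coefficient-wise
  * \endcode
  */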
@@ -12,33 +12,19 @@

namespace Eigen {

-/** \class CwiseUnaryView
-  * \ingroup Core_Module
-  *
-  * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
-  *
-  * \param ViewOp template functor implementing the view
-  * \param MatrixType the type of the matrix we are applying the unary operator
-  *
-  * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector.
-  * It is the return type of real() and imag(), and most of the time this is the only way it is used.
-  *
-  * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
-  */
namespace internal {
template<typename ViewOp, typename MatrixType>
struct traits<CwiseUnaryView<ViewOp, MatrixType> >
 : traits<MatrixType>
{
  typedef typename result_of<
-                     ViewOp(typename traits<MatrixType>::Scalar)
+                     ViewOp(const typename traits<MatrixType>::Scalar&)
                   >::type Scalar;
  typedef typename MatrixType::Nested MatrixTypeNested;
  typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
  enum {
-    Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)),
-    CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+    Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions
    MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
    // need to cast the sizeof's from size_t to int explicitly, otherwise:
    // "error: no integral type can represent all of the enumerator values

@@ -55,16 +41,30 @@ struct traits<CwiseUnaryView<ViewOp, MatrixType> >
template<typename ViewOp, typename MatrixType, typename StorageKind>
class CwiseUnaryViewImpl;

+/** \class CwiseUnaryView
+  * \ingroup Core_Module
+  *
+  * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
+  *
+  * \tparam ViewOp template functor implementing the view
+  * \tparam MatrixType the type of the matrix we are applying the unary operator
+  *
+  * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector.
+  * It is the return type of real() and imag(), and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
+  */
template<typename ViewOp, typename MatrixType>
-class CwiseUnaryView : internal::no_assignment_operator,
-  public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
+class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
{
  public:

    typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
+    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
+    typedef typename internal::remove_all<MatrixType>::type NestedExpression;

-    inline CwiseUnaryView(const MatrixType& mat, const ViewOp& func = ViewOp())
+    explicit inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())
      : m_matrix(mat), m_functor(func) {}

    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)

@@ -76,19 +76,27 @@ class CwiseUnaryView : internal::no_assignment_operator,
    const ViewOp& functor() const { return m_functor; }

    /** \returns the nested expression */
-    const typename internal::remove_all<typename MatrixType::Nested>::type&
+    const typename internal::remove_all<MatrixTypeNested>::type&
    nestedExpression() const { return m_matrix; }

    /** \returns the nested expression */
-    typename internal::remove_all<typename MatrixType::Nested>::type&
+    typename internal::remove_reference<MatrixTypeNested>::type&
    nestedExpression() { return m_matrix.const_cast_derived(); }

  protected:
-    // FIXME changed from MatrixType::Nested because of a weird compilation error with sun CC
-    typename internal::nested<MatrixType>::type m_matrix;
+    MatrixTypeNested m_matrix;
    ViewOp m_functor;
};

+// Generic API dispatcher
+template<typename ViewOp, typename XprType, typename StorageKind>
+class CwiseUnaryViewImpl
+  : public internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type
+{
+  public:
+    typedef typename internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type Base;
+};
+
template<typename ViewOp, typename MatrixType>
class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
  : public internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type

@@ -99,36 +107,22 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
    typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)
+
+    EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); }
+    EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); }

-    inline Index innerStride() const
+    EIGEN_DEVICE_FUNC inline Index innerStride() const
    {
      return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
    }

-    inline Index outerStride() const
+    EIGEN_DEVICE_FUNC inline Index outerStride() const
    {
      return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
    }

-    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
-    {
-      return derived().functor()(derived().nestedExpression().coeff(row, col));
-    }
-
-    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
-    {
-      return derived().functor()(derived().nestedExpression().coeff(index));
-    }
-
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
-    {
-      return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col));
-    }
-
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
-    {
-      return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index));
-    }
+  protected:
+    EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)
};

} // end namespace Eigen
......
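/** Usage sketch for CwiseUnaryView (illustrative only, assuming Eigen/Dense):
  * \code
  * Eigen::MatrixXcd m = Eigen::MatrixXcd::Random(2,2);
  * // real() and imag() return writable CwiseUnaryView expressions backed by
  * // the storage of m, so assigning to them modifies m in place.
  * m.real() = Eigen::MatrixXd::Zero(2,2);
  * m.imag() = Eigen::MatrixXd::Identity(2,2);
  * \endcode
  */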
...@@ -13,6 +13,16 @@ ...@@ -13,6 +13,16 @@
namespace Eigen { namespace Eigen {
namespace internal {
// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
// This dummy function simply aims at checking that at compile time.
static inline void check_DenseIndex_is_signed() {
EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
}
} // end namespace internal
/** \class DenseBase /** \class DenseBase
* \ingroup Core_Module * \ingroup Core_Module
* *
...@@ -24,37 +34,45 @@ namespace Eigen { ...@@ -24,37 +34,45 @@ namespace Eigen {
* \tparam Derived is the derived type, e.g., a matrix type or an expression. * \tparam Derived is the derived type, e.g., a matrix type or an expression.
* *
* This class can be extended with the help of the plugin mechanism described on the page * This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
* *
* \sa \ref TopicClassHierarchy * \sa \blank \ref TopicClassHierarchy
*/ */
template<typename Derived> class DenseBase template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
: public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar, : public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value>
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
#else #else
: public DenseCoeffsBase<Derived> : public DenseCoeffsBase<Derived,DirectWriteAccessors>
#endif // not EIGEN_PARSED_BY_DOXYGEN #endif // not EIGEN_PARSED_BY_DOXYGEN
{ {
public: public:
using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
class InnerIterator; /** Inner iterator type to iterate over the coefficients of a row or column.
* \sa class InnerIterator
*/
typedef Eigen::InnerIterator<Derived> InnerIterator;
typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageKind StorageKind;
/** \brief The type of indices /**
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. * \brief The type used to store indices
* \sa \ref TopicPreprocessorDirectives. * \details This typedef is relevant for types that store multiple indices such as
*/ * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index
typedef typename internal::traits<Derived>::Index Index; * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.
*/
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
/** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. */
typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
/** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc.
*
* It is an alias for the Scalar type */
typedef Scalar value_type;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base;
typedef DenseCoeffsBase<Derived> Base;
using Base::derived; using Base::derived;
using Base::const_cast_derived; using Base::const_cast_derived;
using Base::rows; using Base::rows;
...@@ -64,16 +82,6 @@ template<typename Derived> class DenseBase ...@@ -64,16 +82,6 @@ template<typename Derived> class DenseBase
using Base::colIndexByOuterInner; using Base::colIndexByOuterInner;
using Base::coeff; using Base::coeff;
using Base::coeffByOuterInner; using Base::coeffByOuterInner;
using Base::packet;
using Base::packetByOuterInner;
using Base::writePacket;
using Base::writePacketByOuterInner;
using Base::coeffRef;
using Base::coeffRefByOuterInner;
using Base::copyCoeff;
using Base::copyCoeffByOuterInner;
using Base::copyPacket;
using Base::copyPacketByOuterInner;
using Base::operator(); using Base::operator();
using Base::operator[]; using Base::operator[];
using Base::x; using Base::x;
...@@ -159,30 +167,54 @@ template<typename Derived> class DenseBase ...@@ -159,30 +167,54 @@ template<typename Derived> class DenseBase
InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
: int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
/**< This is a rough measure of how expensive it is to read one coefficient from
* this expression.
*/
InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret, InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
}; };
typedef typename internal::find_best_packet<Scalar,SizeAtCompileTime>::type PacketScalar;
enum { ThisConstantIsPrivateInPlainObjectBase }; enum { IsPlainObjectBase = 0 };
/** The plain matrix type corresponding to this expression.
* \sa PlainObject */
typedef Matrix<typename internal::traits<Derived>::Scalar,
internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime,
AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime
> PlainMatrix;
/** The plain array type corresponding to this expression.
* \sa PlainObject */
typedef Array<typename internal::traits<Derived>::Scalar,
internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime,
AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime
> PlainArray;
/** \brief The plain matrix or array type corresponding to this expression.
*
* This is not necessarily exactly the return type of eval(). In the case of plain matrices,
* the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
* that the return type of eval() is either PlainObject or const PlainObject&.
*/
typedef typename internal::conditional<internal::is_same<typename internal::traits<Derived>::XprKind,MatrixXpr >::value,
PlainMatrix, PlainArray>::type PlainObject;
/** \returns the number of nonzero coefficients which is in practice the number /** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */ * of stored coefficients. */
EIGEN_DEVICE_FUNC
inline Index nonZeros() const { return size(); } inline Index nonZeros() const { return size(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
* \sa rows(), cols(), IsVectorAtCompileTime. */
/** \returns the outer size. /** \returns the outer size.
* *
* \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
* with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
* column-major matrix, and the number of rows for a row-major matrix. */ * column-major matrix, and the number of rows for a row-major matrix. */
EIGEN_DEVICE_FUNC
Index outerSize() const Index outerSize() const
{ {
return IsVectorAtCompileTime ? 1 return IsVectorAtCompileTime ? 1
...@@ -194,6 +226,7 @@ template<typename Derived> class DenseBase ...@@ -194,6 +226,7 @@ template<typename Derived> class DenseBase
* \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
* with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
* column-major matrix, and the number of columns for a row-major matrix. */ * column-major matrix, and the number of columns for a row-major matrix. */
EIGEN_DEVICE_FUNC
Index innerSize() const Index innerSize() const
{ {
return IsVectorAtCompileTime ? this->size() return IsVectorAtCompileTime ? this->size()
...@@ -204,16 +237,18 @@ template<typename Derived> class DenseBase ...@@ -204,16 +237,18 @@ template<typename Derived> class DenseBase
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else. * nothing else.
*/ */
void resize(Index size) EIGEN_DEVICE_FUNC
void resize(Index newSize)
{ {
EIGEN_ONLY_USED_FOR_DEBUG(size); EIGEN_ONLY_USED_FOR_DEBUG(newSize);
eigen_assert(size == this->size() eigen_assert(newSize == this->size()
&& "DenseBase::resize() does not actually allow to resize."); && "DenseBase::resize() does not actually allow to resize.");
} }
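    // Illustrative sketch (assuming Eigen/Dense): on an expression, resize()
    // only asserts that the size is unchanged, whereas a plain object really
    // changes its dimensions:
    //
    //   Eigen::MatrixXd m(2,3);
    //   m.resize(3,2);              // OK: MatrixXd is a plain object
    //   m.transpose().resize(2,3);  // passes only because the size matches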
/** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else. * nothing else.
*/ */
EIGEN_DEVICE_FUNC
void resize(Index rows, Index cols) void resize(Index rows, Index cols)
{ {
EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(rows);
...@@ -223,13 +258,12 @@ template<typename Derived> class DenseBase ...@@ -223,13 +258,12 @@ template<typename Derived> class DenseBase
} }
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal Represents a matrix with all coefficients equal to one another*/ /** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType; typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;
/** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */ /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
typedef CwiseNullaryOp<internal::linspaced_op<Scalar,false>,Derived> SequentialLinSpacedReturnType; typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType;
/** \internal Represents a vector with linearly spaced coefficients that allows random access. */ /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
typedef CwiseNullaryOp<internal::linspaced_op<Scalar,true>,Derived> RandomAccessLinSpacedReturnType; typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType;
/** \internal the return type of MatrixBase::eigenvalues() */ /** \internal the return type of MatrixBase::eigenvalues() */
typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType; typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
...@@ -237,138 +271,133 @@ template<typename Derived> class DenseBase ...@@ -237,138 +271,133 @@ template<typename Derived> class DenseBase
/** Copies \a other into *this. \returns a reference to *this. */ /** Copies \a other into *this. \returns a reference to *this. */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator=(const DenseBase<OtherDerived>& other); Derived& operator=(const DenseBase<OtherDerived>& other);
/** Special case of the template operator=, in order to prevent the compiler /** Special case of the template operator=, in order to prevent the compiler
* from generating a default operator= (issue hit with g++ 4.1) * from generating a default operator= (issue hit with g++ 4.1)
*/ */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator=(const DenseBase& other); Derived& operator=(const DenseBase& other);
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& operator=(const EigenBase<OtherDerived> &other); Derived& operator=(const EigenBase<OtherDerived> &other);
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& operator+=(const EigenBase<OtherDerived> &other); Derived& operator+=(const EigenBase<OtherDerived> &other);
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& operator-=(const EigenBase<OtherDerived> &other); Derived& operator-=(const EigenBase<OtherDerived> &other);
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& operator=(const ReturnByValue<OtherDerived>& func); Derived& operator=(const ReturnByValue<OtherDerived>& func);
#ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal
/** Copies \a other into *this without evaluating other. \returns a reference to *this. */ * Copies \a other into *this without evaluating other. \returns a reference to *this.
* \deprecated */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& lazyAssign(const DenseBase<OtherDerived>& other); Derived& lazyAssign(const DenseBase<OtherDerived>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN
EIGEN_DEVICE_FUNC
CommaInitializer<Derived> operator<< (const Scalar& s); CommaInitializer<Derived> operator<< (const Scalar& s);
/** \deprecated it now returns \c *this */
template<unsigned int Added,unsigned int Removed> template<unsigned int Added,unsigned int Removed>
const Flagged<Derived, Added, Removed> flagged() const; EIGEN_DEPRECATED
const Derived& flagged() const
{ return derived(); }
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other); CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
Eigen::Transpose<Derived> transpose(); typedef Transpose<Derived> TransposeReturnType;
typedef const Transpose<const Derived> ConstTransposeReturnType; EIGEN_DEVICE_FUNC
TransposeReturnType transpose();
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
EIGEN_DEVICE_FUNC
ConstTransposeReturnType transpose() const; ConstTransposeReturnType transpose() const;
EIGEN_DEVICE_FUNC
void transposeInPlace(); void transposeInPlace();
#ifndef EIGEN_NO_DEBUG
protected:
template<typename OtherDerived>
void checkTransposeAliasing(const OtherDerived& other) const;
public:
#endif
typedef VectorBlock<Derived> SegmentReturnType;
typedef const VectorBlock<const Derived> ConstSegmentReturnType;
template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
// Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
SegmentReturnType segment(Index start, Index size);
typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const;
SegmentReturnType head(Index size);
typename DenseBase::ConstSegmentReturnType head(Index size) const;
SegmentReturnType tail(Index size);
typename DenseBase::ConstSegmentReturnType tail(Index size) const;
template<int Size> typename FixedSegmentReturnType<Size>::Type head();
template<int Size> typename ConstFixedSegmentReturnType<Size>::Type head() const;
template<int Size> typename FixedSegmentReturnType<Size>::Type tail();
template<int Size> typename ConstFixedSegmentReturnType<Size>::Type tail() const;
template<int Size> typename FixedSegmentReturnType<Size>::Type segment(Index start); EIGEN_DEVICE_FUNC static const ConstantReturnType
template<int Size> typename ConstFixedSegmentReturnType<Size>::Type segment(Index start) const;
static const ConstantReturnType
Constant(Index rows, Index cols, const Scalar& value); Constant(Index rows, Index cols, const Scalar& value);
static const ConstantReturnType EIGEN_DEVICE_FUNC static const ConstantReturnType
Constant(Index size, const Scalar& value); Constant(Index size, const Scalar& value);
static const ConstantReturnType EIGEN_DEVICE_FUNC static const ConstantReturnType
Constant(const Scalar& value); Constant(const Scalar& value);
static const SequentialLinSpacedReturnType EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType
LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high); LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
static const RandomAccessLinSpacedReturnType EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType
LinSpaced(Index size, const Scalar& low, const Scalar& high); LinSpaced(Index size, const Scalar& low, const Scalar& high);
static const SequentialLinSpacedReturnType EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType
LinSpaced(Sequential_t, const Scalar& low, const Scalar& high); LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
static const RandomAccessLinSpacedReturnType EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType
LinSpaced(const Scalar& low, const Scalar& high); LinSpaced(const Scalar& low, const Scalar& high);
template<typename CustomNullaryOp> template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC
static const CwiseNullaryOp<CustomNullaryOp, Derived> static const CwiseNullaryOp<CustomNullaryOp, PlainObject>
NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
template<typename CustomNullaryOp> template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC
static const CwiseNullaryOp<CustomNullaryOp, Derived> static const CwiseNullaryOp<CustomNullaryOp, PlainObject>
NullaryExpr(Index size, const CustomNullaryOp& func); NullaryExpr(Index size, const CustomNullaryOp& func);
template<typename CustomNullaryOp> template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC
static const CwiseNullaryOp<CustomNullaryOp, Derived> static const CwiseNullaryOp<CustomNullaryOp, PlainObject>
NullaryExpr(const CustomNullaryOp& func); NullaryExpr(const CustomNullaryOp& func);
static const ConstantReturnType Zero(Index rows, Index cols); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols);
static const ConstantReturnType Zero(Index size); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size);
static const ConstantReturnType Zero(); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero();
static const ConstantReturnType Ones(Index rows, Index cols); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols);
static const ConstantReturnType Ones(Index size); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size);
static const ConstantReturnType Ones(); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones();
void fill(const Scalar& value); EIGEN_DEVICE_FUNC void fill(const Scalar& value);
Derived& setConstant(const Scalar& value); EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value);
Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
Derived& setLinSpaced(const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high);
Derived& setZero(); EIGEN_DEVICE_FUNC Derived& setZero();
Derived& setOnes(); EIGEN_DEVICE_FUNC Derived& setOnes();
Derived& setRandom(); EIGEN_DEVICE_FUNC Derived& setRandom();
template<typename OtherDerived> template<typename OtherDerived> EIGEN_DEVICE_FUNC
bool isApprox(const DenseBase<OtherDerived>& other, bool isApprox(const DenseBase<OtherDerived>& other,
RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC
bool isMuchSmallerThan(const RealScalar& other, bool isMuchSmallerThan(const RealScalar& other,
RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived> template<typename OtherDerived> EIGEN_DEVICE_FUNC
bool isMuchSmallerThan(const DenseBase<OtherDerived>& other, bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
bool isConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
inline bool hasNaN() const;
inline bool allFinite() const;
inline Derived& operator*=(const Scalar& other); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
inline Derived& operator/=(const Scalar& other); Derived& operator*=(const Scalar& other);
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator/=(const Scalar& other);
typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType; typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType;
/** \returns the matrix or vector obtained by evaluating this expression. /** \returns the matrix or vector obtained by evaluating this expression.
* *
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy. * a const reference, in order to avoid a useless copy.
*
* \warning Be carefull with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE EvalReturnType eval() const EIGEN_STRONG_INLINE EvalReturnType eval() const
{ {
// Even though MSVC does not honor strong inlining when the return type // Even though MSVC does not honor strong inlining when the return type
...@@ -376,61 +405,78 @@ template<typename Derived> class DenseBase ...@@ -376,61 +405,78 @@ template<typename Derived> class DenseBase
// size types on MSVC. // size types on MSVC.
return typename internal::eval<Derived>::type(derived()); return typename internal::eval<Derived>::type(derived());
} }
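    // Illustrative sketch (assuming Eigen/Dense): eval() forces evaluation
    // into a temporary, which matters when an expression is captured with auto:
    //
    //   Eigen::MatrixXd a = Eigen::MatrixXd::Random(2,2);
    //   Eigen::MatrixXd b = Eigen::MatrixXd::Random(2,2);
    //   auto expr = a * b;          // expression type, re-evaluated at each use
    //   Eigen::MatrixXd c = expr;   // evaluates once into c
    //   auto d = (a * b).eval();    // d is a plain MatrixXd holding the result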
/** swaps *this with the expression \a other. /** swaps *this with the expression \a other.
* *
*/ */
template<typename OtherDerived> template<typename OtherDerived>
void swap(const DenseBase<OtherDerived>& other, EIGEN_DEVICE_FUNC
int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase) void swap(const DenseBase<OtherDerived>& other)
{ {
SwapWrapper<Derived>(derived()).lazyAssign(other.derived()); EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
eigen_assert(rows()==other.rows() && cols()==other.cols());
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
} }
/** swaps *this with the matrix or array \a other. /** swaps *this with the matrix or array \a other.
* *
*/ */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void swap(PlainObjectBase<OtherDerived>& other) void swap(PlainObjectBase<OtherDerived>& other)
{ {
SwapWrapper<Derived>(derived()).lazyAssign(other.derived()); eigen_assert(rows()==other.rows() && cols()==other.cols());
call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>());
} }
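    // Illustrative sketch (assuming Eigen/Dense): swap() exchanges the
    // contents of two objects of matching size:
    //
    //   Eigen::MatrixXd a = Eigen::MatrixXd::Zero(2,2);
    //   Eigen::MatrixXd b = Eigen::MatrixXd::Ones(2,2);
    //   a.swap(b);   // a is now all ones, b is all zeros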
EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const;
EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess();
template<bool Enable> EIGEN_DEVICE_FUNC
inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
template<bool Enable> EIGEN_DEVICE_FUNC
inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
inline const NestByValue<Derived> nestByValue() const; EIGEN_DEVICE_FUNC Scalar sum() const;
inline const ForceAlignedAccess<Derived> forceAlignedAccess() const; EIGEN_DEVICE_FUNC Scalar mean() const;
inline ForceAlignedAccess<Derived> forceAlignedAccess(); EIGEN_DEVICE_FUNC Scalar trace() const;
template<bool Enable> inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
Scalar sum() const;
Scalar mean() const;
Scalar trace() const;
Scalar prod() const; EIGEN_DEVICE_FUNC Scalar prod() const;
typename internal::traits<Derived>::Scalar minCoeff() const; EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;
typename internal::traits<Derived>::Scalar maxCoeff() const; EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;
template<typename IndexType> template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const; typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
template<typename IndexType> template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const; typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
template<typename IndexType> template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const; typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
template<typename IndexType> template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const; typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
template<typename BinaryOp> template<typename BinaryOp>
typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type EIGEN_DEVICE_FUNC
redux(const BinaryOp& func) const; Scalar redux(const BinaryOp& func) const;
template<typename Visitor> template<typename Visitor>
EIGEN_DEVICE_FUNC
void visit(Visitor& func) const; void visit(Visitor& func) const;
inline const WithFormat<Derived> format(const IOFormat& fmt) const; /** \returns a WithFormat proxy object allowing to print a matrix the with given
* format \a fmt.
*
* See class IOFormat for some examples.
*
* \sa class IOFormat, class WithFormat
*/
inline const WithFormat<Derived> format(const IOFormat& fmt) const
{
return WithFormat<Derived>(derived(), fmt);
}
/** \returns the unique coefficient of a 1x1 expression */ /** \returns the unique coefficient of a 1x1 expression */
EIGEN_DEVICE_FUNC
CoeffReturnType value() const CoeffReturnType value() const
{ {
EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
...@@ -438,25 +484,44 @@ template<typename Derived> class DenseBase ...@@ -438,25 +484,44 @@ template<typename Derived> class DenseBase
return derived().coeff(0,0); return derived().coeff(0,0);
} }
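    // Illustrative sketch (assuming Eigen/Dense): value() reads the unique
    // coefficient of a 1x1 expression:
    //
    //   Eigen::RowVector3d u(1.0, 2.0, 3.0);
    //   Eigen::Vector3d    v(4.0, 5.0, 6.0);
    //   double d = (u * v).value();   // 1*4 + 2*5 + 3*6 = 32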
/////////// Array module /////////// EIGEN_DEVICE_FUNC bool all() const;
EIGEN_DEVICE_FUNC bool any() const;
bool all(void) const; EIGEN_DEVICE_FUNC Index count() const;
bool any(void) const;
Index count() const;
typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType; typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType; typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType; typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType; typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
ConstRowwiseReturnType rowwise() const; /** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
RowwiseReturnType rowwise(); *
ConstColwiseReturnType colwise() const; * Example: \include MatrixBase_rowwise.cpp
ColwiseReturnType colwise(); * Output: \verbinclude MatrixBase_rowwise.out
*
* \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
*/
//Code moved here due to a CUDA compiler bug
EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const {
return ConstRowwiseReturnType(derived());
}
EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();
/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
*
* Example: \include MatrixBase_colwise.cpp
* Output: \verbinclude MatrixBase_colwise.out
*
* \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
*/
EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const {
return ConstColwiseReturnType(derived());
}
EIGEN_DEVICE_FUNC ColwiseReturnType colwise();
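    // Illustrative sketch (assuming Eigen/Dense): rowwise()/colwise() provide
    // partial reductions over each row or column:
    //
    //   Eigen::MatrixXd m(2,3);
    //   m << 1, 2, 3,
    //        4, 5, 6;
    //   Eigen::RowVectorXd colSums = m.colwise().sum();      // 5, 7, 9
    //   Eigen::VectorXd    rowMax  = m.rowwise().maxCoeff(); // 3, 6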
static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols); typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>,PlainObject> RandomReturnType;
static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index size); static const RandomReturnType Random(Index rows, Index cols);
static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(); static const RandomReturnType Random(Index size);
static const RandomReturnType Random();
template<typename ThenDerived,typename ElseDerived> template<typename ThenDerived,typename ElseDerived>
const Select<Derived,ThenDerived,ElseDerived> const Select<Derived,ThenDerived,ElseDerived>
...@@ -465,55 +530,69 @@ template<typename Derived> class DenseBase ...@@ -465,55 +530,69 @@ template<typename Derived> class DenseBase
template<typename ThenDerived> template<typename ThenDerived>
inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType> inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
select(const DenseBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const; select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const;
template<typename ElseDerived> template<typename ElseDerived>
inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived > inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
select(typename ElseDerived::Scalar thenScalar, const DenseBase<ElseDerived>& elseMatrix) const; select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
template<int p> RealScalar lpNorm() const; template<int p> RealScalar lpNorm() const;
template<int RowFactor, int ColFactor> template<int RowFactor, int ColFactor>
EIGEN_DEVICE_FUNC
const Replicate<Derived,RowFactor,ColFactor> replicate() const; const Replicate<Derived,RowFactor,ColFactor> replicate() const;
const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFacor,Index colFactor) const; /**
* \return an expression of the replication of \c *this
*
* Example: \include MatrixBase_replicate_int_int.cpp
* Output: \verbinclude MatrixBase_replicate_int_int.out
*
* \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
*/
//Code moved here due to a CUDA compiler bug
EIGEN_DEVICE_FUNC
const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const
{
return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor);
}
typedef Reverse<Derived, BothDirections> ReverseReturnType; typedef Reverse<Derived, BothDirections> ReverseReturnType;
typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType; typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
ReverseReturnType reverse(); EIGEN_DEVICE_FUNC ReverseReturnType reverse();
ConstReverseReturnType reverse() const; /** This is the const version of reverse(). */
void reverseInPlace(); //Code moved here due to a CUDA compiler bug
EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const
{
return ConstReverseReturnType(derived());
}
EIGEN_DEVICE_FUNC void reverseInPlace();
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
# include "../plugins/BlockMethods.h" # include "../plugins/BlockMethods.h"
# ifdef EIGEN_DENSEBASE_PLUGIN # ifdef EIGEN_DENSEBASE_PLUGIN
# include EIGEN_DENSEBASE_PLUGIN # include EIGEN_DENSEBASE_PLUGIN
# endif # endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#ifdef EIGEN2_SUPPORT #undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
Block<Derived> corner(CornerType type, Index cRows, Index cCols);
const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
template<int CRows, int CCols>
Block<Derived, CRows, CCols> corner(CornerType type);
template<int CRows, int CCols>
const Block<Derived, CRows, CCols> corner(CornerType type) const;
#endif // EIGEN2_SUPPORT
// disable the use of evalTo for dense objects with a nice compilation error // disable the use of evalTo for dense objects with a nice compilation error
template<typename Dest> inline void evalTo(Dest& ) const template<typename Dest>
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& ) const
{ {
EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
} }
protected: protected:
EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase)
/** Default constructor. Do nothing. */ /** Default constructor. Do nothing. */
DenseBase() EIGEN_DEVICE_FUNC DenseBase()
{ {
/* Just checks for self-consistency of the flags. /* Just checks for self-consistency of the flags.
* Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
*/ */
#ifdef EIGEN_INTERNAL_DEBUGGING #ifdef EIGEN_INTERNAL_DEBUGGING
EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
...@@ -523,9 +602,9 @@ template<typename Derived> class DenseBase ...@@ -523,9 +602,9 @@ template<typename Derived> class DenseBase
} }
private: private:
explicit DenseBase(int); EIGEN_DEVICE_FUNC explicit DenseBase(int);
DenseBase(int,int); EIGEN_DEVICE_FUNC DenseBase(int,int);
template<typename OtherDerived> explicit DenseBase(const DenseBase<OtherDerived>&); template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&);
}; };
} // end namespace Eigen } // end namespace Eigen
......
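/** Combined usage sketch for a few of the DenseBase facilities declared above
  * (illustrative only, assuming Eigen/Dense; the same calls work on both
  * matrices and arrays):
  * \code
  * Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 2, 3.0);
  * Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0); // 0, 0.25, ..., 1
  * m.setRandom();
  * double total = m.sum();
  * bool   ok    = !m.hasNaN();
  * \endcode
  */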
...@@ -35,7 +35,6 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -35,7 +35,6 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
{ {
public: public:
typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar;
...@@ -61,6 +60,7 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -61,6 +60,7 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
using Base::size; using Base::size;
using Base::derived; using Base::derived;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
{ {
return int(Derived::RowsAtCompileTime) == 1 ? 0 return int(Derived::RowsAtCompileTime) == 1 ? 0
...@@ -69,6 +69,7 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -69,6 +69,7 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
: inner; : inner;
} }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
{ {
return int(Derived::ColsAtCompileTime) == 1 ? 0 return int(Derived::ColsAtCompileTime) == 1 ? 0
...@@ -91,13 +92,15 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -91,13 +92,15 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
* *
* \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
{ {
eigen_internal_assert(row >= 0 && row < rows() eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
return derived().coeff(row, col); return internal::evaluator<Derived>(derived()).coeff(row,col);
} }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
{ {
return coeff(rowIndexByOuterInner(outer, inner), return coeff(rowIndexByOuterInner(outer, inner),
...@@ -108,11 +111,12 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -108,11 +111,12 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
* *
* \sa operator()(Index,Index), operator[](Index) * \sa operator()(Index,Index), operator[](Index)
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const
{ {
eigen_assert(row >= 0 && row < rows() eigen_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
return derived().coeff(row, col); return coeff(row, col);
} }
/** Short version: don't use this function, use /** Short version: don't use this function, use
...@@ -130,11 +134,14 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -130,11 +134,14 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
* \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
coeff(Index index) const coeff(Index index) const
{ {
EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
eigen_internal_assert(index >= 0 && index < size()); eigen_internal_assert(index >= 0 && index < size());
return derived().coeff(index); return internal::evaluator<Derived>(derived()).coeff(index);
} }
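As the surrounding documentation says, coeff() skips the user-level bounds assertion that operator() performs and now goes through the evaluator. A short illustrative sketch of the intended trade-off (user-level API, not part of this patch):
Eigen::Vector4f v(1.f, 2.f, 3.f, 4.f);
float a = v(2);        // checked by eigen_assert in debug builds
float b = v.coeff(2);  // unchecked fast path; valid indices are the caller's responsibility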
...@@ -146,15 +153,14 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -146,15 +153,14 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
* z() const, w() const * z() const, w() const
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
operator[](Index index) const operator[](Index index) const
{ {
#ifndef EIGEN2_SUPPORT
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
#endif
eigen_assert(index >= 0 && index < size()); eigen_assert(index >= 0 && index < size());
return derived().coeff(index); return coeff(index);
} }
/** \returns the coefficient at given index. /** \returns the coefficient at given index.
...@@ -167,32 +173,49 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -167,32 +173,49 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
* z() const, w() const * z() const, w() const
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
operator()(Index index) const operator()(Index index) const
{ {
eigen_assert(index >= 0 && index < size()); eigen_assert(index >= 0 && index < size());
return derived().coeff(index); return coeff(index);
} }
/** equivalent to operator[](0). */ /** equivalent to operator[](0). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
x() const { return (*this)[0]; } x() const { return (*this)[0]; }
/** equivalent to operator[](1). */ /** equivalent to operator[](1). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
y() const { return (*this)[1]; } y() const
{
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS);
return (*this)[1];
}
/** equivalent to operator[](2). */ /** equivalent to operator[](2). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
z() const { return (*this)[2]; } z() const
{
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS);
return (*this)[2];
}
/** equivalent to operator[](3). */ /** equivalent to operator[](3). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE CoeffReturnType EIGEN_STRONG_INLINE CoeffReturnType
w() const { return (*this)[3]; } w() const
{
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS);
return (*this)[3];
}
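The new OUT_OF_RANGE_ACCESS static assertions turn out-of-range named accessors into compile-time errors for fixed-size vectors. For example (illustrative only):
Eigen::Vector2f p(1.f, 2.f);
float py = p.y();    // fine: SizeAtCompileTime >= 2
// float pz = p.z(); // would now fail to compile with OUT_OF_RANGE_ACCESS instead of asserting at run time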
/** \internal /** \internal
* \returns the packet of coefficients starting at the given row and column. It is your responsibility * \returns the packet of coefficients starting at the given row and column. It is your responsibility
...@@ -207,9 +230,9 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -207,9 +230,9 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const
{ {
eigen_internal_assert(row >= 0 && row < rows() typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
&& col >= 0 && col < cols()); eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
return derived().template packet<LoadMode>(row,col); return internal::evaluator<Derived>(derived()).template packet<LoadMode,DefaultPacketType>(row,col);
} }
...@@ -234,8 +257,11 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> ...@@ -234,8 +257,11 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{ {
EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
eigen_internal_assert(index >= 0 && index < size()); eigen_internal_assert(index >= 0 && index < size());
return derived().template packet<LoadMode>(index); return internal::evaluator<Derived>(derived()).template packet<LoadMode,DefaultPacketType>(index);
} }
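The packet accessors above are thin wrappers over the low-level load primitives. A rough sketch of what an aligned versus unaligned load looks like at that level (internal API, subject to change; the buffer is an assumption of this sketch):
typedef Eigen::internal::packet_traits<float>::type Packet;
EIGEN_ALIGN16 float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
Packet pa = Eigen::internal::pload<Packet>(buf);      // aligned load (the Aligned LoadMode case)
Packet pu = Eigen::internal::ploadu<Packet>(buf + 1); // unaligned load (the Unaligned LoadMode case)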
protected: protected:
...@@ -278,7 +304,6 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -278,7 +304,6 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base; typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
...@@ -311,13 +336,15 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -311,13 +336,15 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
* *
* \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{ {
eigen_internal_assert(row >= 0 && row < rows() eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
return derived().coeffRef(row, col); return internal::evaluator<Derived>(derived()).coeffRef(row,col);
} }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
coeffRefByOuterInner(Index outer, Index inner) coeffRefByOuterInner(Index outer, Index inner)
{ {
...@@ -330,12 +357,13 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -330,12 +357,13 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
* \sa operator[](Index) * \sa operator[](Index)
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
operator()(Index row, Index col) operator()(Index row, Index col)
{ {
eigen_assert(row >= 0 && row < rows() eigen_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
return derived().coeffRef(row, col); return coeffRef(row, col);
} }
...@@ -354,11 +382,14 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -354,11 +382,14 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
* \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
coeffRef(Index index) coeffRef(Index index)
{ {
EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
eigen_internal_assert(index >= 0 && index < size()); eigen_internal_assert(index >= 0 && index < size());
return derived().coeffRef(index); return internal::evaluator<Derived>(derived()).coeffRef(index);
} }
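Mirroring the read side, coeffRef() is the unchecked write path while operator() asserts on the index. Illustrative user-level usage:
Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
m(0, 1) = 2.0;          // checked by eigen_assert in debug builds
m.coeffRef(2, 2) = 5.0; // unchecked; the caller guarantees the index is in range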
/** \returns a reference to the coefficient at given index. /** \returns a reference to the coefficient at given index.
...@@ -368,15 +399,14 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -368,15 +399,14 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
* \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
operator[](Index index) operator[](Index index)
{ {
#ifndef EIGEN2_SUPPORT
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
#endif
eigen_assert(index >= 0 && index < size()); eigen_assert(index >= 0 && index < size());
return derived().coeffRef(index); return coeffRef(index);
} }
/** \returns a reference to the coefficient at given index. /** \returns a reference to the coefficient at given index.
...@@ -388,167 +418,49 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -388,167 +418,49 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
* \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
*/ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
operator()(Index index) operator()(Index index)
{ {
eigen_assert(index >= 0 && index < size()); eigen_assert(index >= 0 && index < size());
return derived().coeffRef(index); return coeffRef(index);
} }
/** equivalent to operator[](0). */ /** equivalent to operator[](0). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
x() { return (*this)[0]; } x() { return (*this)[0]; }
/** equivalent to operator[](1). */ /** equivalent to operator[](1). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
// [old code removed by this change]
y() { return (*this)[1]; }

/** equivalent to operator[](2). */
EIGEN_STRONG_INLINE Scalar&
z() { return (*this)[2]; }

/** equivalent to operator[](3). */
EIGEN_STRONG_INLINE Scalar&
w() { return (*this)[3]; }

/** \internal
* Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility
* to ensure that a packet really starts there. This method is only available on expressions having the
* PacketAccessBit.
*
* The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
* the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
* starting at an address which is a multiple of the packet size.
*/
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket
(Index row, Index col, const typename internal::packet_traits<Scalar>::type& x)
{
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
derived().template writePacket<StoreMode>(row,col,x);
}

/** \internal */
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacketByOuterInner
(Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& x)
{
writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner),
x);
}

/** \internal
* Stores the given packet of coefficients, at the given index in this expression. It is your responsibility
* to ensure that a packet really starts there. This method is only available on expressions having the
* PacketAccessBit and the LinearAccessBit.
*
* The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
* the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
* starting at an address which is a multiple of the packet size.
*/
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket
(Index index, const typename internal::packet_traits<Scalar>::type& x)
{
eigen_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index,x);
}

#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal Copies the coefficient at position (row,col) of other into *this.
*
* This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
* with usual assignments.
*
* Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox.
*/
template<typename OtherDerived>
EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
derived().coeffRef(row, col) = other.derived().coeff(row, col);
}

/** \internal Copies the coefficient at the given index of other into *this.
*
* This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
* with usual assignments.
*
* Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox.
*/
template<typename OtherDerived>
EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
eigen_internal_assert(index >= 0 && index < size());
derived().coeffRef(index) = other.derived().coeff(index);
}

template<typename OtherDerived>
EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
{
const Index row = rowIndexByOuterInner(outer,inner);
const Index col = colIndexByOuterInner(outer,inner);
// derived() is important here: copyCoeff() may be reimplemented in Derived!
derived().copyCoeff(row, col, other);
}

/** \internal Copies the packet at position (row,col) of other into *this.
*
* This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
* with usual assignments.
*
* Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox.
*/
template<typename OtherDerived, int StoreMode, int LoadMode>
EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
derived().template writePacket<StoreMode>(row, col,
other.derived().template packet<LoadMode>(row, col));
}

/** \internal Copies the packet at the given index of other into *this.
*
* This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
* with usual assignments.
*
* Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox.
*/
template<typename OtherDerived, int StoreMode, int LoadMode>
EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
eigen_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index,
other.derived().template packet<LoadMode>(index));
}

/** \internal */
template<typename OtherDerived, int StoreMode, int LoadMode>
EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
{
const Index row = rowIndexByOuterInner(outer,inner);
const Index col = colIndexByOuterInner(outer,inner);
// derived() is important here: copyCoeff() may be reimplemented in Derived!
derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
}
#endif

// [new code added by this change]
y()
{
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS);
return (*this)[1];
}

/** equivalent to operator[](2). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar&
z()
{
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS);
return (*this)[2];
}

/** equivalent to operator[](3). */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar&
w()
{
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS);
return (*this)[3];
}
}; };
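The writable named accessors gain the same compile-time size guard as their const counterparts; e.g. (illustrative only):
Eigen::Vector3f v;
v.x() = 1.f; v.y() = 2.f; v.z() = 3.f; // OK for a size-3 vector
// v.w() = 4.f;                        // would now be rejected at compile time with OUT_OF_RANGE_ACCESS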
/** \brief Base class providing direct read-only coefficient access to matrices and arrays. /** \brief Base class providing direct read-only coefficient access to matrices and arrays.
...@@ -560,7 +472,7 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ...@@ -560,7 +472,7 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
* inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
* \c operator() . * \c operator() .
* *
* \sa \ref TopicClassHierarchy * \sa \blank \ref TopicClassHierarchy
*/ */
template<typename Derived> template<typename Derived>
class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
...@@ -568,7 +480,6 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived ...@@ -568,7 +480,6 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
public: public:
typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base; typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
...@@ -581,6 +492,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived ...@@ -581,6 +492,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
* *
* \sa outerStride(), rowStride(), colStride() * \sa outerStride(), rowStride(), colStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index innerStride() const inline Index innerStride() const
{ {
return derived().innerStride(); return derived().innerStride();
...@@ -591,6 +503,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived ...@@ -591,6 +503,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
* *
* \sa innerStride(), rowStride(), colStride() * \sa innerStride(), rowStride(), colStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index outerStride() const inline Index outerStride() const
{ {
return derived().outerStride(); return derived().outerStride();
...@@ -606,6 +519,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived ...@@ -606,6 +519,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
* *
* \sa innerStride(), outerStride(), colStride() * \sa innerStride(), outerStride(), colStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index rowStride() const inline Index rowStride() const
{ {
return Derived::IsRowMajor ? outerStride() : innerStride(); return Derived::IsRowMajor ? outerStride() : innerStride();
...@@ -615,6 +529,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived ...@@ -615,6 +529,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
* *
* \sa innerStride(), outerStride(), rowStride() * \sa innerStride(), outerStride(), rowStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index colStride() const inline Index colStride() const
{ {
return Derived::IsRowMajor ? innerStride() : outerStride(); return Derived::IsRowMajor ? innerStride() : outerStride();
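A quick illustration of the stride accessors documented above, assuming the default column-major layout with no padding (user-level API):
Eigen::MatrixXf m(3, 4);        // column-major by default
// m.innerStride() == 1  and  m.rowStride() == 1 : entries within a column are contiguous in memory
// m.outerStride() == 3  and  m.colStride() == 3 : stepping to the next column jumps over 3 floats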
...@@ -630,7 +545,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived ...@@ -630,7 +545,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
* inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
* \c operator(). * \c operator().
* *
* \sa \ref TopicClassHierarchy * \sa \blank \ref TopicClassHierarchy
*/ */
template<typename Derived> template<typename Derived>
class DenseCoeffsBase<Derived, DirectWriteAccessors> class DenseCoeffsBase<Derived, DirectWriteAccessors>
...@@ -639,7 +554,6 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors> ...@@ -639,7 +554,6 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
public: public:
typedef DenseCoeffsBase<Derived, WriteAccessors> Base; typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
...@@ -652,6 +566,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors> ...@@ -652,6 +566,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
* *
* \sa outerStride(), rowStride(), colStride() * \sa outerStride(), rowStride(), colStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index innerStride() const inline Index innerStride() const
{ {
return derived().innerStride(); return derived().innerStride();
...@@ -662,6 +577,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors> ...@@ -662,6 +577,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
* *
* \sa innerStride(), rowStride(), colStride() * \sa innerStride(), rowStride(), colStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index outerStride() const inline Index outerStride() const
{ {
return derived().outerStride(); return derived().outerStride();
...@@ -677,6 +593,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors> ...@@ -677,6 +593,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
* *
* \sa innerStride(), outerStride(), colStride() * \sa innerStride(), outerStride(), colStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index rowStride() const inline Index rowStride() const
{ {
return Derived::IsRowMajor ? outerStride() : innerStride(); return Derived::IsRowMajor ? outerStride() : innerStride();
...@@ -686,6 +603,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors> ...@@ -686,6 +603,7 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
* *
* \sa innerStride(), outerStride(), rowStride() * \sa innerStride(), outerStride(), rowStride()
*/ */
EIGEN_DEVICE_FUNC
inline Index colStride() const inline Index colStride() const
{ {
return Derived::IsRowMajor ? innerStride() : outerStride(); return Derived::IsRowMajor ? innerStride() : outerStride();
...@@ -694,33 +612,42 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors> ...@@ -694,33 +612,42 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
namespace internal { namespace internal {
template<typename Derived, bool JustReturnZero> template<int Alignment, typename Derived, bool JustReturnZero>
struct first_aligned_impl struct first_aligned_impl
{ {
static inline typename Derived::Index run(const Derived&) static inline Index run(const Derived&)
{ return 0; } { return 0; }
}; };
template<typename Derived> template<int Alignment, typename Derived>
struct first_aligned_impl<Derived, false> struct first_aligned_impl<Alignment, Derived, false>
{ {
static inline typename Derived::Index run(const Derived& m) static inline Index run(const Derived& m)
{ {
return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); return internal::first_aligned<Alignment>(m.data(), m.size());
} }
}; };
/** \internal \returns the index of the first element of the array that is well aligned for vectorization. /** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect to \a Alignment for vectorization.
*
* \tparam Alignment requested alignment in Bytes.
* *
* There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
* documentation. * documentation.
*/ */
template<int Alignment, typename Derived>
static inline Index first_aligned(const DenseBase<Derived>& m)
{
enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };
return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());
}
template<typename Derived> template<typename Derived>
static inline typename Derived::Index first_aligned(const Derived& m) static inline Index first_default_aligned(const DenseBase<Derived>& m)
{ {
return first_aligned_impl typedef typename Derived::Scalar Scalar;
<Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)> typedef typename packet_traits<Scalar>::type DefaultPacketType;
::run(m); return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment),Derived>(m);
} }
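The reworked helpers now take the requested alignment as a template parameter. A hedged sketch of the raw-pointer variant mentioned in the comment (internal API; the buffer and its size are assumptions of this sketch):
float buffer[32];
Eigen::Index n = 32;
// index of the first element of 'buffer' that sits on a 16-byte boundary (or n if there is none)
Eigen::Index k = Eigen::internal::first_aligned<16>(buffer, n);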
template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret> template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
// //
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com> // Copyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com>
// //
// This Source Code Form is subject to the terms of the Mozilla // This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed // Public License v. 2.0. If a copy of the MPL was not distributed
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
#define EIGEN_MATRIXSTORAGE_H #define EIGEN_MATRIXSTORAGE_H
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN; #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) X; EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
#else #else
#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X)
#endif #endif
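The plugin macro now receives a statement (typically an Index size declaration) that runs before the user-supplied body. A hypothetical instrumentation hook, defined before including any Eigen header; the counter name is illustrative:
// Hypothetical: count every DenseStorage construction.
static long g_dense_storage_ctors = 0;
#define EIGEN_DENSE_STORAGE_CTOR_PLUGIN ++g_dense_storage_ctors;
#include <Eigen/Core>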
namespace Eigen { namespace Eigen {
...@@ -24,19 +24,38 @@ namespace internal { ...@@ -24,19 +24,38 @@ namespace internal {
struct constructor_without_unaligned_array_assert {}; struct constructor_without_unaligned_array_assert {};
template<typename T, int Size>
EIGEN_DEVICE_FUNC
void check_static_allocation_size()
{
// if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
#if EIGEN_STACK_ALLOCATION_LIMIT
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
#endif
}
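check_static_allocation_size() enforces EIGEN_STACK_ALLOCATION_LIMIT at compile time. A hypothetical build configuration illustrating the effect (the 16 KB limit is arbitrary):
#define EIGEN_STACK_ALLOCATION_LIMIT 16384   // cap fixed-size objects at 16 KB; define it to 0 to disable the check
#include <Eigen/Core>
Eigen::Matrix<double, 40, 40> ok;            // 12800 bytes, accepted
// Eigen::Matrix<double, 64, 64> too_big;    // 32768 bytes, would trigger OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG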
/** \internal /** \internal
* Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
* to 16 bytes boundary if the total size is a multiple of 16 bytes. * to 16 bytes boundary if the total size is a multiple of 16 bytes.
*/ */
template <typename T, int Size, int MatrixOrArrayOptions, template <typename T, int Size, int MatrixOrArrayOptions,
int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0 int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0
: (((Size*sizeof(T))%16)==0) ? 16 : compute_default_alignment<T,Size>::value >
: 0 >
struct plain_array struct plain_array
{ {
T array[Size]; T array[Size];
plain_array() {}
plain_array(constructor_without_unaligned_array_assert) {} EIGEN_DEVICE_FUNC
plain_array()
{
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
plain_array(constructor_without_unaligned_array_assert)
{
check_static_allocation_size<T,Size>();
}
}; };
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
...@@ -48,32 +67,100 @@ struct plain_array ...@@ -48,32 +67,100 @@ struct plain_array
template<typename PtrType> template<typename PtrType>
EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; } EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; }
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
eigen_assert((reinterpret_cast<size_t>(eigen_unaligned_array_assert_workaround_gcc47(array)) & sizemask) == 0 \ eigen_assert((internal::UIntPtr(eigen_unaligned_array_assert_workaround_gcc47(array)) & (sizemask)) == 0 \
&& "this assertion is explained here: " \ && "this assertion is explained here: " \
"http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
" **** READ THIS WEB PAGE !!! ****"); " **** READ THIS WEB PAGE !!! ****");
#else #else
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
eigen_assert((reinterpret_cast<size_t>(array) & sizemask) == 0 \ eigen_assert((internal::UIntPtr(array) & (sizemask)) == 0 \
&& "this assertion is explained here: " \ && "this assertion is explained here: " \
"http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
" **** READ THIS WEB PAGE !!! ****"); " **** READ THIS WEB PAGE !!! ****");
#endif #endif
template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 8>
{
EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size];
EIGEN_DEVICE_FUNC
plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
plain_array(constructor_without_unaligned_array_assert)
{
check_static_allocation_size<T,Size>();
}
};
template <typename T, int Size, int MatrixOrArrayOptions> template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 16> struct plain_array<T, Size, MatrixOrArrayOptions, 16>
{ {
EIGEN_USER_ALIGN16 T array[Size]; EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size];
plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) }
plain_array(constructor_without_unaligned_array_assert) {} EIGEN_DEVICE_FUNC
plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
plain_array(constructor_without_unaligned_array_assert)
{
check_static_allocation_size<T,Size>();
}
};
template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 32>
{
EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size];
EIGEN_DEVICE_FUNC
plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
plain_array(constructor_without_unaligned_array_assert)
{
check_static_allocation_size<T,Size>();
}
};
template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 64>
{
EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size];
EIGEN_DEVICE_FUNC
plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
plain_array(constructor_without_unaligned_array_assert)
{
check_static_allocation_size<T,Size>();
}
}; };
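These specializations give fixed-size storage the alignment requested through the Options template parameter; at the user level the choice is driven by DontAlign, roughly as follows (a sketch, not part of this patch):
typedef Eigen::Matrix<float, 4, 4> AlignedMat;                  // plain_array over-aligned for packet access
typedef Eigen::Matrix<float, 4, 4, Eigen::DontAlign> PackedMat; // Alignment == 0: an ordinary, unaligned C array
The DontAlign variant trades vectorized aligned loads for the freedom to embed the matrix in packed structures or unaligned buffers.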
template <typename T, int MatrixOrArrayOptions, int Alignment> template <typename T, int MatrixOrArrayOptions, int Alignment>
struct plain_array<T, 0, MatrixOrArrayOptions, Alignment> struct plain_array<T, 0, MatrixOrArrayOptions, Alignment>
{ {
EIGEN_USER_ALIGN16 T array[1]; T array[1];
plain_array() {} EIGEN_DEVICE_FUNC plain_array() {}
plain_array(constructor_without_unaligned_array_assert) {} EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) {}
}; };
} // end namespace internal } // end namespace internal
...@@ -97,33 +184,54 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt ...@@ -97,33 +184,54 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
{ {
internal::plain_array<T,Size,_Options> m_data; internal::plain_array<T,Size,_Options> m_data;
public: public:
inline explicit DenseStorage() {} EIGEN_DEVICE_FUNC DenseStorage() {
inline DenseStorage(internal::constructor_without_unaligned_array_assert) EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
}
EIGEN_DEVICE_FUNC
explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()) {} : m_data(internal::constructor_without_unaligned_array_assert()) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} EIGEN_DEVICE_FUNC
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } DenseStorage(const DenseStorage& other) : m_data(other.m_data) {
static inline DenseIndex rows(void) {return _Rows;} EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
static inline DenseIndex cols(void) {return _Cols;} }
inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} EIGEN_DEVICE_FUNC
inline void resize(DenseIndex,DenseIndex,DenseIndex) {} DenseStorage& operator=(const DenseStorage& other)
inline const T *data() const { return m_data.array; } {
inline T *data() { return m_data.array; } if (this != &other) m_data = other.m_data;
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) {
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
eigen_internal_assert(size==rows*cols && rows==_Rows && cols==_Cols);
EIGEN_UNUSED_VARIABLE(size);
EIGEN_UNUSED_VARIABLE(rows);
EIGEN_UNUSED_VARIABLE(cols);
}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
}; };
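The fixed-size specialization keeps the coefficients in a plain_array member, so such objects never touch the heap; only the dynamic specializations below allocate. Illustrative contrast (user-level API):
Eigen::Matrix3f a;        // coefficients live inside the object itself (internal::plain_array)
Eigen::MatrixXf b(3, 3);  // dynamic storage: one heap allocation of 9 floats, released in the destructor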
// null matrix // null matrix
template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options> template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
{ {
public: public:
inline explicit DenseStorage() {} EIGEN_DEVICE_FUNC DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {}
inline void swap(DenseStorage& ) {} EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; }
static inline DenseIndex rows(void) {return _Rows;} EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}
static inline DenseIndex cols(void) {return _Cols;} EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {}
inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
inline void resize(DenseIndex,DenseIndex,DenseIndex) {} EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
inline const T *data() const { return 0; } EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
inline T *data() { return 0; } EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC const T *data() const { return 0; }
EIGEN_DEVICE_FUNC T *data() { return 0; }
}; };
// more specializations for null matrices; these are necessary to resolve ambiguities // more specializations for null matrices; these are necessary to resolve ambiguities
...@@ -140,173 +248,321 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic, ...@@ -140,173 +248,321 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic,
template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options> template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
{ {
internal::plain_array<T,Size,_Options> m_data; internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows; Index m_rows;
DenseIndex m_cols; Index m_cols;
public: public:
inline explicit DenseStorage() : m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
inline void swap(DenseStorage& other) EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_rows = other.m_rows;
m_cols = other.m_cols;
}
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return m_rows;} EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
inline DenseIndex cols(void) const {return m_cols;} EIGEN_DEVICE_FUNC Index cols() const {return m_cols;}
inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }
inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }
inline const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
}; };
// dynamic-size matrix with fixed-size storage and fixed width // dynamic-size matrix with fixed-size storage and fixed width
template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options> template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
{ {
internal::plain_array<T,Size,_Options> m_data; internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows; Index m_rows;
public: public:
inline explicit DenseStorage() : m_rows(0) {} EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
inline DenseIndex rows(void) const {return m_rows;} {
inline DenseIndex cols(void) const {return _Cols;} if (this != &other)
inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } {
inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } m_data = other.m_data;
inline const T *data() const { return m_data.array; } m_rows = other.m_rows;
inline T *data() { return m_data.array; } }
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; }
EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) { m_rows = rows; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
}; };
// dynamic-size matrix with fixed-size storage and fixed height // dynamic-size matrix with fixed-size storage and fixed height
template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options> template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
{ {
internal::plain_array<T,Size,_Options> m_data; internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_cols; Index m_cols;
public: public:
inline explicit DenseStorage() : m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
inline DenseIndex rows(void) const {return _Rows;} {
inline DenseIndex cols(void) const {return m_cols;} if (this != &other)
inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } {
inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } m_data = other.m_data;
inline const T *data() const { return m_data.array; } m_cols = other.m_cols;
inline T *data() { return m_data.array; } }
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
void conservativeResize(Index, Index, Index cols) { m_cols = cols; }
void resize(Index, Index, Index cols) { m_cols = cols; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
}; };
// purely dynamic matrix. // purely dynamic matrix.
template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options> template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
{ {
T *m_data; T *m_data;
DenseIndex m_rows; Index m_rows;
DenseIndex m_cols; Index m_cols;
public: public:
inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {} : m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols) EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } {
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); } EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
inline void swap(DenseStorage& other) eigen_internal_assert(size==rows*cols && rows>=0 && cols >=0);
}
EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*other.m_cols))
, m_rows(other.m_rows)
, m_cols(other.m_cols)
{
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols)
internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data);
}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
DenseStorage tmp(other);
this->swap(tmp);
}
return *this;
}
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC
DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT
: m_data(std::move(other.m_data))
, m_rows(std::move(other.m_rows))
, m_cols(std::move(other.m_cols))
{
other.m_data = nullptr;
other.m_rows = 0;
other.m_cols = 0;
}
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
using std::swap;
swap(m_data, other.m_data);
swap(m_rows, other.m_rows);
swap(m_cols, other.m_cols);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return m_rows;} EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return m_cols;} EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols) void conservativeResize(Index size, Index rows, Index cols)
{ {
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols); m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
m_rows = rows; m_rows = rows;
m_cols = cols; m_cols = cols;
} }
void resize(DenseIndex size, DenseIndex rows, DenseIndex cols) EIGEN_DEVICE_FUNC void resize(Index size, Index rows, Index cols)
{ {
if(size != m_rows*m_cols) if(size != m_rows*m_cols)
{ {
internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);
if (size) if (size>0) // >0 and not simply !=0 to let the compiler know that size cannot be negative
m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size); m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
else else
m_data = 0; m_data = 0;
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
} }
m_rows = rows; m_rows = rows;
m_cols = cols; m_cols = cols;
} }
inline const T *data() const { return m_data; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; }
inline T *data() { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; }
}; };
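With EIGEN_HAS_RVALUE_REFERENCES the fully dynamic storage is now movable, so returning a large matrix by value transfers the data pointer instead of copying the coefficients; for example (illustrative only):
Eigen::MatrixXd make_big()
{
  return Eigen::MatrixXd::Random(1000, 1000);
}
Eigen::MatrixXd m = make_big();  // moved (or elided): no element-wise copy of the million coefficients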
// matrix with dynamic width and fixed height (so that matrix has dynamic size). // matrix with dynamic width and fixed height (so that matrix has dynamic size).
template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options> template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
{ {
T *m_data; T *m_data;
DenseIndex m_cols; Index m_cols;
public: public:
inline explicit DenseStorage() : m_data(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols) EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } {
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); } EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } eigen_internal_assert(size==rows*cols && rows==_Rows && cols >=0);
static inline DenseIndex rows(void) {return _Rows;} EIGEN_UNUSED_VARIABLE(rows);
inline DenseIndex cols(void) const {return m_cols;} }
inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols) EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(_Rows*other.m_cols))
, m_cols(other.m_cols)
{
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*_Rows)
internal::smart_copy(other.m_data, other.m_data+_Rows*m_cols, m_data);
}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
DenseStorage tmp(other);
this->swap(tmp);
}
return *this;
}
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC
DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT
: m_data(std::move(other.m_data))
, m_cols(std::move(other.m_cols))
{
other.m_data = nullptr;
other.m_cols = 0;
}
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
using std::swap;
swap(m_data, other.m_data);
swap(m_cols, other.m_cols);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols)
{ {
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols); m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
m_cols = cols; m_cols = cols;
} }
EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index, Index cols)
{ {
if(size != _Rows*m_cols) if(size != _Rows*m_cols)
{ {
internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);
if (size) if (size>0) // >0 and not simply !=0 to let the compiler know that size cannot be negative
m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size); m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
else else
m_data = 0; m_data = 0;
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
} }
m_cols = cols; m_cols = cols;
} }
inline const T *data() const { return m_data; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; }
inline T *data() { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; }
}; };
// matrix with dynamic height and fixed width (so that matrix has dynamic size). // matrix with dynamic height and fixed width (so that matrix has dynamic size).
template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options> template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
{ {
T *m_data; T *m_data;
DenseIndex m_rows; Index m_rows;
public: public:
inline explicit DenseStorage() : m_data(0), m_rows(0) {} EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows) EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } {
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); } EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } eigen_internal_assert(size==rows*cols && rows>=0 && cols == _Cols);
inline DenseIndex rows(void) const {return m_rows;} EIGEN_UNUSED_VARIABLE(cols);
static inline DenseIndex cols(void) {return _Cols;} }
inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex) EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*_Cols))
, m_rows(other.m_rows)
{
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*_Cols)
internal::smart_copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data);
}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
DenseStorage tmp(other);
this->swap(tmp);
}
return *this;
}
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC
DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT
: m_data(std::move(other.m_data))
, m_rows(std::move(other.m_rows))
{
other.m_data = nullptr;
other.m_rows = 0;
}
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
using std::swap;
swap(m_data, other.m_data);
swap(m_rows, other.m_rows);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
void conservativeResize(Index size, Index rows, Index)
{ {
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols); m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
m_rows = rows; m_rows = rows;
} }
EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index rows, Index)
{ {
if(size != m_rows*_Cols) if(size != m_rows*_Cols)
{ {
internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);
if (size) if (size>0) // >0 and not simply !=0 to let the compiler know that size cannot be negative
m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size); m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
else else
m_data = 0; m_data = 0;
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
} }
m_rows = rows; m_rows = rows;
} }
inline const T *data() const { return m_data; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; }
inline T *data() { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; }
}; };
} // end namespace Eigen } // end namespace Eigen
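For orientation, here is a small usage sketch (not part of the diff; the function and variable names are made up) of the half-dynamic matrix types whose storage the DenseStorage specializations above manage, i.e. matrices with one compile-time and one run-time dimension:

    #include <Eigen/Dense>

    void halfDynamicStorageDemo()
    {
      // Fixed row count, run-time column count: backed by one of the DenseStorage
      // specializations above (only the dynamic extent is stored as a member).
      Eigen::Matrix<float, 3, Eigen::Dynamic> a(3, 4);
      a.setZero();
      a.conservativeResize(3, 6);   // realloc path: existing coefficients are kept

      // Run-time row count, fixed column count: the mirror specialization.
      Eigen::Matrix<float, Eigen::Dynamic, 3> b(5, 3);
      b.resize(8, 3);               // plain resize(): contents are not preserved
    }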
......
...@@ -21,7 +21,7 @@ namespace Eigen { ...@@ -21,7 +21,7 @@ namespace Eigen {
* \param MatrixType the type of the object in which we are taking a sub/main/super diagonal * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
* \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
* A positive value means a superdiagonal, a negative value means a subdiagonal. * A positive value means a superdiagonal, a negative value means a subdiagonal.
* You can also use Dynamic so the index can be set at runtime. * You can also use DynamicIndex so the index can be set at runtime.
* *
* The matrix is not required to be square. * The matrix is not required to be square.
* *
...@@ -37,23 +37,22 @@ template<typename MatrixType, int DiagIndex> ...@@ -37,23 +37,22 @@ template<typename MatrixType, int DiagIndex>
struct traits<Diagonal<MatrixType,DiagIndex> > struct traits<Diagonal<MatrixType,DiagIndex> >
: traits<MatrixType> : traits<MatrixType>
{ {
typedef typename nested<MatrixType>::type MatrixTypeNested; typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
typedef typename MatrixType::StorageKind StorageKind; typedef typename MatrixType::StorageKind StorageKind;
enum { enum {
RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
: (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
ColsAtCompileTime = 1, ColsAtCompileTime = 1,
MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
: DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
MatrixType::MaxColsAtCompileTime) MatrixType::MaxColsAtCompileTime)
: (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
MaxColsAtCompileTime = 1, MaxColsAtCompileTime = 1,
MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0, MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions
CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret, MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1, InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1,
OuterStrideAtCompileTime = 0 OuterStrideAtCompileTime = 0
...@@ -61,28 +60,40 @@ struct traits<Diagonal<MatrixType,DiagIndex> > ...@@ -61,28 +60,40 @@ struct traits<Diagonal<MatrixType,DiagIndex> >
}; };
} }
template<typename MatrixType, int DiagIndex> class Diagonal template<typename MatrixType, int _DiagIndex> class Diagonal
: public internal::dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type
{ {
public: public:
enum { DiagIndex = _DiagIndex };
typedef typename internal::dense_xpr_base<Diagonal>::type Base; typedef typename internal::dense_xpr_base<Diagonal>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {} EIGEN_DEVICE_FUNC
explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index)
{
eigen_assert( a_index <= m_matrix.cols() && -a_index <= m_matrix.rows() );
}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
EIGEN_DEVICE_FUNC
inline Index rows() const inline Index rows() const
{ return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); } {
return m_index.value()<0 ? numext::mini<Index>(m_matrix.cols(),m_matrix.rows()+m_index.value())
: numext::mini<Index>(m_matrix.rows(),m_matrix.cols()-m_index.value());
}
EIGEN_DEVICE_FUNC
inline Index cols() const { return 1; } inline Index cols() const { return 1; }
EIGEN_DEVICE_FUNC
inline Index innerStride() const inline Index innerStride() const
{ {
return m_matrix.outerStride() + 1; return m_matrix.outerStride() + 1;
} }
EIGEN_DEVICE_FUNC
inline Index outerStride() const inline Index outerStride() const
{ {
return 0; return 0;
...@@ -94,62 +105,75 @@ template<typename MatrixType, int DiagIndex> class Diagonal ...@@ -94,62 +105,75 @@ template<typename MatrixType, int DiagIndex> class Diagonal
const Scalar const Scalar
>::type ScalarWithConstIfNotLvalue; >::type ScalarWithConstIfNotLvalue;
inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); } EIGEN_DEVICE_FUNC
inline const Scalar* data() const { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); } inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
EIGEN_DEVICE_FUNC
inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index row, Index) inline Scalar& coeffRef(Index row, Index)
{ {
EIGEN_STATIC_ASSERT_LVALUE(MatrixType) EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); return m_matrix.coeffRef(row+rowOffset(), row+colOffset());
} }
EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index row, Index) const inline const Scalar& coeffRef(Index row, Index) const
{ {
return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); return m_matrix.coeffRef(row+rowOffset(), row+colOffset());
} }
EIGEN_DEVICE_FUNC
inline CoeffReturnType coeff(Index row, Index) const inline CoeffReturnType coeff(Index row, Index) const
{ {
return m_matrix.coeff(row+rowOffset(), row+colOffset()); return m_matrix.coeff(row+rowOffset(), row+colOffset());
} }
inline Scalar& coeffRef(Index index) EIGEN_DEVICE_FUNC
inline Scalar& coeffRef(Index idx)
{ {
EIGEN_STATIC_ASSERT_LVALUE(MatrixType) EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset());
} }
inline const Scalar& coeffRef(Index index) const EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index idx) const
{ {
return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset());
} }
inline CoeffReturnType coeff(Index index) const EIGEN_DEVICE_FUNC
inline CoeffReturnType coeff(Index idx) const
{ {
return m_matrix.coeff(index+rowOffset(), index+colOffset()); return m_matrix.coeff(idx+rowOffset(), idx+colOffset());
} }
const typename internal::remove_all<typename MatrixType::Nested>::type& EIGEN_DEVICE_FUNC
inline const typename internal::remove_all<typename MatrixType::Nested>::type&
nestedExpression() const nestedExpression() const
{ {
return m_matrix; return m_matrix;
} }
int index() const EIGEN_DEVICE_FUNC
inline Index index() const
{ {
return m_index.value(); return m_index.value();
} }
protected: protected:
typename MatrixType::Nested m_matrix; typename internal::ref_selector<MatrixType>::non_const_type m_matrix;
const internal::variable_if_dynamic<Index, DiagIndex> m_index; const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
private: private:
// some compilers may fail to optimize std::max etc in case of compile-time constants... // some compilers may fail to optimize std::max etc in case of compile-time constants...
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
// triger a compile time error is someone try to call packet // trigger a compile-time error if someone tries to call packet
template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const; template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const; template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
}; };
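A brief, illustrative sketch (not part of the patch) of how the Diagonal expression above is typically used: it is a strided, writable view into the underlying matrix, so reads and writes go straight through coeff()/coeffRef() and no temporary is allocated.

    #include <Eigen/Dense>

    double diagonalExpressionDemo()
    {
      Eigen::Matrix4d m = Eigen::Matrix4d::Random();

      m.diagonal().setZero();           // Diagonal<> keeps LvalueBit: writes land in m
      m.diagonal<1>() *= 2.0;           // first super-diagonal, compile-time DiagIndex
      m.diagonal(-1).setConstant(7.0);  // first sub-diagonal, index chosen at run time

      return m.diagonal().sum();        // a strided view of m: no copy, no allocation
    }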
...@@ -166,12 +190,12 @@ template<typename Derived> ...@@ -166,12 +190,12 @@ template<typename Derived>
inline typename MatrixBase<Derived>::DiagonalReturnType inline typename MatrixBase<Derived>::DiagonalReturnType
MatrixBase<Derived>::diagonal() MatrixBase<Derived>::diagonal()
{ {
return derived(); return DiagonalReturnType(derived());
} }
/** This is the const version of diagonal(). */ /** This is the const version of diagonal(). */
template<typename Derived> template<typename Derived>
inline const typename MatrixBase<Derived>::ConstDiagonalReturnType inline typename MatrixBase<Derived>::ConstDiagonalReturnType
MatrixBase<Derived>::diagonal() const MatrixBase<Derived>::diagonal() const
{ {
return ConstDiagonalReturnType(derived()); return ConstDiagonalReturnType(derived());
...@@ -189,18 +213,18 @@ MatrixBase<Derived>::diagonal() const ...@@ -189,18 +213,18 @@ MatrixBase<Derived>::diagonal() const
* *
* \sa MatrixBase::diagonal(), class Diagonal */ * \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived> template<typename Derived>
inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Dynamic>::Type inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index) MatrixBase<Derived>::diagonal(Index index)
{ {
return typename DiagonalIndexReturnType<Dynamic>::Type(derived(), index); return DiagonalDynamicIndexReturnType(derived(), index);
} }
/** This is the const version of diagonal(Index). */ /** This is the const version of diagonal(Index). */
template<typename Derived> template<typename Derived>
inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Dynamic>::Type inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index) const MatrixBase<Derived>::diagonal(Index index) const
{ {
return typename ConstDiagonalIndexReturnType<Dynamic>::Type(derived(), index); return ConstDiagonalDynamicIndexReturnType(derived(), index);
} }
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
...@@ -215,20 +239,20 @@ MatrixBase<Derived>::diagonal(Index index) const ...@@ -215,20 +239,20 @@ MatrixBase<Derived>::diagonal(Index index) const
* *
* \sa MatrixBase::diagonal(), class Diagonal */ * \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived> template<typename Derived>
template<int Index> template<int Index_>
inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index>::Type inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type
MatrixBase<Derived>::diagonal() MatrixBase<Derived>::diagonal()
{ {
return derived(); return typename DiagonalIndexReturnType<Index_>::Type(derived());
} }
/** This is the const version of diagonal<int>(). */ /** This is the const version of diagonal<int>(). */
template<typename Derived> template<typename Derived>
template<int Index> template<int Index_>
inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index>::Type inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type
MatrixBase<Derived>::diagonal() const MatrixBase<Derived>::diagonal() const
{ {
return derived(); return typename ConstDiagonalIndexReturnType<Index_>::Type(derived());
} }
} // end namespace Eigen } // end namespace Eigen
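If one wants to build on the diagonal() accessors defined above, a hypothetical helper might look as follows (the name diagSum is invented for illustration; it is not part of Eigen or of this patch):

    #include <Eigen/Dense>

    // Sum of the k-th diagonal of any dense expression
    // (k > 0: super-diagonal, k < 0: sub-diagonal).
    template <typename Derived>
    typename Derived::Scalar diagSum(const Eigen::MatrixBase<Derived>& m, Eigen::Index k = 0)
    {
      return m.diagonal(k).sum();
    }
    // Usage: diagSum(A) is the trace of A; diagSum(A, -1) sums the first sub-diagonal.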
......
...@@ -22,7 +22,7 @@ class DiagonalBase : public EigenBase<Derived> ...@@ -22,7 +22,7 @@ class DiagonalBase : public EigenBase<Derived>
typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::Scalar Scalar;
typedef typename DiagonalVectorType::RealScalar RealScalar; typedef typename DiagonalVectorType::RealScalar RealScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index; typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
enum { enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
...@@ -30,74 +30,61 @@ class DiagonalBase : public EigenBase<Derived> ...@@ -30,74 +30,61 @@ class DiagonalBase : public EigenBase<Derived>
MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
IsVectorAtCompileTime = 0, IsVectorAtCompileTime = 0,
Flags = 0 Flags = NoPreferredStorageOrderBit
}; };
typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType; typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType;
typedef DenseMatrixType DenseType; typedef DenseMatrixType DenseType;
typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject; typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject;
EIGEN_DEVICE_FUNC
inline const Derived& derived() const { return *static_cast<const Derived*>(this); } inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
EIGEN_DEVICE_FUNC
inline Derived& derived() { return *static_cast<Derived*>(this); } inline Derived& derived() { return *static_cast<Derived*>(this); }
EIGEN_DEVICE_FUNC
DenseMatrixType toDenseMatrix() const { return derived(); } DenseMatrixType toDenseMatrix() const { return derived(); }
template<typename DenseDerived>
void evalTo(MatrixBase<DenseDerived> &other) const; EIGEN_DEVICE_FUNC
template<typename DenseDerived>
void addTo(MatrixBase<DenseDerived> &other) const
{ other.diagonal() += diagonal(); }
template<typename DenseDerived>
void subTo(MatrixBase<DenseDerived> &other) const
{ other.diagonal() -= diagonal(); }
inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
EIGEN_DEVICE_FUNC
inline DiagonalVectorType& diagonal() { return derived().diagonal(); } inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
EIGEN_DEVICE_FUNC
inline Index rows() const { return diagonal().size(); } inline Index rows() const { return diagonal().size(); }
EIGEN_DEVICE_FUNC
inline Index cols() const { return diagonal().size(); } inline Index cols() const { return diagonal().size(); }
template<typename MatrixDerived> template<typename MatrixDerived>
const DiagonalProduct<MatrixDerived, Derived, OnTheLeft> EIGEN_DEVICE_FUNC
operator*(const MatrixBase<MatrixDerived> &matrix) const; const Product<Derived,MatrixDerived,LazyProduct>
operator*(const MatrixBase<MatrixDerived> &matrix) const
{
return Product<Derived, MatrixDerived, LazyProduct>(derived(),matrix.derived());
}
inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> > typedef DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> > InverseReturnType;
EIGEN_DEVICE_FUNC
inline const InverseReturnType
inverse() const inverse() const
{ {
return diagonal().cwiseInverse(); return InverseReturnType(diagonal().cwiseInverse());
} }
inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> > EIGEN_DEVICE_FUNC
inline const DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType,Scalar,product) >
operator*(const Scalar& scalar) const operator*(const Scalar& scalar) const
{ {
return diagonal() * scalar; return DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType,Scalar,product) >(diagonal() * scalar);
} }
friend inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> > EIGEN_DEVICE_FUNC
friend inline const DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,DiagonalVectorType,product) >
operator*(const Scalar& scalar, const DiagonalBase& other) operator*(const Scalar& scalar, const DiagonalBase& other)
{ {
return other.diagonal() * scalar; return DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,DiagonalVectorType,product) >(scalar * other.diagonal());
}
#ifdef EIGEN2_SUPPORT
template<typename OtherDerived>
bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
{
return diagonal().isApprox(other.diagonal(), precision);
}
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
{
return toDenseMatrix().isApprox(other, precision);
} }
#endif
}; };
template<typename Derived>
template<typename DenseDerived>
void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
{
other.setZero();
other.diagonal() = diagonal();
}
#endif #endif
/** \class DiagonalMatrix /** \class DiagonalMatrix
...@@ -119,10 +106,9 @@ struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> > ...@@ -119,10 +106,9 @@ struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
: traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> > : traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
{ {
typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
typedef Dense StorageKind; typedef DiagonalShape StorageKind;
typedef DenseIndex Index;
enum { enum {
Flags = LvalueBit Flags = LvalueBit | NoPreferredStorageOrderBit
}; };
}; };
} }
...@@ -136,7 +122,7 @@ class DiagonalMatrix ...@@ -136,7 +122,7 @@ class DiagonalMatrix
typedef const DiagonalMatrix& Nested; typedef const DiagonalMatrix& Nested;
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind; typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
typedef typename internal::traits<DiagonalMatrix>::Index Index; typedef typename internal::traits<DiagonalMatrix>::StorageIndex StorageIndex;
#endif #endif
protected: protected:
...@@ -146,24 +132,31 @@ class DiagonalMatrix ...@@ -146,24 +132,31 @@ class DiagonalMatrix
public: public:
/** const version of diagonal(). */ /** const version of diagonal(). */
EIGEN_DEVICE_FUNC
inline const DiagonalVectorType& diagonal() const { return m_diagonal; } inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
/** \returns a reference to the stored vector of diagonal coefficients. */ /** \returns a reference to the stored vector of diagonal coefficients. */
EIGEN_DEVICE_FUNC
inline DiagonalVectorType& diagonal() { return m_diagonal; } inline DiagonalVectorType& diagonal() { return m_diagonal; }
/** Default constructor without initialization */ /** Default constructor without initialization */
EIGEN_DEVICE_FUNC
inline DiagonalMatrix() {} inline DiagonalMatrix() {}
/** Constructs a diagonal matrix with given dimension */ /** Constructs a diagonal matrix with given dimension */
inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} EIGEN_DEVICE_FUNC
explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
/** 2D constructor. */ /** 2D constructor. */
EIGEN_DEVICE_FUNC
inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
/** 3D constructor. */ /** 3D constructor. */
EIGEN_DEVICE_FUNC
inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {} inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
/** Copy constructor. */ /** Copy constructor. */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {} inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
...@@ -173,11 +166,13 @@ class DiagonalMatrix ...@@ -173,11 +166,13 @@ class DiagonalMatrix
/** generic constructor from expression of the diagonal coefficients */ /** generic constructor from expression of the diagonal coefficients */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other) explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other)
{} {}
/** Copy operator. */ /** Copy operator. */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other) DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other)
{ {
m_diagonal = other.diagonal(); m_diagonal = other.diagonal();
...@@ -188,6 +183,7 @@ class DiagonalMatrix ...@@ -188,6 +183,7 @@ class DiagonalMatrix
/** This is a special case of the templated operator=. Its purpose is to /** This is a special case of the templated operator=. Its purpose is to
* prevent a default operator= from hiding the templated operator=. * prevent a default operator= from hiding the templated operator=.
*/ */
EIGEN_DEVICE_FUNC
DiagonalMatrix& operator=(const DiagonalMatrix& other) DiagonalMatrix& operator=(const DiagonalMatrix& other)
{ {
m_diagonal = other.diagonal(); m_diagonal = other.diagonal();
...@@ -196,14 +192,19 @@ class DiagonalMatrix ...@@ -196,14 +192,19 @@ class DiagonalMatrix
#endif #endif
/** Resizes to given size. */ /** Resizes to given size. */
EIGEN_DEVICE_FUNC
inline void resize(Index size) { m_diagonal.resize(size); } inline void resize(Index size) { m_diagonal.resize(size); }
/** Sets all coefficients to zero. */ /** Sets all coefficients to zero. */
EIGEN_DEVICE_FUNC
inline void setZero() { m_diagonal.setZero(); } inline void setZero() { m_diagonal.setZero(); }
/** Resizes and sets all coefficients to zero. */ /** Resizes and sets all coefficients to zero. */
EIGEN_DEVICE_FUNC
inline void setZero(Index size) { m_diagonal.setZero(size); } inline void setZero(Index size) { m_diagonal.setZero(size); }
/** Sets this matrix to be the identity matrix of the current size. */ /** Sets this matrix to be the identity matrix of the current size. */
EIGEN_DEVICE_FUNC
inline void setIdentity() { m_diagonal.setOnes(); } inline void setIdentity() { m_diagonal.setOnes(); }
/** Sets this matrix to be the identity matrix of the given size. */ /** Sets this matrix to be the identity matrix of the given size. */
EIGEN_DEVICE_FUNC
inline void setIdentity(Index size) { m_diagonal.setOnes(size); } inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
}; };
...@@ -227,14 +228,15 @@ struct traits<DiagonalWrapper<_DiagonalVectorType> > ...@@ -227,14 +228,15 @@ struct traits<DiagonalWrapper<_DiagonalVectorType> >
{ {
typedef _DiagonalVectorType DiagonalVectorType; typedef _DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::Scalar Scalar;
typedef typename DiagonalVectorType::Index Index; typedef typename DiagonalVectorType::StorageIndex StorageIndex;
typedef typename DiagonalVectorType::StorageKind StorageKind; typedef DiagonalShape StorageKind;
typedef typename traits<DiagonalVectorType>::XprKind XprKind;
enum { enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
Flags = traits<DiagonalVectorType>::Flags & LvalueBit Flags = (traits<DiagonalVectorType>::Flags & LvalueBit) | NoPreferredStorageOrderBit
}; };
}; };
} }
...@@ -250,9 +252,11 @@ class DiagonalWrapper ...@@ -250,9 +252,11 @@ class DiagonalWrapper
#endif #endif
/** Constructor from expression of diagonal coefficients to wrap. */ /** Constructor from expression of diagonal coefficients to wrap. */
inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {} EIGEN_DEVICE_FUNC
explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
/** \returns a const reference to the wrapped expression of diagonal coefficients. */ /** \returns a const reference to the wrapped expression of diagonal coefficients. */
EIGEN_DEVICE_FUNC
const DiagonalVectorType& diagonal() const { return m_diagonal; } const DiagonalVectorType& diagonal() const { return m_diagonal; }
protected: protected:
...@@ -272,7 +276,7 @@ template<typename Derived> ...@@ -272,7 +276,7 @@ template<typename Derived>
inline const DiagonalWrapper<const Derived> inline const DiagonalWrapper<const Derived>
MatrixBase<Derived>::asDiagonal() const MatrixBase<Derived>::asDiagonal() const
{ {
return derived(); return DiagonalWrapper<const Derived>(derived());
} }
/** \returns true if *this is approximately equal to a diagonal matrix, /** \returns true if *this is approximately equal to a diagonal matrix,
...@@ -284,13 +288,13 @@ MatrixBase<Derived>::asDiagonal() const ...@@ -284,13 +288,13 @@ MatrixBase<Derived>::asDiagonal() const
* \sa asDiagonal() * \sa asDiagonal()
*/ */
template<typename Derived> template<typename Derived>
bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const
{ {
if(cols() != rows()) return false; if(cols() != rows()) return false;
RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1); RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
for(Index j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
{ {
RealScalar absOnDiagonal = internal::abs(coeff(j,j)); RealScalar absOnDiagonal = numext::abs(coeff(j,j));
if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
} }
for(Index j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
...@@ -302,6 +306,38 @@ bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const ...@@ -302,6 +306,38 @@ bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const
return true; return true;
} }
namespace internal {
template<> struct storage_kind_to_shape<DiagonalShape> { typedef DiagonalShape Shape; };
struct Diagonal2Dense {};
template<> struct AssignmentKind<DenseShape,DiagonalShape> { typedef Diagonal2Dense Kind; };
// Diagonal matrix to Dense assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense>
{
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
dst.setZero();
dst.diagonal() = src.diagonal();
}
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() += src.diagonal(); }
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() -= src.diagonal(); }
};
} // namespace internal
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_DIAGONALMATRIX_H #endif // EIGEN_DIAGONALMATRIX_H
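Putting the diagonal types together, a hedged usage sketch of the paths introduced above (names are illustrative; the += line is the one the add_assign specialization of Assignment<..., Diagonal2Dense> should serve):

    #include <Eigen/Dense>

    void diagonalTypesDemo()
    {
      Eigen::Vector3d v(1.0, 2.0, 3.0);
      Eigen::Matrix3d M = Eigen::Matrix3d::Ones();

      M = v.asDiagonal();    // Diagonal2Dense assignment: resize if needed, setZero, copy diagonal
      M += v.asDiagonal();   // add_assign specialization: only the diagonal entries are touched

      Eigen::DiagonalMatrix<double, 3> D(2.0, 4.0, 8.0);
      Eigen::Matrix3d P = D.inverse() * M;   // inverse() stays a diagonal expression; the product is lazy
    }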
...@@ -13,109 +13,14 @@ ...@@ -13,109 +13,14 @@
namespace Eigen { namespace Eigen {
namespace internal {
template<typename MatrixType, typename DiagonalType, int ProductOrder>
struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
: traits<MatrixType>
{
typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
_StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
_PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
_SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
// FIXME currently we need same types, but in the future the next rule should be the one
//_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
};
};
}
template<typename MatrixType, typename DiagonalType, int ProductOrder>
class DiagonalProduct : internal::no_assignment_operator,
public MatrixBase<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
{
public:
typedef MatrixBase<DiagonalProduct> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct)
inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal)
: m_matrix(matrix), m_diagonal(diagonal)
{
eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
const Scalar coeff(Index row, Index col) const
{
return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
enum {
StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
};
const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
}
protected:
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const
{
return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
internal::pset1<PacketScalar>(m_diagonal.diagonal().coeff(id)));
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const
{
enum {
InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
};
return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
}
typename MatrixType::Nested m_matrix;
typename DiagonalType::Nested m_diagonal;
};
/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
*/ */
template<typename Derived> template<typename Derived>
template<typename DiagonalDerived> template<typename DiagonalDerived>
inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight> inline const Product<Derived, DiagonalDerived, LazyProduct>
MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &diagonal) const MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
{
return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), diagonal.derived());
}
/** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
*/
template<typename DiagonalDerived>
template<typename MatrixDerived>
inline const DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>
DiagonalBase<DiagonalDerived>::operator*(const MatrixBase<MatrixDerived> &matrix) const
{ {
return DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>(matrix.derived(), derived()); return Product<Derived, DiagonalDerived, LazyProduct>(derived(),a_diagonal.derived());
} }
} // end namespace Eigen } // end namespace Eigen
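With the DiagonalProduct class removed, diagonal-times-dense products are expressed as plain lazy Product expressions. A small sketch of the user-facing behaviour, which is unchanged (illustrative only):

    #include <Eigen/Dense>

    void diagonalProductDemo()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);
      Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(3, 1.0, 3.0);

      Eigen::MatrixXd colScaled = A * d.asDiagonal();             // scales the columns of A
      Eigen::MatrixXd rowScaled = d.asDiagonal() * A.transpose(); // scales the rows of A^T
    }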
......
...@@ -28,26 +28,33 @@ template<typename T, typename U, ...@@ -28,26 +28,33 @@ template<typename T, typename U,
> >
struct dot_nocheck struct dot_nocheck
{ {
typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar; typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) typedef typename conj_prod::result_type ResScalar;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
{ {
return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum(); return a.template binaryExpr<conj_prod>(b).sum();
} }
}; };
template<typename T, typename U> template<typename T, typename U>
struct dot_nocheck<T, U, true> struct dot_nocheck<T, U, true>
{ {
typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar; typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) typedef typename conj_prod::result_type ResScalar;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
{ {
return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum(); return a.transpose().template binaryExpr<conj_prod>(b).sum();
} }
}; };
} // end namespace internal } // end namespace internal
/** \returns the dot product of *this with other. /** \fn MatrixBase::dot
* \returns the dot product of *this with other.
* *
* \only_for_vectors * \only_for_vectors
* *
...@@ -59,100 +66,134 @@ struct dot_nocheck<T, U, true> ...@@ -59,100 +66,134 @@ struct dot_nocheck<T, U, true>
*/ */
template<typename Derived> template<typename Derived>
template<typename OtherDerived> template<typename OtherDerived>
typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func; typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
#endif
eigen_assert(size() == other.size()); eigen_assert(size() == other.size());
return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other); return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
} }
#ifdef EIGEN2_SUPPORT
/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable
* (conjugating the second variable). Of course this only makes a difference in the complex case.
*
* This method is only available in EIGEN2_SUPPORT mode.
*
* \only_for_vectors
*
* \sa dot()
*/
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
return internal::dot_nocheck<OtherDerived,Derived>::run(other,*this);
}
#endif
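A short sketch of the Eigen 3 dot-product convention implemented by dot_nocheck above (the demo function is made up): the first operand is the one that gets conjugated, unlike the removed Eigen 2 eigen2_dot().

    #include <Eigen/Dense>
    #include <complex>

    std::complex<double> dotConventionDemo()
    {
      Eigen::Vector2cd a(std::complex<double>(1, 2), std::complex<double>(0, 1));
      Eigen::Vector2cd b(std::complex<double>(3, 0), std::complex<double>(1, 1));

      // Eigen 3 convention: the first operand is conjugated, i.e. sum_i conj(a_i) * b_i.
      std::complex<double> s = a.dot(b);

      // Consistent with the norm: a.squaredNorm() == real(a.dot(a)).
      double n2 = a.squaredNorm();
      (void)n2;
      return s;
    }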
//---------- implementation of L2 norm and related functions ---------- //---------- implementation of L2 norm and related functions ----------
/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm. /** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
* In both cases, it consists of the sum of the squares of all the matrix entries. * In both cases, it consists of the sum of the squares of all the matrix entries.
* For vectors, this is also equal to the dot product of \c *this with itself. * For vectors, this is also equal to the dot product of \c *this with itself.
* *
* \sa dot(), norm() * \sa dot(), norm(), lpNorm()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
{ {
return internal::real((*this).cwiseAbs2().sum()); return numext::real((*this).cwiseAbs2().sum());
} }
/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm. /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
* In both cases, it consists of the square root of the sum of the squares of all the matrix entries. * In both cases, it consists of the square root of the sum of the squares of all the matrix entries.
* For vectors, this is also equal to the square root of the dot product of \c *this with itself. * For vectors, this is also equal to the square root of the dot product of \c *this with itself.
* *
* \sa dot(), squaredNorm() * \sa lpNorm(), dot(), squaredNorm()
*/ */
template<typename Derived> template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
{ {
return internal::sqrt(squaredNorm()); return numext::sqrt(squaredNorm());
} }
/** \returns an expression of the quotient of *this by its own norm. /** \returns an expression of the quotient of \c *this by its own norm.
*
* \warning If the input vector is too small (i.e., this->norm()==0),
* then this function returns a copy of the input.
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa norm(), normalize() * \sa norm(), normalize()
*/ */
template<typename Derived> template<typename Derived>
inline const typename MatrixBase<Derived>::PlainObject EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::normalized() const MatrixBase<Derived>::normalized() const
{ {
typedef typename internal::nested<Derived>::type Nested; typedef typename internal::nested_eval<Derived,2>::type _Nested;
typedef typename internal::remove_reference<Nested>::type _Nested;
_Nested n(derived()); _Nested n(derived());
return n / n.norm(); RealScalar z = n.squaredNorm();
// NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPUs
if(z>RealScalar(0))
return n / numext::sqrt(z);
else
return n;
} }
/** Normalizes the vector, i.e. divides it by its own norm. /** Normalizes the vector, i.e. divides it by its own norm.
* *
* \only_for_vectors * \only_for_vectors
* *
* \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
*
* \sa norm(), normalized() * \sa norm(), normalized()
*/ */
template<typename Derived> template<typename Derived>
inline void MatrixBase<Derived>::normalize() EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
{ {
*this /= norm(); RealScalar z = squaredNorm();
// NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPUs
if(z>RealScalar(0))
derived() /= numext::sqrt(z);
}
/** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow.
*
* \only_for_vectors
*
* This method is analogous to the normalized() method, but it reduces the risk of
* underflow and overflow when computing the norm.
*
* \warning If the input vector is too small (i.e., this->norm()==0),
* then this function returns a copy of the input.
*
* \sa stableNorm(), stableNormalize(), normalized()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::stableNormalized() const
{
typedef typename internal::nested_eval<Derived,3>::type _Nested;
_Nested n(derived());
RealScalar w = n.cwiseAbs().maxCoeff();
RealScalar z = (n/w).squaredNorm();
if(z>RealScalar(0))
return n / (numext::sqrt(z)*w);
else
return n;
}
/** Normalizes the vector while avoiding underflow and overflow
*
* \only_for_vectors
*
* This method is analogous to the normalize() method, but it reduces the risk of
* underflow and overflow when computing the norm.
*
* \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
*
* \sa stableNorm(), stableNormalized(), normalize()
*/
template<typename Derived>
EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize()
{
RealScalar w = cwiseAbs().maxCoeff();
RealScalar z = (derived()/w).squaredNorm();
if(z>RealScalar(0))
derived() /= numext::sqrt(z)*w;
} }
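To illustrate the difference between the plain and the stable normalization variants added above, a sketch assuming IEEE double precision (values in comments are approximate):

    #include <Eigen/Dense>

    void normalizationDemo()
    {
      Eigen::Vector3d v(3.0, 0.0, 4.0);
      Eigen::Vector3d u = v.normalized();  // (0.6, 0, 0.8); v itself is left untouched
      v.normalize();                       // in place; a zero vector would be left unchanged

      // Near the overflow limit squaredNorm() becomes inf and normalized() degrades,
      // while stableNormalized() first rescales by the largest absolute coefficient.
      Eigen::Vector2d big(1e308, 1e308);
      Eigen::Vector2d s = big.stableNormalized();  // approximately (0.7071, 0.7071)
      (void)u; (void)s;
    }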
//---------- implementation of other norms ---------- //---------- implementation of other norms ----------
...@@ -163,8 +204,10 @@ template<typename Derived, int p> ...@@ -163,8 +204,10 @@ template<typename Derived, int p>
struct lpNorm_selector struct lpNorm_selector
{ {
typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar; typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
EIGEN_DEVICE_FUNC
static inline RealScalar run(const MatrixBase<Derived>& m) static inline RealScalar run(const MatrixBase<Derived>& m)
{ {
EIGEN_USING_STD_MATH(pow)
return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
} }
}; };
...@@ -172,6 +215,7 @@ struct lpNorm_selector ...@@ -172,6 +215,7 @@ struct lpNorm_selector
template<typename Derived> template<typename Derived>
struct lpNorm_selector<Derived, 1> struct lpNorm_selector<Derived, 1>
{ {
EIGEN_DEVICE_FUNC
static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m) static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
{ {
return m.cwiseAbs().sum(); return m.cwiseAbs().sum();
...@@ -181,6 +225,7 @@ struct lpNorm_selector<Derived, 1> ...@@ -181,6 +225,7 @@ struct lpNorm_selector<Derived, 1>
template<typename Derived> template<typename Derived>
struct lpNorm_selector<Derived, 2> struct lpNorm_selector<Derived, 2>
{ {
EIGEN_DEVICE_FUNC
static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m) static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
{ {
return m.norm(); return m.norm();
...@@ -190,23 +235,35 @@ struct lpNorm_selector<Derived, 2> ...@@ -190,23 +235,35 @@ struct lpNorm_selector<Derived, 2>
template<typename Derived> template<typename Derived>
struct lpNorm_selector<Derived, Infinity> struct lpNorm_selector<Derived, Infinity>
{ {
static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m) typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
EIGEN_DEVICE_FUNC
static inline RealScalar run(const MatrixBase<Derived>& m)
{ {
if(Derived::SizeAtCompileTime==0 || (Derived::SizeAtCompileTime==Dynamic && m.size()==0))
return RealScalar(0);
return m.cwiseAbs().maxCoeff(); return m.cwiseAbs().maxCoeff();
} }
}; };
} // end namespace internal } // end namespace internal
/** \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values /** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values
* of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ * of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$
* norm, that is the maximum of the absolute values of the coefficients of *this. * norm, that is the maximum of the absolute values of the coefficients of \c *this.
*
* In all cases, if \c *this is empty, then the value 0 is returned.
*
* \note For matrices, this function does not compute the <a href="https://en.wikipedia.org/wiki/Operator_norm">operator-norm</a>. That is, if \c *this is a matrix, then its coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink.
* *
* \sa norm() * \sa norm()
*/ */
template<typename Derived> template<typename Derived>
template<int p> template<int p>
#ifndef EIGEN_PARSED_BY_DOXYGEN
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
#else
MatrixBase<Derived>::RealScalar
#endif
MatrixBase<Derived>::lpNorm() const MatrixBase<Derived>::lpNorm() const
{ {
return internal::lpNorm_selector<Derived, p>::run(*this); return internal::lpNorm_selector<Derived, p>::run(*this);
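A quick illustration of lpNorm and of the coefficient-wise caveat in the note above (sketch only; the last line follows the partial-reduction approach the note points to for the induced 1-norm):

    #include <Eigen/Dense>

    void lpNormDemo()
    {
      Eigen::Vector4d v(1.0, -2.0, 3.0, -4.0);
      double l1   = v.lpNorm<1>();                // 10: sum of absolute values
      double l2   = v.lpNorm<2>();                // same as v.norm()
      double linf = v.lpNorm<Eigen::Infinity>();  // 4: largest absolute coefficient

      // lpNorm is coefficient-wise, not an operator norm; the induced 1-norm of a
      // matrix can be obtained with a partial reduction instead.
      Eigen::Matrix2d A;
      A << 1, -2,
           3,  4;
      double inducedOneNorm = A.cwiseAbs().colwise().sum().maxCoeff();  // 6
      (void)l1; (void)l2; (void)linf; (void)inducedOneNorm;
    }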
...@@ -223,11 +280,11 @@ MatrixBase<Derived>::lpNorm() const ...@@ -223,11 +280,11 @@ MatrixBase<Derived>::lpNorm() const
template<typename Derived> template<typename Derived>
template<typename OtherDerived> template<typename OtherDerived>
bool MatrixBase<Derived>::isOrthogonal bool MatrixBase<Derived>::isOrthogonal
(const MatrixBase<OtherDerived>& other, RealScalar prec) const (const MatrixBase<OtherDerived>& other, const RealScalar& prec) const
{ {
typename internal::nested<Derived,2>::type nested(derived()); typename internal::nested_eval<Derived,2>::type nested(derived());
typename internal::nested<OtherDerived,2>::type otherNested(other.derived()); typename internal::nested_eval<OtherDerived,2>::type otherNested(other.derived());
return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
} }
/** \returns true if *this is approximately a unitary matrix, /** \returns true if *this is approximately a unitary matrix,
...@@ -242,15 +299,15 @@ bool MatrixBase<Derived>::isOrthogonal ...@@ -242,15 +299,15 @@ bool MatrixBase<Derived>::isOrthogonal
* Output: \verbinclude MatrixBase_isUnitary.out * Output: \verbinclude MatrixBase_isUnitary.out
*/ */
template<typename Derived> template<typename Derived>
bool MatrixBase<Derived>::isUnitary(RealScalar prec) const bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const
{ {
typename Derived::Nested nested(derived()); typename internal::nested_eval<Derived,1>::type self(derived());
for(Index i = 0; i < cols(); ++i) for(Index i = 0; i < cols(); ++i)
{ {
if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec)) if(!internal::isApprox(self.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
return false; return false;
for(Index j = 0; j < i; ++j) for(Index j = 0; j < i; ++j)
if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec)) if(!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast<Scalar>(1), prec))
return false; return false;
} }
return true; return true;
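A small sketch of the two predicates defined above, using the default precision (illustrative only; the demo function name is made up):

    #include <Eigen/Dense>
    #include <cmath>

    void orthogonalityDemo()
    {
      Eigen::Vector3d x(1.0, 0.0, 0.0), y(0.0, 2.0, 0.0);
      bool ortho = x.isOrthogonal(y);   // true: the dot product vanishes (up to prec)

      // A rotation about the z axis has orthonormal columns, hence isUnitary().
      const double c = std::cos(0.3), s = std::sin(0.3);
      Eigen::Matrix3d R;
      R << c, -s, 0,
           s,  c, 0,
           0,  0, 1;
      bool unitary    = R.isUnitary();          // true
      bool notUnitary = (2.0 * R).isUnitary();  // false: columns no longer have unit norm
      (void)ortho; (void)unitary; (void)notUnitary;
    }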
......
...@@ -13,7 +13,10 @@ ...@@ -13,7 +13,10 @@
namespace Eigen { namespace Eigen {
/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). /** \class EigenBase
* \ingroup Core_Module
*
* Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
* *
* In other words, an EigenBase object is an object that can be copied into a MatrixBase. * In other words, an EigenBase object is an object that can be copied into a MatrixBase.
* *
...@@ -21,39 +24,57 @@ namespace Eigen { ...@@ -21,39 +24,57 @@ namespace Eigen {
* *
* Notice that this class is trivial; it is only used to disambiguate overloaded functions. * Notice that this class is trivial; it is only used to disambiguate overloaded functions.
* *
* \sa \ref TopicClassHierarchy * \sa \blank \ref TopicClassHierarchy
*/ */
template<typename Derived> struct EigenBase template<typename Derived> struct EigenBase
{ {
// typedef typename internal::plain_matrix_type<Derived>::type PlainObject; // typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
/** \brief The interface type of indices
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
* \deprecated Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead.
* \sa StorageIndex, \ref TopicPreprocessorDirectives.
*/
typedef Eigen::Index Index;
// FIXME is it needed?
typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
/** \returns a reference to the derived object */ /** \returns a reference to the derived object */
EIGEN_DEVICE_FUNC
Derived& derived() { return *static_cast<Derived*>(this); } Derived& derived() { return *static_cast<Derived*>(this); }
/** \returns a const reference to the derived object */ /** \returns a const reference to the derived object */
EIGEN_DEVICE_FUNC
const Derived& derived() const { return *static_cast<const Derived*>(this); } const Derived& derived() const { return *static_cast<const Derived*>(this); }
EIGEN_DEVICE_FUNC
inline Derived& const_cast_derived() const inline Derived& const_cast_derived() const
{ return *static_cast<Derived*>(const_cast<EigenBase*>(this)); } { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }
EIGEN_DEVICE_FUNC
inline const Derived& const_derived() const inline const Derived& const_derived() const
{ return *static_cast<const Derived*>(this); } { return *static_cast<const Derived*>(this); }
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
EIGEN_DEVICE_FUNC
inline Index rows() const { return derived().rows(); } inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
EIGEN_DEVICE_FUNC
inline Index cols() const { return derived().cols(); } inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is rows()*cols(). /** \returns the number of coefficients, which is rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */ * \sa rows(), cols(), SizeAtCompileTime. */
EIGEN_DEVICE_FUNC
inline Index size() const { return rows() * cols(); } inline Index size() const { return rows() * cols(); }
/** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
template<typename Dest> inline void evalTo(Dest& dst) const template<typename Dest>
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& dst) const
{ derived().evalTo(dst); } { derived().evalTo(dst); }
/** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */ /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */
template<typename Dest> inline void addTo(Dest& dst) const template<typename Dest>
EIGEN_DEVICE_FUNC
inline void addTo(Dest& dst) const
{ {
// This is the default implementation, // This is the default implementation,
// derived class can reimplement it in a more optimized way. // derived class can reimplement it in a more optimized way.
...@@ -63,7 +84,9 @@ template<typename Derived> struct EigenBase ...@@ -63,7 +84,9 @@ template<typename Derived> struct EigenBase
} }
/** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */ /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */
template<typename Dest> inline void subTo(Dest& dst) const template<typename Dest>
EIGEN_DEVICE_FUNC
inline void subTo(Dest& dst) const
{ {
// This is the default implementation, // This is the default implementation,
// derived class can reimplement it in a more optimized way. // derived class can reimplement it in a more optimized way.
...@@ -73,7 +96,8 @@ template<typename Derived> struct EigenBase ...@@ -73,7 +96,8 @@ template<typename Derived> struct EigenBase
} }
/** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */ /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */
template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const template<typename Dest>
EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const
{ {
// This is the default implementation, // This is the default implementation,
// derived class can reimplement it in a more optimized way. // derived class can reimplement it in a more optimized way.
...@@ -81,7 +105,8 @@ template<typename Derived> struct EigenBase ...@@ -81,7 +105,8 @@ template<typename Derived> struct EigenBase
} }
/** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */ /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */
template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const template<typename Dest>
EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const
{ {
// This is the default implementation, // This is the default implementation,
// derived class can reimplement it in a more optimized way. // derived class can reimplement it in a more optimized way.
...@@ -104,57 +129,31 @@ template<typename Derived> struct EigenBase ...@@ -104,57 +129,31 @@ template<typename Derived> struct EigenBase
*/ */
template<typename Derived> template<typename Derived>
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other) Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{ {
other.derived().evalTo(derived()); call_assignment(derived(), other.derived());
return derived(); return derived();
} }
template<typename Derived> template<typename Derived>
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other) Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{ {
other.derived().addTo(derived()); call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived(); return derived();
} }
template<typename Derived> template<typename Derived>
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other) Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{ {
other.derived().subTo(derived()); call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*/
template<typename Derived>
template<typename OtherDerived>
inline Derived&
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
return derived(); return derived();
} }
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=() */
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
}
/** replaces \c *this by \c *this * \a other. */
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheLeft(derived());
}
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_EIGENBASE_H #endif // EIGEN_EIGENBASE_H
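The EigenBase interface kept here (derived(), rows(), cols(), size()) is what lets generic code accept any Eigen object without naming a concrete matrix type. A small sketch of that pattern; the helper name printShape is mine, not something from this patch:

#include <Eigen/Dense>
#include <iostream>

// Accepts anything modelling EigenBase: plain matrices, vectors, expressions, ...
template<typename Derived>
void printShape(const Eigen::EigenBase<Derived>& x)
{
  std::cout << x.rows() << " x " << x.cols()
            << " (" << x.size() << " coefficients)\n";
}

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(2, 3);
  printShape(m);              // 2 x 3 (6 coefficients)
  printShape(m.transpose());  // 3 x 2 (6 coefficients) -- expressions bind too
  Eigen::Vector4f v;
  printShape(v);              // 4 x 1 (4 coefficients)
  return 0;
}

Because both plain matrices and expression types such as Transpose<MatrixXd> derive from EigenBase<Derived>, the same template handles them all.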
@@ -39,29 +39,29 @@ template<typename ExpressionType> class ForceAlignedAccess
     typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base;
     EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)
-    inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
+    EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
-    inline Index rows() const { return m_expression.rows(); }
+    EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }
-    inline Index cols() const { return m_expression.cols(); }
+    EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }
-    inline Index outerStride() const { return m_expression.outerStride(); }
+    EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }
-    inline Index innerStride() const { return m_expression.innerStride(); }
+    EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }
-    inline const CoeffReturnType coeff(Index row, Index col) const
+    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const
     {
       return m_expression.coeff(row, col);
     }
-    inline Scalar& coeffRef(Index row, Index col)
+    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col)
     {
       return m_expression.const_cast_derived().coeffRef(row, col);
     }
-    inline const CoeffReturnType coeff(Index index) const
+    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const
     {
       return m_expression.coeff(index);
     }
-    inline Scalar& coeffRef(Index index)
+    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index)
     {
       return m_expression.const_cast_derived().coeffRef(index);
     }
@@ -90,7 +90,7 @@ template<typename ExpressionType> class ForceAlignedAccess
       m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
     }
-    operator const ExpressionType&() const { return m_expression; }
+    EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
   protected:
     const ExpressionType& m_expression;
@@ -127,7 +127,7 @@ template<bool Enable>
 inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type
 MatrixBase<Derived>::forceAlignedAccessIf() const
 {
-  return derived();
+  return derived(); // FIXME This should not work but apparently is never used
 }
 /** \returns an expression of *this with forced aligned access if \a Enable is true.
@@ -138,7 +138,7 @@ template<bool Enable>
 inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type
 MatrixBase<Derived>::forceAlignedAccessIf()
 {
-  return derived();
+  return derived(); // FIXME This should not work but apparently is never used
 }
 } // end namespace Eigen
...
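Most of the edits in these two files simply prepend EIGEN_DEVICE_FUNC so that the accessors are also callable from CUDA device code. Conceptually the macro boils down to the following simplified sketch; the real definition in Eigen/src/Core/util/Macros.h covers more compilers and corner cases:

// Simplified illustration only -- not the actual Eigen definition.
#if defined(__CUDACC__)
  // Under nvcc, annotated functions are compiled for both host and device.
  #define EIGEN_DEVICE_FUNC __host__ __device__
#else
  // On a plain host compiler the annotation disappears entirely.
  #define EIGEN_DEVICE_FUNC
#endif

// Example: a member declared as
//   EIGEN_DEVICE_FUNC inline Index rows() const { return derived().rows(); }
// can then be called from ordinary CPU code as well as from CUDA kernels.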