Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
lietorch
Commits
266d4fd9
Commit
266d4fd9
authored
Jun 03, 2025
by
zhanggzh
Browse files
add lietorch src code and eigen src code, update readme
parent
e7df8655
Changes
148
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
7195 additions
and
0 deletions
+7195
-0
eigen-master/Eigen/src/Core/MatrixBase.h
eigen-master/Eigen/src/Core/MatrixBase.h
+542
-0
eigen-master/Eigen/src/Core/NestByValue.h
eigen-master/Eigen/src/Core/NestByValue.h
+91
-0
eigen-master/Eigen/src/Core/NoAlias.h
eigen-master/Eigen/src/Core/NoAlias.h
+102
-0
eigen-master/Eigen/src/Core/NumTraits.h
eigen-master/Eigen/src/Core/NumTraits.h
+321
-0
eigen-master/Eigen/src/Core/PartialReduxEvaluator.h
eigen-master/Eigen/src/Core/PartialReduxEvaluator.h
+253
-0
eigen-master/Eigen/src/Core/PermutationMatrix.h
eigen-master/Eigen/src/Core/PermutationMatrix.h
+552
-0
eigen-master/Eigen/src/Core/PlainObjectBase.h
eigen-master/Eigen/src/Core/PlainObjectBase.h
+1014
-0
eigen-master/Eigen/src/Core/Product.h
eigen-master/Eigen/src/Core/Product.h
+307
-0
eigen-master/Eigen/src/Core/ProductEvaluators.h
eigen-master/Eigen/src/Core/ProductEvaluators.h
+1271
-0
eigen-master/Eigen/src/Core/Random.h
eigen-master/Eigen/src/Core/Random.h
+207
-0
eigen-master/Eigen/src/Core/RandomImpl.h
eigen-master/Eigen/src/Core/RandomImpl.h
+255
-0
eigen-master/Eigen/src/Core/Redux.h
eigen-master/Eigen/src/Core/Redux.h
+535
-0
eigen-master/Eigen/src/Core/Ref.h
eigen-master/Eigen/src/Core/Ref.h
+383
-0
eigen-master/Eigen/src/Core/Replicate.h
eigen-master/Eigen/src/Core/Replicate.h
+130
-0
eigen-master/Eigen/src/Core/Reshaped.h
eigen-master/Eigen/src/Core/Reshaped.h
+398
-0
eigen-master/Eigen/src/Core/ReturnByValue.h
eigen-master/Eigen/src/Core/ReturnByValue.h
+111
-0
eigen-master/Eigen/src/Core/Reverse.h
eigen-master/Eigen/src/Core/Reverse.h
+202
-0
eigen-master/Eigen/src/Core/Select.h
eigen-master/Eigen/src/Core/Select.h
+156
-0
eigen-master/Eigen/src/Core/SelfAdjointView.h
eigen-master/Eigen/src/Core/SelfAdjointView.h
+329
-0
eigen-master/Eigen/src/Core/SelfCwiseBinaryOp.h
eigen-master/Eigen/src/Core/SelfCwiseBinaryOp.h
+36
-0
No files found.
Too many changes to show.
To preserve performance only
148 of 148+
files are displayed.
Plain diff
Email patch
eigen-master/Eigen/src/Core/MatrixBase.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIXBASE_H
#define EIGEN_MATRIXBASE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class MatrixBase
* \ingroup Core_Module
*
* \brief Base class for all dense matrices, vectors, and expressions
*
* This class is the base that is inherited by all matrix, vector, and related expression
* types. Most of the Eigen API is contained in this class, and its base classes. Other important
* classes for the Eigen API are Matrix, and VectorwiseOp.
*
* Note that some methods are defined in other modules such as the \ref LU_Module LU module
* for all functions related to matrix inversions.
*
* \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
*
* When writing a function taking Eigen objects as argument, if you want your function
* to take as argument any matrix, vector, or expression, just let it take a
* MatrixBase argument. As an example, here is a function printFirstRow which, given
* a matrix, vector, or expression \a x, prints the first row of \a x.
*
* \code
template<typename Derived>
void printFirstRow(const Eigen::MatrixBase<Derived>& x)
{
cout << x.row(0) << endl;
}
* \endcode
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
*
* \sa \blank \ref TopicClassHierarchy
*/
template <typename Derived>
class MatrixBase : public DenseBase<Derived> {
 public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef MatrixBase StorageBaseType;
  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef DenseBase<Derived> Base;
  using Base::ColsAtCompileTime;
  using Base::Flags;
  using Base::IsVectorAtCompileTime;
  using Base::MaxColsAtCompileTime;
  using Base::MaxRowsAtCompileTime;
  using Base::MaxSizeAtCompileTime;
  using Base::RowsAtCompileTime;
  using Base::SizeAtCompileTime;

  using Base::coeff;
  using Base::coeffRef;
  using Base::cols;
  using Base::const_cast_derived;
  using Base::derived;
  using Base::eval;
  using Base::lazyAssign;
  using Base::rows;
  using Base::size;
  using Base::operator-;
  using Base::operator+=;
  using Base::operator-=;
  using Base::operator*=;
  using Base::operator/=;

  typedef typename Base::CoeffReturnType CoeffReturnType;
  typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
  typedef typename Base::RowXpr RowXpr;
  typedef typename Base::ColXpr ColXpr;
#endif  // not EIGEN_PARSED_BY_DOXYGEN

#ifndef EIGEN_PARSED_BY_DOXYGEN
  /** type of the equivalent square matrix */
  typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
                 internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)>
      SquareMatrixType;
#endif  // not EIGEN_PARSED_BY_DOXYGEN

  /** \returns the size of the main diagonal, which is min(rows(),cols()).
   * \sa rows(), cols(), SizeAtCompileTime. */
  EIGEN_DEVICE_FUNC inline Index diagonalSize() const { return (numext::mini)(rows(), cols()); }

  typedef typename Base::PlainObject PlainObject;

#ifndef EIGEN_PARSED_BY_DOXYGEN
  /** \internal Represents a matrix with all coefficients equal to one another*/
  typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
  /** \internal the return type of MatrixBase::adjoint() */
  typedef std::conditional_t<NumTraits<Scalar>::IsComplex,
                             CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
                             ConstTransposeReturnType>
      AdjointReturnType;
  /** \internal Return type of eigenvalues() */
  typedef Matrix<internal::make_complex_t<Scalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor>
      EigenvaluesReturnType;
  /** \internal the return type of identity */
  typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>, PlainObject> IdentityReturnType;
  /** \internal the return type of unit vectors */
  typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
                internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>
      BasisReturnType;
#endif  // not EIGEN_PARSED_BY_DOXYGEN

#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
#define EIGEN_DOC_UNARY_ADDONS(X, Y)
#include "../plugins/CommonCwiseBinaryOps.inc"
#include "../plugins/MatrixCwiseUnaryOps.inc"
#include "../plugins/MatrixCwiseBinaryOps.inc"
#ifdef EIGEN_MATRIXBASE_PLUGIN
#include EIGEN_MATRIXBASE_PLUGIN
#endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_UNARY_ADDONS

  /** Special case of the template operator=, in order to prevent the compiler
   * from generating a default operator= (issue hit with g++ 4.1)
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const MatrixBase& other);

  // We cannot inherit here via Base::operator= since it is causing
  // trouble with MSVC.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const MatrixBase<OtherDerived>& other);
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const MatrixBase<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC const Product<Derived, OtherDerived> operator*(const MatrixBase<OtherDerived>& other) const;

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC const Product<Derived, OtherDerived, LazyProduct> lazyProduct(
      const MatrixBase<OtherDerived>& other) const;

  template <typename OtherDerived>
  Derived& operator*=(const EigenBase<OtherDerived>& other);

  template <typename OtherDerived>
  void applyOnTheLeft(const EigenBase<OtherDerived>& other);

  template <typename OtherDerived>
  void applyOnTheRight(const EigenBase<OtherDerived>& other);

  template <typename DiagonalDerived>
  EIGEN_DEVICE_FUNC const Product<Derived, DiagonalDerived, LazyProduct> operator*(
      const DiagonalBase<DiagonalDerived>& diagonal) const;

  template <typename SkewDerived>
  EIGEN_DEVICE_FUNC const Product<Derived, SkewDerived, LazyProduct> operator*(
      const SkewSymmetricBase<SkewDerived>& skew) const;

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,
                                                  typename internal::traits<OtherDerived>::Scalar>::ReturnType
  dot(const MatrixBase<OtherDerived>& other) const;

  EIGEN_DEVICE_FUNC RealScalar squaredNorm() const;
  EIGEN_DEVICE_FUNC RealScalar norm() const;
  RealScalar stableNorm() const;
  RealScalar blueNorm() const;
  RealScalar hypotNorm() const;
  EIGEN_DEVICE_FUNC const PlainObject normalized() const;
  EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const;
  EIGEN_DEVICE_FUNC void normalize();
  EIGEN_DEVICE_FUNC void stableNormalize();

  EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const;
  EIGEN_DEVICE_FUNC void adjointInPlace();

  typedef Diagonal<Derived> DiagonalReturnType;
  EIGEN_DEVICE_FUNC DiagonalReturnType diagonal();

  typedef Diagonal<const Derived> ConstDiagonalReturnType;
  EIGEN_DEVICE_FUNC const ConstDiagonalReturnType diagonal() const;

  template <int Index>
  EIGEN_DEVICE_FUNC Diagonal<Derived, Index> diagonal();

  template <int Index>
  EIGEN_DEVICE_FUNC const Diagonal<const Derived, Index> diagonal() const;

  EIGEN_DEVICE_FUNC Diagonal<Derived, DynamicIndex> diagonal(Index index);
  EIGEN_DEVICE_FUNC const Diagonal<const Derived, DynamicIndex> diagonal(Index index) const;

  template <unsigned int Mode>
  struct TriangularViewReturnType {
    typedef TriangularView<Derived, Mode> Type;
  };
  template <unsigned int Mode>
  struct ConstTriangularViewReturnType {
    typedef const TriangularView<const Derived, Mode> Type;
  };

  template <unsigned int Mode>
  EIGEN_DEVICE_FUNC typename TriangularViewReturnType<Mode>::Type triangularView();
  template <unsigned int Mode>
  EIGEN_DEVICE_FUNC typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;

  template <unsigned int UpLo>
  struct SelfAdjointViewReturnType {
    typedef SelfAdjointView<Derived, UpLo> Type;
  };
  template <unsigned int UpLo>
  struct ConstSelfAdjointViewReturnType {
    typedef const SelfAdjointView<const Derived, UpLo> Type;
  };

  template <unsigned int UpLo>
  EIGEN_DEVICE_FUNC typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
  template <unsigned int UpLo>
  EIGEN_DEVICE_FUNC typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;

  const SparseView<Derived> sparseView(
      const Scalar& m_reference = Scalar(0),
      const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;

  EIGEN_DEVICE_FUNC static const IdentityReturnType Identity();
  EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols);
  EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i);
  EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i);
  EIGEN_DEVICE_FUNC static const BasisReturnType UnitX();
  EIGEN_DEVICE_FUNC static const BasisReturnType UnitY();
  EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ();
  EIGEN_DEVICE_FUNC static const BasisReturnType UnitW();

  EIGEN_DEVICE_FUNC const DiagonalWrapper<const Derived> asDiagonal() const;
  const PermutationWrapper<const Derived> asPermutation() const;
  EIGEN_DEVICE_FUNC const SkewSymmetricWrapper<const Derived> asSkewSymmetric() const;

  EIGEN_DEVICE_FUNC Derived& setIdentity();
  EIGEN_DEVICE_FUNC Derived& setIdentity(Index rows, Index cols);
  EIGEN_DEVICE_FUNC Derived& setUnit(Index i);
  EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i);

  bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

  bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

  bool isSkewSymmetric(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

  template <typename OtherDerived>
  bool isOrthogonal(const MatrixBase<OtherDerived>& other,
                    const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

  /** \returns true if each coefficients of \c *this and \a other are all exactly equal.
   * \warning When using floating point scalar values you probably should rather use a
   * fuzzy comparison such as isApprox()
   * \sa isApprox(), operator!= */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase<OtherDerived>& other) const {
    return (this->rows() == other.rows()) && (this->cols() == other.cols()) && cwiseEqual(other).all();
  }

  /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other.
   * \warning When using floating point scalar values you probably should rather use a
   * fuzzy comparison such as isApprox()
   * \sa isApprox(), operator== */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase<OtherDerived>& other) const {
    return !(*this == other);
  }

  NoAlias<Derived, Eigen::MatrixBase> EIGEN_DEVICE_FUNC noalias();

  // TODO forceAlignedAccess is temporarily disabled
  // Need to find a nicer workaround.
  inline const Derived& forceAlignedAccess() const { return derived(); }
  inline Derived& forceAlignedAccess() { return derived(); }
  template <bool Enable>
  inline const Derived& forceAlignedAccessIf() const {
    return derived();
  }
  template <bool Enable>
  inline Derived& forceAlignedAccessIf() {
    return derived();
  }

  EIGEN_DEVICE_FUNC Scalar trace() const;

  template <int p>
  EIGEN_DEVICE_FUNC RealScalar lpNorm() const;

  EIGEN_DEVICE_FUNC MatrixBase<Derived>& matrix() { return *this; }
  EIGEN_DEVICE_FUNC const MatrixBase<Derived>& matrix() const { return *this; }

  /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix
   * \sa ArrayBase::matrix() */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper<Derived> array() { return ArrayWrapper<Derived>(derived()); }
  /** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix
   * \sa ArrayBase::matrix() */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper<const Derived> array() const {
    return ArrayWrapper<const Derived>(derived());
  }

  /////////// LU module ///////////

  template <typename PermutationIndex = DefaultPermutationIndex>
  inline const FullPivLU<PlainObject, PermutationIndex> fullPivLu() const;
  template <typename PermutationIndex = DefaultPermutationIndex>
  inline const PartialPivLU<PlainObject, PermutationIndex> partialPivLu() const;
  template <typename PermutationIndex = DefaultPermutationIndex>
  inline const PartialPivLU<PlainObject, PermutationIndex> lu() const;

  EIGEN_DEVICE_FUNC inline const Inverse<Derived> inverse() const;

  template <typename ResultType>
  inline void computeInverseAndDetWithCheck(
      ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible,
      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()) const;

  template <typename ResultType>
  inline void computeInverseWithCheck(
      ResultType& inverse, bool& invertible,
      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()) const;

  EIGEN_DEVICE_FUNC Scalar determinant() const;

  /////////// Cholesky module ///////////

  inline const LLT<PlainObject> llt() const;
  inline const LDLT<PlainObject> ldlt() const;

  /////////// QR module ///////////

  inline const HouseholderQR<PlainObject> householderQr() const;
  template <typename PermutationIndex = DefaultPermutationIndex>
  inline const ColPivHouseholderQR<PlainObject, PermutationIndex> colPivHouseholderQr() const;
  template <typename PermutationIndex = DefaultPermutationIndex>
  inline const FullPivHouseholderQR<PlainObject, PermutationIndex> fullPivHouseholderQr() const;
  template <typename PermutationIndex = DefaultPermutationIndex>
  inline const CompleteOrthogonalDecomposition<PlainObject, PermutationIndex> completeOrthogonalDecomposition() const;

  /////////// Eigenvalues module ///////////

  inline EigenvaluesReturnType eigenvalues() const;
  inline RealScalar operatorNorm() const;

  /////////// SVD module ///////////

  template <int Options = 0>
  inline JacobiSVD<PlainObject, Options> jacobiSvd() const;
  template <int Options = 0>
  EIGEN_DEPRECATED inline JacobiSVD<PlainObject, Options> jacobiSvd(unsigned int computationOptions) const;

  template <int Options = 0>
  inline BDCSVD<PlainObject, Options> bdcSvd() const;
  template <int Options = 0>
  EIGEN_DEPRECATED inline BDCSVD<PlainObject, Options> bdcSvd(unsigned int computationOptions) const;

  /////////// Geometry module ///////////

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC inline typename internal::cross_impl<Derived, OtherDerived>::return_type cross(
      const MatrixBase<OtherDerived>& other) const;

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC inline PlainObject cross3(const MatrixBase<OtherDerived>& other) const;

  EIGEN_DEVICE_FUNC inline PlainObject unitOrthogonal(void) const;

  EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline Matrix<Scalar, 3, 1> eulerAngles(Index a0, Index a1, Index a2) const;
  EIGEN_DEVICE_FUNC inline Matrix<Scalar, 3, 1> canonicalEulerAngles(Index a0, Index a1, Index a2) const;

  // put this as separate enum value to work around possible GCC 4.3 bug (?)
  enum {
    HomogeneousReturnTypeDirection =
        ColsAtCompileTime == 1 && RowsAtCompileTime == 1
            ? ((internal::traits<Derived>::Flags & RowMajorBit) == RowMajorBit ? Horizontal : Vertical)
        : ColsAtCompileTime == 1 ? Vertical
                                 : Horizontal
  };
  typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
  EIGEN_DEVICE_FUNC inline HomogeneousReturnType homogeneous() const;

  enum { SizeMinusOne = SizeAtCompileTime == Dynamic ? Dynamic : SizeAtCompileTime - 1 };
  typedef Block<const Derived, internal::traits<Derived>::ColsAtCompileTime == 1 ? SizeMinusOne : 1,
                internal::traits<Derived>::ColsAtCompileTime == 1 ? 1 : SizeMinusOne>
      ConstStartMinusOne;
  typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne, Scalar, quotient) HNormalizedReturnType;
  EIGEN_DEVICE_FUNC inline const HNormalizedReturnType hnormalized() const;

  ////////// Householder module ///////////

  EIGEN_DEVICE_FUNC void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
  template <typename EssentialPart>
  EIGEN_DEVICE_FUNC void makeHouseholder(EssentialPart& essential, Scalar& tau, RealScalar& beta) const;
  template <typename EssentialPart>
  EIGEN_DEVICE_FUNC void applyHouseholderOnTheLeft(const EssentialPart& essential, const Scalar& tau,
                                                   Scalar* workspace);
  template <typename EssentialPart>
  EIGEN_DEVICE_FUNC void applyHouseholderOnTheRight(const EssentialPart& essential, const Scalar& tau,
                                                    Scalar* workspace);

  ///////// Jacobi module /////////

  template <typename OtherScalar>
  EIGEN_DEVICE_FUNC void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
  template <typename OtherScalar>
  EIGEN_DEVICE_FUNC void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);

  ///////// SparseCore module /////////

  template <typename OtherDerived>
  EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
  cwiseProduct(const SparseMatrixBase<OtherDerived>& other) const {
    return other.cwiseProduct(derived());
  }

  ///////// MatrixFunctions module /////////

  typedef typename internal::stem_function<Scalar>::type StemFunction;

#define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description)                                                        \
  /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a           \
   * href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the \
   * coefficient-wise Description use ArrayBase::##Name . */                                                        \
  const ReturnType<Derived> Name() const;
#define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument)                                            \
  /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a           \
   * href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the \
   * coefficient-wise Description use ArrayBase::##Name . */                                                        \
  const ReturnType<Derived> Name(Argument) const;

  EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential)
  /** \brief Helper function for the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported
   * MatrixFunctions module</a>.*/
  const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine)
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine)
  // Fixed doc description: atanh is the inverse hyperbolic *tangent* (was mis-described as "cosine").
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic tangent)
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine)
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine)
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine)
  EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine)
  EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root)
  EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm)
  EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p)
  EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const internal::make_complex_t<Scalar>& p)

 protected:
  EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase)
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase)

 private:
  EIGEN_DEVICE_FUNC explicit MatrixBase(int);
  EIGEN_DEVICE_FUNC MatrixBase(int, int);
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase<OtherDerived>&);

 protected:
  // mixing arrays and matrices is not legal
  template <typename OtherDerived>
  Derived& operator+=(const ArrayBase<OtherDerived>&) {
    EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
                        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
    return *this;
  }

  // mixing arrays and matrices is not legal
  template <typename OtherDerived>
  Derived& operator-=(const ArrayBase<OtherDerived>&) {
    EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
                        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
    return *this;
  }
};
/***************************************************************************
* Implementation of matrix base methods
***************************************************************************/
/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template <typename Derived>
template <typename OtherDerived>
inline Derived& MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived>& other) {
  // Delegate to the right-hand side: it knows how to multiply itself onto us in place.
  Derived& self = derived();
  other.derived().applyThisOnTheRight(self);
  return self;
}
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template <typename Derived>
template <typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived>& other) {
  // Same effect as operator*=, but without returning a reference to *this.
  other.derived().applyThisOnTheRight(derived());
}
/** replaces \c *this by \a other * \c *this.
*
* Example: \include MatrixBase_applyOnTheLeft.cpp
* Output: \verbinclude MatrixBase_applyOnTheLeft.out
*/
template <typename Derived>
template <typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived>& other) {
  // Let the left-hand operand apply itself onto *this: *this = other * *this.
  other.derived().applyThisOnTheLeft(derived());
}
}
// end namespace Eigen
#endif // EIGEN_MATRIXBASE_H
eigen-master/Eigen/src/Core/NestByValue.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_NESTBYVALUE_H
#define EIGEN_NESTBYVALUE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// NestByValue inherits the traits of the wrapped expression, except that the
// NestByRefBit is cleared: the whole point of NestByValue is to be nested by value.
template <typename ExpressionType>
struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType> {
  enum { Flags = traits<ExpressionType>::Flags & ~NestByRefBit };
};
}
// namespace internal
/** \class NestByValue
* \ingroup Core_Module
*
* \brief Expression which must be nested by value
*
* \tparam ExpressionType the type of the object of which we are requiring nesting-by-value
*
* This class is the return type of MatrixBase::nestByValue()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::nestByValue()
*/
template <typename ExpressionType>
class NestByValue : public internal::dense_xpr_base<NestByValue<ExpressionType> >::type {
 public:
  typedef typename internal::dense_xpr_base<NestByValue>::type Base;

  // Direct-access members (data()/strides) are only enabled when the wrapped
  // expression itself provides direct access.
  static constexpr bool HasDirectAccess = internal::has_direct_access<ExpressionType>::ret;

  EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)

  /// Wrap \a matrix, storing a private copy of it.
  EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}

  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }

  /// Implicit conversion back to the wrapped expression type.
  EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }

  /// \returns the expression stored by value inside this wrapper.
  EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; }

  // The three members below forward to the stored expression and are
  // SFINAE-gated on HasDirectAccess.
  EIGEN_DEVICE_FUNC typename std::enable_if<HasDirectAccess, const Scalar*>::type data() const {
    return m_expression.data();
  }

  EIGEN_DEVICE_FUNC typename std::enable_if<HasDirectAccess, Index>::type innerStride() const {
    return m_expression.innerStride();
  }

  EIGEN_DEVICE_FUNC typename std::enable_if<HasDirectAccess, Index>::type outerStride() const {
    return m_expression.outerStride();
  }

 protected:
  // Held by value on purpose — that is the whole point of this wrapper.
  const ExpressionType m_expression;
};
/** \returns an expression of the temporary version of *this.
*/
template <typename Derived>
EIGEN_DEVICE_FUNC inline const NestByValue<Derived> DenseBase<Derived>::nestByValue() const {
  // Simply wrap the derived expression in a by-value nesting wrapper.
  return NestByValue<Derived>(derived());
}
namespace
internal
{
// Evaluator of Solve -> eval into a temporary
// Evaluating a NestByValue is the same as evaluating the expression it wraps.
template <typename ArgType>
struct evaluator<NestByValue<ArgType> > : public evaluator<ArgType> {
  typedef evaluator<ArgType> Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue<ArgType>& xpr) : Base(xpr.nestedExpression()) {}
};
}
// namespace internal
}
// end namespace Eigen
#endif // EIGEN_NESTBYVALUE_H
eigen-master/Eigen/src/Core/NoAlias.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_NOALIAS_H
#define EIGEN_NOALIAS_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class NoAlias
* \ingroup Core_Module
*
* \brief Pseudo expression providing an operator = assuming no aliasing
*
* \tparam ExpressionType the type of the object on which to do the lazy assignment
*
* This class represents an expression with special assignment operators
* assuming no aliasing between the target expression and the source expression.
* More precisely it allows one to bypass the EvalBeforeAssignBit flag of the source expression.
* It is the return type of MatrixBase::noalias()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::noalias()
*/
template <typename ExpressionType, template <typename> class StorageBase>
class NoAlias {
 public:
  typedef typename ExpressionType::Scalar Scalar;

  /// Wrap \a expression; assignments through this wrapper skip aliasing checks.
  EIGEN_DEVICE_FUNC explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}

  /// Plain assignment, performed without an intermediate temporary.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other) {
    call_assignment_no_alias(m_expression, other.derived(),
                             internal::assign_op<Scalar, typename OtherDerived::Scalar>());
    return m_expression;
  }

  /// Compound addition, performed without an intermediate temporary.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other) {
    call_assignment_no_alias(m_expression, other.derived(),
                             internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
    return m_expression;
  }

  /// Compound subtraction, performed without an intermediate temporary.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other) {
    call_assignment_no_alias(m_expression, other.derived(),
                             internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
    return m_expression;
  }

  /// \returns the expression this pseudo-expression writes to.
  EIGEN_DEVICE_FUNC ExpressionType& expression() const { return m_expression; }

 protected:
  ExpressionType& m_expression;
};
/** \returns a pseudo expression of \c *this with an operator= assuming
* no aliasing between \c *this and the source expression.
*
* More precisely, noalias() allows one to bypass the EvalBeforeAssignBit flag.
* Currently, even though several expressions may alias, only product
* expressions have this flag. Therefore, noalias() is only useful when
* the source expression contains a matrix product.
*
* Here are some examples where noalias is useful:
* \code
* D.noalias() = A * B;
* D.noalias() += A.transpose() * B;
* D.noalias() -= 2 * A * B.adjoint();
* \endcode
*
* On the other hand the following example will lead to a \b wrong result:
* \code
* A.noalias() = A * B;
* \endcode
* because the result matrix A is also an operand of the matrix product. Therefore,
* there is no alternative than evaluating A * B in a temporary, that is the default
* behavior when you write:
* \code
* A = A * B;
* \endcode
*
* \sa class NoAlias
*/
template <typename Derived>
NoAlias<Derived, MatrixBase> EIGEN_DEVICE_FUNC MatrixBase<Derived>::noalias() {
  // Wrap the derived expression in a NoAlias pseudo-expression so that the
  // subsequent assignment bypasses the EvalBeforeAssignBit temporary.
  return NoAlias<Derived, Eigen::MatrixBase>(derived());
}
}
// end namespace Eigen
#endif // EIGEN_NOALIAS_H
eigen-master/Eigen/src/Core/NumTraits.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_NUMTRAITS_H
#define EIGEN_NUMTRAITS_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace internal {

// default_digits_impl: number of radix digits of T.
// Falls back on std::numeric_limits<T>::digits when that is specialized;
// otherwise returns 0 for integers and ceil(-log2(epsilon())) for
// floating-point types.
template <typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
          bool is_integer = NumTraits<T>::IsInteger>
struct default_digits_impl {
  EIGEN_DEVICE_FUNC constexpr static int run() { return std::numeric_limits<T>::digits; }
};

template <typename T>
struct default_digits_impl<T, false, false>  // Floating point
{
  EIGEN_DEVICE_FUNC constexpr static int run() {
    using std::ceil;
    using std::log2;
    typedef typename NumTraits<T>::Real Real;
    // digits ~= -log2(machine epsilon), rounded up.
    return int(ceil(-log2(NumTraits<Real>::epsilon())));
  }
};

template <typename T>
struct default_digits_impl<T, false, true>  // Integer
{
  EIGEN_DEVICE_FUNC constexpr static int run() { return 0; }
};

// default_digits10_impl: number of exactly-representable decimal digits.
// Falls back on std::numeric_limits<T>::digits10 when specialized; otherwise
// returns 0 for integers and floor((digits()-1)*log10(2)) for floating-point.
template <typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
          bool is_integer = NumTraits<T>::IsInteger>
struct default_digits10_impl {
  EIGEN_DEVICE_FUNC constexpr static int run() { return std::numeric_limits<T>::digits10; }
};

template <typename T>
struct default_digits10_impl<T, false, false>  // Floating point
{
  EIGEN_DEVICE_FUNC constexpr static int run() {
    using std::floor;
    using std::log10;
    typedef typename NumTraits<T>::Real Real;
    return int(floor((internal::default_digits_impl<Real>::run() - 1) * log10(2)));
  }
};

template <typename T>
struct default_digits10_impl<T, false, true>  // Integer
{
  EIGEN_DEVICE_FUNC constexpr static int run() { return 0; }
};

// default_max_digits10_impl: decimal digits needed to round-trip any value.
// Falls back on std::numeric_limits<T>::max_digits10 when specialized;
// otherwise returns 0 for integers and ceil(digits()*log10(2) + 1) for
// floating-point types.
template <typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
          bool is_integer = NumTraits<T>::IsInteger>
struct default_max_digits10_impl {
  EIGEN_DEVICE_FUNC constexpr static int run() { return std::numeric_limits<T>::max_digits10; }
};

template <typename T>
struct default_max_digits10_impl<T, false, false>  // Floating point
{
  EIGEN_DEVICE_FUNC constexpr static int run() {
    using std::ceil;
    using std::log10;
    typedef typename NumTraits<T>::Real Real;
    return int(ceil(internal::default_digits_impl<Real>::run() * log10(2) + 1));
  }
};

template <typename T>
struct default_max_digits10_impl<T, false, true>  // Integer
{
  EIGEN_DEVICE_FUNC constexpr static int run() { return 0; }
};

}
// end namespace internal
namespace numext {

/** \internal Bit-wise cast without changing the underlying bit representation. */
// TODO: Replace by std::bit_cast (available in C++20)
template <typename Tgt, typename Src>
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) {
  // memcpy has unspecified behaviour for non-trivially copyable types, and
  // the destination must be default-constructible and of matching size.
  EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Src>::value, THIS_TYPE_IS_NOT_SUPPORTED)
  EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Tgt>::value && std::is_default_constructible<Tgt>::value,
                      THIS_TYPE_IS_NOT_SUPPORTED)
  EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED)

  Tgt tgt;
  // Load src into registers first. This allows the memcpy to be elided by CUDA.
  const Src staged = src;
  EIGEN_USING_STD(memcpy)
  memcpy(static_cast<void*>(&tgt), static_cast<const void*>(&staged), sizeof(Tgt));
  return tgt;
}

}
// namespace numext
// clang-format off
/** \class NumTraits
* \ingroup Core_Module
*
* \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
*
* \tparam T the numeric type at hand
*
* This class stores enums, typedefs and static methods giving information about a numeric type.
*
* The provided data consists of:
* \li A typedef \c Real, giving the "real part" type of \a T. If \a T is already real,
* then \c Real is just a typedef to \a T. If \a T is `std::complex<U>` then \c Real
* is a typedef to \a U.
* \li A typedef \c NonInteger, giving the type that should be used for operations producing non-integral values,
* such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives
* \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to
* take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is
* only intended as a helper for code that needs to explicitly promote types.
* \li A typedef \c Literal giving the type to use for numeric literals such as "2" or "0.5". For instance, for
* `std::complex<U>`, Literal is defined as \a U. Of course, this type must be fully compatible with \a T. In doubt,
* just use \a T here.
* \li A typedef \c Nested giving the type to use to nest a value inside of the expression tree. If you don't know what
* this means, just use \a T here.
* \li An enum value \c IsComplex. It is equal to 1 if \a T is a \c std::complex type, and to 0 otherwise.
* \li An enum value \c IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int, and to \c 0 otherwise.
* \li Enum values \c ReadCost, \c AddCost and \c MulCost representing a rough estimate of the number of CPU cycles needed to by
* move / add / mul instructions respectively, assuming the data is already stored in CPU registers. Stay vague here.
* No need to do architecture-specific stuff. If you don't know what this means, just use \c Eigen::HugeCost.
* \li An enum value \c IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned.
* \li An enum value \c RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must be
* called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise.
* \li An epsilon() function which, unlike <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/epsilon">
* `std::numeric_limits::epsilon()`</a>, it returns a \c Real instead of a \a T.
* \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default value by the fuzzy
* comparison operators.
* \li highest() and lowest() functions returning the highest and lowest possible values respectively.
* \li digits() function returning the number of radix digits (non-sign digits for integers, mantissa for floating-point).
* This is the analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits">
* `std::numeric_limits<T>::digits`</a> which is used as the default implementation if specialized.
* \li digits10() function returning the number of decimal digits that can be represented without change. This is the
* analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits10">
* `std::numeric_limits<T>::digits10`</a> which is used as the default implementation if specialized.
* \li max_digits10() function returning the number of decimal digits required to uniquely represent all distinct values
* of the type. This is the analogue of <a
* href="http://en.cppreference.com/w/cpp/types/numeric_limits/max_digits10">`std::numeric_limits<T>::max_digits10`</a>
* which is used as the default implementation if specialized.
* \li min_exponent() and max_exponent() functions returning the highest and lowest possible values, respectively,
* such that the radix raised to the power exponent-1 is a normalized floating-point number. These are equivalent
* to <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/min_exponent">
* `std::numeric_limits<T>::min_exponent`</a>/<a
* href="http://en.cppreference.com/w/cpp/types/numeric_limits/max_exponent">`std::numeric_limits<T>::max_exponent`</a>.
* \li infinity() function returning a representation of positive infinity, if available.
* \li quiet_NaN() function returning a non-signaling "not-a-number", if available.
*/
// clang-format on
// Default NumTraits implementation, parameterized on the scalar type T.
// Concrete NumTraits specializations derive from this and override the few
// members that differ (e.g. dummy_precision for floating-point types).
template <typename T>
struct GenericNumTraits {
  enum {
    IsInteger = std::numeric_limits<T>::is_integer,
    IsSigned = std::numeric_limits<T>::is_signed,
    IsComplex = 0,
    // Arithmetic types may be left uninitialized; everything else needs its
    // constructor to run.
    RequireInitialization = internal::is_arithmetic<T>::value ? 0 : 1,
    ReadCost = 1,
    AddCost = 1,
    MulCost = 1
  };

  typedef T Real;
  // Integers promote to a floating-point type for non-integral results:
  // small integers (<= 2 bytes) fit exactly in float, larger ones need double.
  typedef std::conditional_t<IsInteger, std::conditional_t<sizeof(T) <= 2, float, double>, T> NonInteger;
  typedef T Nested;
  typedef T Literal;

  EIGEN_DEVICE_FUNC constexpr static Real epsilon() { return numext::numeric_limits<T>::epsilon(); }

  EIGEN_DEVICE_FUNC constexpr static int digits10() { return internal::default_digits10_impl<T>::run(); }

  EIGEN_DEVICE_FUNC constexpr static int max_digits10() { return internal::default_max_digits10_impl<T>::run(); }

  EIGEN_DEVICE_FUNC constexpr static int digits() { return internal::default_digits_impl<T>::run(); }

  EIGEN_DEVICE_FUNC constexpr static int min_exponent() { return numext::numeric_limits<T>::min_exponent; }

  EIGEN_DEVICE_FUNC constexpr static int max_exponent() { return numext::numeric_limits<T>::max_exponent; }

  EIGEN_DEVICE_FUNC constexpr static Real dummy_precision() {
    // make sure to override this for floating-point types
    return Real(0);
  }

  // Parenthesized (max)/(lowest) to dodge any max/min macros (e.g. windows.h).
  EIGEN_DEVICE_FUNC constexpr static T highest() { return (numext::numeric_limits<T>::max)(); }

  EIGEN_DEVICE_FUNC constexpr static T lowest() { return (numext::numeric_limits<T>::lowest)(); }

  EIGEN_DEVICE_FUNC constexpr static T infinity() { return numext::numeric_limits<T>::infinity(); }

  EIGEN_DEVICE_FUNC constexpr static T quiet_NaN() { return numext::numeric_limits<T>::quiet_NaN(); }
};
// Primary NumTraits template: by default simply inherit everything from
// GenericNumTraits<T>; specializations below tune individual types.
template <typename T>
struct NumTraits : GenericNumTraits<T> {};
template <>
struct NumTraits<float> : GenericNumTraits<float> {
  // Weak epsilon used by the fuzzy comparison operators (isApprox & co).
  EIGEN_DEVICE_FUNC constexpr static float dummy_precision() { return 1e-5f; }
};
template <>
struct NumTraits<double> : GenericNumTraits<double> {
  // Weak epsilon used by the fuzzy comparison operators (isApprox & co).
  EIGEN_DEVICE_FUNC constexpr static double dummy_precision() { return 1e-12; }
};
// GPU devices treat `long double` as `double`.
#ifndef EIGEN_GPU_COMPILE_PHASE
template <>
struct NumTraits<long double> : GenericNumTraits<long double> {
  // Weak epsilon used by the fuzzy comparison operators (isApprox & co).
  EIGEN_DEVICE_FUNC constexpr static long double dummy_precision() { return static_cast<long double>(1e-15l); }

#if defined(EIGEN_ARCH_PPC) && (__LDBL_MANT_DIG__ == 106)
  // PowerPC double double causes issues with some values
  EIGEN_DEVICE_FUNC constexpr static long double epsilon() {
    // 2^(-(__LDBL_MANT_DIG__)+1)
    return static_cast<long double>(2.4651903288156618919116517665087e-32l);
  }
#endif
};
#endif
// NumTraits for std::complex<Real_>: a complex number is two reals, so the
// costs double (and a complex multiply is 4 real mults + 2 real adds).
template <typename Real_>
struct NumTraits<std::complex<Real_> > : GenericNumTraits<std::complex<Real_> > {
  typedef Real_ Real;
  typedef typename NumTraits<Real_>::Literal Literal;
  enum {
    IsComplex = 1,
    IsSigned = NumTraits<Real_>::IsSigned,
    RequireInitialization = NumTraits<Real_>::RequireInitialization,
    ReadCost = 2 * NumTraits<Real_>::ReadCost,
    AddCost = 2 * NumTraits<Real>::AddCost,
    MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
  };

  // Precision-related queries all delegate to the underlying real type.
  EIGEN_DEVICE_FUNC constexpr static Real epsilon() { return NumTraits<Real>::epsilon(); }
  EIGEN_DEVICE_FUNC constexpr static Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
  EIGEN_DEVICE_FUNC constexpr static int digits10() { return NumTraits<Real>::digits10(); }
  EIGEN_DEVICE_FUNC constexpr static int max_digits10() { return NumTraits<Real>::max_digits10(); }
};
// NumTraits for Array-valued "scalars" (arrays of arrays). Element-wise
// traits are lifted from the element Scalar; costs scale with the number of
// coefficients (HugeCost when that number is only known at runtime).
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > {
  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> ArrayType;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef Array<RealScalar, Rows, Cols, Options, MaxRows, MaxCols> Real;
  typedef typename NumTraits<Scalar>::NonInteger NonIntegerScalar;
  typedef Array<NonIntegerScalar, Rows, Cols, Options, MaxRows, MaxCols> NonInteger;
  typedef ArrayType& Nested;
  typedef typename NumTraits<Scalar>::Literal Literal;

  enum {
    IsComplex = NumTraits<Scalar>::IsComplex,
    IsInteger = NumTraits<Scalar>::IsInteger,
    IsSigned = NumTraits<Scalar>::IsSigned,
    RequireInitialization = 1,
    ReadCost = ArrayType::SizeAtCompileTime == Dynamic
                   ? HugeCost
                   : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::ReadCost),
    AddCost = ArrayType::SizeAtCompileTime == Dynamic
                  ? HugeCost
                  : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::AddCost),
    MulCost = ArrayType::SizeAtCompileTime == Dynamic
                  ? HugeCost
                  : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::MulCost)
  };

  EIGEN_DEVICE_FUNC constexpr static RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
  EIGEN_DEVICE_FUNC constexpr static RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }

  constexpr static int digits10() { return NumTraits<Scalar>::digits10(); }
  constexpr static int max_digits10() { return NumTraits<Scalar>::max_digits10(); }
};
// NumTraits for std::string: enables Matrix/Array of strings. All numeric
// queries that make no sense for strings are declared private and left
// undefined so that any use fails at compile/link time.
template <>
struct NumTraits<std::string> : GenericNumTraits<std::string> {
  enum {
    RequireInitialization = 1,
    ReadCost = HugeCost,
    AddCost = HugeCost,
    MulCost = HugeCost
  };

  constexpr static int digits10() { return 0; }
  constexpr static int max_digits10() { return 0; }

 private:
  static inline std::string epsilon();
  static inline std::string dummy_precision();
  static inline std::string lowest();
  static inline std::string highest();
  static inline std::string infinity();
  static inline std::string quiet_NaN();
};
// Empty specialization for void to allow template specialization based on NumTraits<T>::Real with T==void and SFINAE.
// Empty specialization for void to allow template specialization based on
// NumTraits<T>::Real with T==void and SFINAE.
template <>
struct NumTraits<void> {};
// bool behaves like any other arithmetic type; the generic traits suffice.
template <>
struct NumTraits<bool> : GenericNumTraits<bool> {};
}
// end namespace Eigen
#endif // EIGEN_NUMTRAITS_H
eigen-master/Eigen/src/Core/PartialReduxEvaluator.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2018 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARTIALREDUX_H
#define EIGEN_PARTIALREDUX_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
/***************************************************************************
*
* This file provides evaluators for partial reductions.
* There are two modes:
*
* - scalar path: simply calls the respective function on the column or row.
* -> nothing special here, all the tricky part is handled by the return
* types of VectorwiseOp's members. They embed the functor calling the
* respective DenseBase's member function.
*
* - vectorized path: implements a packet-wise reductions followed by
* some (optional) processing of the outcome, e.g., division by n for mean.
*
* For the vectorized path let's observe that the packet-size and outer-unrolling
* are both decided by the assignment logic. So all we have to do is to decide
* on the inner unrolling.
*
* For the unrolling, we can reuse "internal::redux_vec_unroller" from Redux.h,
* but be need to be careful to specify correct increment.
*
***************************************************************************/
/* logic deciding a strategy for unrolling of vectorized paths */
/* logic deciding a strategy for unrolling of vectorized paths */
template <typename Func, typename Evaluator>
struct packetwise_redux_traits {
  enum {
    // Length of the reduction along the outer dimension.
    OuterSize = int(Evaluator::IsRowMajor) ? Evaluator::RowsAtCompileTime : Evaluator::ColsAtCompileTime,
    // Estimated cost of the full reduction: OuterSize reads plus
    // OuterSize-1 applications of the functor (HugeCost when dynamic).
    Cost = OuterSize == Dynamic
               ? HugeCost
               : OuterSize * Evaluator::CoeffReadCost + (OuterSize - 1) * functor_traits<Func>::Cost,
    Unrolling = Cost <= EIGEN_UNROLLING_LIMIT ? CompleteUnrolling : NoUnrolling
  };
};
/* Value to be returned when size==0 , by default let's return 0 */
/* Value to be returned when size==0 , by default let's return 0 */
template <typename PacketType, typename Func>
EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const Func&) {
  const typename unpacket_traits<PacketType>::type zero(0);
  return pset1<PacketType>(zero);
}
/* For products the default is 1 */
/* For products the default is 1 */
template <typename PacketType, typename Scalar>
EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const scalar_product_op<Scalar, Scalar>&) {
  return pset1<PacketType>(Scalar(1));
}
/* Perform the actual reduction */
/* Perform the actual reduction; the Unrolling parameter selects the
 * specialization below (fully unrolled vs runtime loop). */
template <typename Func, typename Evaluator,
          int Unrolling = packetwise_redux_traits<Func, Evaluator>::Unrolling>
struct packetwise_redux_impl;
/* Perform the actual reduction with unrolling */
/* Perform the actual reduction with unrolling */
template <typename Func, typename Evaluator>
struct packetwise_redux_impl<Func, Evaluator, CompleteUnrolling> {
  typedef redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
  typedef typename Evaluator::Scalar Scalar;

  // size is ignored: the full extent is known at compile time, so the whole
  // reduction is expanded by redux_vec_unroller.
  template <typename PacketType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func,
                                                              Index /*size*/) {
    return redux_vec_unroller<Func, Evaluator, 0,
                              packetwise_redux_traits<Func, Evaluator>::OuterSize>::template run<PacketType>(eval,
                                                                                                             func);
  }
};
/* Add a specialization of redux_vec_unroller for size==0 at compiletime.
* This specialization is not required for general reductions, which is
* why it is defined here.
*/
/* Add a specialization of redux_vec_unroller for size==0 at compiletime.
 * This specialization is not required for general reductions, which is
 * why it is defined here.
 */
template <typename Func, typename Evaluator, Index Start>
struct redux_vec_unroller<Func, Evaluator, Start, 0> {
  template <typename PacketType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator&, const Func& f) {
    return packetwise_redux_empty_value<PacketType>(f);
  }
};
/* Perform the actual reduction for dynamic sizes */
/* Perform the actual reduction for dynamic sizes */
template <typename Func, typename Evaluator>
struct packetwise_redux_impl<Func, Evaluator, NoUnrolling> {
  typedef typename Evaluator::Scalar Scalar;
  typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;

  template <typename PacketType>
  EIGEN_DEVICE_FUNC static PacketType run(const Evaluator& eval, const Func& func, Index size) {
    if (size == 0) return packetwise_redux_empty_value<PacketType>(func);

    // Number of entries handled by the 4-way main loop (entry 0 seeds p).
    const Index size4 = 1 + numext::round_down(size - 1, 4);
    PacketType p = eval.template packetByOuterInner<Unaligned, PacketType>(0, 0);
    // This loop is optimized for instruction pipelining:
    // - each iteration generates two independent instructions
    // - thanks to branch prediction and out-of-order execution we have independent instructions across loops
    for (Index i = 1; i < size4; i += 4)
      p = func.packetOp(
          p, func.packetOp(func.packetOp(eval.template packetByOuterInner<Unaligned, PacketType>(i + 0, 0),
                                         eval.template packetByOuterInner<Unaligned, PacketType>(i + 1, 0)),
                           func.packetOp(eval.template packetByOuterInner<Unaligned, PacketType>(i + 2, 0),
                                         eval.template packetByOuterInner<Unaligned, PacketType>(i + 3, 0))));
    // Tail: remaining entries, one at a time.
    for (Index i = size4; i < size; ++i)
      p = func.packetOp(p, eval.template packetByOuterInner<Unaligned, PacketType>(i, 0));
    return p;
  }
};
// Segment variant of the packet-wise reduction: reduces partial packets
// ([begin, begin+count) lanes) across the outer dimension.
template <typename Func, typename Evaluator>
struct packetwise_segment_redux_impl {
  typedef typename Evaluator::Scalar Scalar;
  typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;

  template <typename PacketType>
  EIGEN_DEVICE_FUNC static PacketType run(const Evaluator& eval, const Func& func, Index size, Index begin,
                                          Index count) {
    if (size == 0) return packetwise_redux_empty_value<PacketType>(func);

    PacketType p = eval.template packetSegmentByOuterInner<Unaligned, PacketType>(0, 0, begin, count);
    for (Index i = 1; i < size; ++i)
      p = func.packetOp(p, eval.template packetSegmentByOuterInner<Unaligned, PacketType>(i, 0, begin, count));
    return p;
  }
};
// Evaluator for partial reductions (VectorwiseOp results). Scalar access
// reduces one row/column at a time; packet access reduces a PacketSize-wide
// panel via packetwise_redux_impl.
template <typename ArgType, typename MemberOp, int Direction>
struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
    : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> > {
  typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
  typedef typename internal::nested_eval<ArgType, 1>::type ArgTypeNested;
  typedef add_const_on_value_type_t<ArgTypeNested> ConstArgTypeNested;
  typedef internal::remove_all_t<ArgTypeNested> ArgTypeNestedCleaned;
  typedef typename ArgType::Scalar InputScalar;
  typedef typename XprType::Scalar Scalar;

  enum {
    // Length of each reduced sub-vector (rows for vertical reductions,
    // columns for horizontal ones).
    TraversalSize = Direction == int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
  };
  typedef typename MemberOp::template Cost<int(TraversalSize)> CostOpType;
  enum {
    CoeffReadCost = TraversalSize == Dynamic ? HugeCost
                    : TraversalSize == 0
                        ? 1
                        : int(TraversalSize) * int(evaluator<ArgType>::CoeffReadCost) + int(CostOpType::value),

    ArgFlags_ = evaluator<ArgType>::Flags,

    // Vectorization requires packet access on the argument, a vectorizable
    // member functor, and a storage order matching the reduction direction.
    Vectorizable_ = bool(int(ArgFlags_) & PacketAccessBit) && bool(MemberOp::Vectorizable) &&
                    (Direction == int(Vertical) ? bool(ArgFlags_ & RowMajorBit) : (ArgFlags_ & RowMajorBit) == 0) &&
                    (TraversalSize != 0),

    Flags = (traits<XprType>::Flags & RowMajorBit) | (evaluator<ArgType>::Flags & (HereditaryBits & (~RowMajorBit))) |
            (Vectorizable_ ? PacketAccessBit : 0) | LinearAccessBit,

    Alignment = 0  // FIXME this will need to be improved once PartialReduxExpr is vectorized
  };

  // FIX: take the expression by const reference instead of by value. The
  // original signature (const XprType xpr) copied the expression, and m_arg
  // — which may be a reference type via nested_eval — was then bound to the
  // nested expression of the local copy, dangling once the constructor
  // returned. Binding to the caller's expression avoids both the copy and
  // the dangling reference (and matches upstream Eigen).
  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize == Dynamic ? HugeCost
                                                             : (TraversalSize == 0 ? 1 : int(CostOpType::value)));
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const {
    return coeff(Direction == Vertical ? j : i);
  }

  // Reduce the index-th sub-vector with the member functor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index index) const {
    return m_functor(m_arg.template subVector<DirectionType(Direction)>(index));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index i, Index j) const {
    return packet<LoadMode, PacketType>(Direction == Vertical ? j : i);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC PacketType packet(Index idx) const {
    static constexpr int PacketSize = internal::unpacket_traits<PacketType>::size;
    static constexpr int PanelRows = Direction == Vertical ? ArgType::RowsAtCompileTime : PacketSize;
    static constexpr int PanelCols = Direction == Vertical ? PacketSize : ArgType::ColsAtCompileTime;
    using PanelType = Block<const ArgTypeNestedCleaned, PanelRows, PanelCols, true /* InnerPanel */>;
    using PanelEvaluator = typename internal::redux_evaluator<PanelType>;
    using BinaryOp = typename MemberOp::BinaryOp;
    using Impl = internal::packetwise_redux_impl<BinaryOp, PanelEvaluator>;
    // FIXME
    // See bug 1612, currently if PacketSize==1 (i.e. complex<double> with 128bits registers) then the storage-order of
    // panel get reversed and methods like packetByOuterInner do not make sense anymore in this context. So let's just
    // by pass "vectorization" in this case:
    if (PacketSize == 1) return internal::pset1<PacketType>(coeff(idx));

    Index startRow = Direction == Vertical ? 0 : idx;
    Index startCol = Direction == Vertical ? idx : 0;
    Index numRows = Direction == Vertical ? m_arg.rows() : PacketSize;
    Index numCols = Direction == Vertical ? PacketSize : m_arg.cols();
    PanelType panel(m_arg, startRow, startCol, numRows, numCols);
    PanelEvaluator panel_eval(panel);
    PacketType p = Impl::template run<PacketType>(panel_eval, m_functor.binaryFunc(), m_arg.outerSize());
    return p;
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index i, Index j, Index begin, Index count) const {
    return packetSegment<LoadMode, PacketType>(Direction == Vertical ? j : i, begin, count);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC PacketType packetSegment(Index idx, Index begin, Index count) const {
    // Panel extents are Dynamic along the packet direction since only
    // [begin, begin+count) lanes are materialized.
    static constexpr int PanelRows = Direction == Vertical ? ArgType::RowsAtCompileTime : Dynamic;
    static constexpr int PanelCols = Direction == Vertical ? Dynamic : ArgType::ColsAtCompileTime;
    using PanelType = Block<const ArgTypeNestedCleaned, PanelRows, PanelCols, true /* InnerPanel */>;
    using PanelEvaluator = typename internal::redux_evaluator<PanelType>;
    using BinaryOp = typename MemberOp::BinaryOp;
    using Impl = internal::packetwise_segment_redux_impl<BinaryOp, PanelEvaluator>;

    Index startRow = Direction == Vertical ? 0 : idx;
    Index startCol = Direction == Vertical ? idx : 0;
    Index numRows = Direction == Vertical ? m_arg.rows() : begin + count;
    Index numCols = Direction == Vertical ? begin + count : m_arg.cols();
    PanelType panel(m_arg, startRow, startCol, numRows, numCols);
    PanelEvaluator panel_eval(panel);
    PacketType p = Impl::template run<PacketType>(panel_eval, m_functor.binaryFunc(), m_arg.outerSize(), begin, count);
    return p;
  }

 protected:
  ConstArgTypeNested m_arg;
  const MemberOp m_functor;
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_PARTIALREDUX_H
eigen-master/Eigen/src/Core/PermutationMatrix.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PERMUTATIONMATRIX_H
#define EIGEN_PERMUTATIONMATRIX_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
enum
PermPermProduct_t
{
PermPermProduct
};
}
// end namespace internal
/** \class PermutationBase
* \ingroup Core_Module
*
* \brief Base class for permutations
*
* \tparam Derived the derived class
*
* This class is the base class for all expressions representing a permutation matrix,
* internally stored as a vector of integers.
* The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix
* \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have:
* \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f]
* This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have:
* \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f]
*
* Permutation matrices are square and invertible.
*
* Notice that in addition to the member functions and operators listed here, there also are non-member
* operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)
* on either side.
*
* \sa class PermutationMatrix, class PermutationWrapper
*/
template
<
typename
Derived
>
class
PermutationBase
:
public
EigenBase
<
Derived
>
{
typedef
internal
::
traits
<
Derived
>
Traits
;
typedef
EigenBase
<
Derived
>
Base
;
public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
typedef
typename
Traits
::
IndicesType
IndicesType
;
enum
{
Flags
=
Traits
::
Flags
,
RowsAtCompileTime
=
Traits
::
RowsAtCompileTime
,
ColsAtCompileTime
=
Traits
::
ColsAtCompileTime
,
MaxRowsAtCompileTime
=
Traits
::
MaxRowsAtCompileTime
,
MaxColsAtCompileTime
=
Traits
::
MaxColsAtCompileTime
};
typedef
typename
Traits
::
StorageIndex
StorageIndex
;
typedef
Matrix
<
StorageIndex
,
RowsAtCompileTime
,
ColsAtCompileTime
,
0
,
MaxRowsAtCompileTime
,
MaxColsAtCompileTime
>
DenseMatrixType
;
typedef
PermutationMatrix
<
IndicesType
::
SizeAtCompileTime
,
IndicesType
::
MaxSizeAtCompileTime
,
StorageIndex
>
PlainPermutationType
;
typedef
PlainPermutationType
PlainObject
;
using
Base
::
derived
;
typedef
Inverse
<
Derived
>
InverseReturnType
;
typedef
void
Scalar
;
#endif
/** Copies the other permutation into *this */
/** Copies the other permutation into *this */
template <typename OtherDerived>
Derived& operator=(const PermutationBase<OtherDerived>& other) {
  indices() = other.indices();
  return derived();
}
/** Assignment from the Transpositions \a tr */
/** Assignment from the Transpositions \a tr */
template <typename OtherDerived>
Derived& operator=(const TranspositionsBase<OtherDerived>& tr) {
  // Start from identity, then apply the stored transpositions from last to
  // first; each application is a cheap O(1) index swap.
  setIdentity(tr.size());
  for (Index k = size() - 1; k >= 0; --k) applyTranspositionOnTheRight(k, tr.coeff(k));
  return derived();
}
/** \returns the number of rows */
/** \returns the number of rows */
inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); }
/** \returns the number of columns */
/** \returns the number of columns */
inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); }
/** \returns the size of a side of the respective square matrix, i.e., the number of indices */
/** \returns the size of a side of the respective square matrix, i.e., the number of indices */
inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
// Materializes the permutation as a dense 0/1 matrix: column i gets a 1 in
// row indices()[i].
template <typename DenseDerived>
void evalTo(MatrixBase<DenseDerived>& other) const {
  other.setZero();
  for (Index i = 0; i < rows(); ++i) other.coeffRef(indices().coeff(i), i) = typename DenseDerived::Scalar(1);
}
#endif
  /** \returns a Matrix object initialized from this permutation matrix. Notice that it
   * is inefficient to return this Matrix object by value. For efficiency, favor using
   * the Matrix constructor taking EigenBase objects.
   */
  DenseMatrixType toDenseMatrix() const {
    // Relies on the Matrix constructor taking an EigenBase, which calls evalTo().
    return derived();
  }
  /** const version of indices(). */
  const IndicesType& indices() const { return derived().indices(); }
  /** \returns a reference to the stored array representing the permutation. */
  IndicesType& indices() { return derived().indices(); }
  /** Resizes to given size.
   *
   * The stored indices are left uninitialized after a resize.
   */
  inline void resize(Index newSize) { indices().resize(newSize); }
/** Sets *this to be the identity permutation matrix */
void
setIdentity
()
{
StorageIndex
n
=
StorageIndex
(
size
());
for
(
StorageIndex
i
=
0
;
i
<
n
;
++
i
)
indices
().
coeffRef
(
i
)
=
i
;
}
  /** Sets *this to be the identity permutation matrix of given size.
   */
  void setIdentity(Index newSize) {
    resize(newSize);
    setIdentity();
  }
/** Multiplies *this by the transposition \f$(ij)\f$ on the left.
*
* \returns a reference to *this.
*
* \warning This is much slower than applyTranspositionOnTheRight(Index,Index):
* this has linear complexity and requires a lot of branching.
*
* \sa applyTranspositionOnTheRight(Index,Index)
*/
Derived
&
applyTranspositionOnTheLeft
(
Index
i
,
Index
j
)
{
eigen_assert
(
i
>=
0
&&
j
>=
0
&&
i
<
size
()
&&
j
<
size
());
for
(
Index
k
=
0
;
k
<
size
();
++
k
)
{
if
(
indices
().
coeff
(
k
)
==
i
)
indices
().
coeffRef
(
k
)
=
StorageIndex
(
j
);
else
if
(
indices
().
coeff
(
k
)
==
j
)
indices
().
coeffRef
(
k
)
=
StorageIndex
(
i
);
}
return
derived
();
}
  /** Multiplies *this by the transposition \f$(ij)\f$ on the right.
   *
   * \returns a reference to *this.
   *
   * This is a fast operation, it only consists in swapping two indices.
   *
   * \sa applyTranspositionOnTheLeft(Index,Index)
   */
  Derived& applyTranspositionOnTheRight(Index i, Index j) {
    eigen_assert(i >= 0 && j >= 0 && i < size() && j < size());
    // Right-multiplication by (i j) permutes positions, not values: a swap suffices.
    std::swap(indices().coeffRef(i), indices().coeffRef(j));
    return derived();
  }
  /** \returns the inverse permutation matrix.
   *
   * \note \blank \note_try_to_help_rvo
   */
  inline InverseReturnType inverse() const { return InverseReturnType(derived()); }
  /** \returns the transpose permutation matrix.
   *
   * \note \blank \note_try_to_help_rvo
   *
   * For a permutation matrix the transpose equals the inverse, hence the
   * same expression type is returned.
   */
  inline InverseReturnType transpose() const { return InverseReturnType(derived()); }
  /**** multiplication helpers to hopefully get RVO ****/

#ifndef EIGEN_PARSED_BY_DOXYGEN
 protected:
  // Sets *this to the inverse of \a other: if other maps i to other[i],
  // the inverse maps other[i] back to i.
  template <typename OtherDerived>
  void assignTranspose(const PermutationBase<OtherDerived>& other) {
    for (Index i = 0; i < rows(); ++i) indices().coeffRef(other.indices().coeff(i)) = i;
  }
  // Sets *this to the composition lhs*rhs of two permutations:
  // (lhs*rhs)[i] = lhs[rhs[i]].
  template <typename Lhs, typename Rhs>
  void assignProduct(const Lhs& lhs, const Rhs& rhs) {
    eigen_assert(lhs.cols() == rhs.rows());
    for (Index i = 0; i < rows(); ++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));
  }
#endif
 public:
  /** \returns the product permutation matrix.
   *
   * \note \blank \note_try_to_help_rvo
   */
  template <typename Other>
  inline PlainPermutationType operator*(const PermutationBase<Other>& other) const {
    return PlainPermutationType(internal::PermPermProduct, derived(), other.derived());
  }
  /** \returns the product of a permutation with another inverse permutation.
   *
   * \note \blank \note_try_to_help_rvo
   */
  template <typename Other>
  inline PlainPermutationType operator*(const InverseImpl<Other, PermutationStorage>& other) const {
    // eval() materializes the inverse into a plain permutation first.
    return PlainPermutationType(internal::PermPermProduct, *this, other.eval());
  }
  /** \returns the product of an inverse permutation with another permutation.
   *
   * \note \blank \note_try_to_help_rvo
   */
  template <typename Other>
  friend inline PlainPermutationType operator*(const InverseImpl<Other, PermutationStorage>& other,
                                               const PermutationBase& perm) {
    return PlainPermutationType(internal::PermPermProduct, other.eval(), perm);
  }
  /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the
   * permutation.
   *
   * This function is O(\c n) procedure allocating a buffer of \c n booleans.
   */
  Index determinant() const {
    Index res = 1;
    Index n = size();
    // Tracks which indices have already been visited while walking cycles.
    Matrix<bool, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime> mask(n);
    mask.fill(false);
    Index r = 0;
    while (r < n) {
      // search for the next seed
      while (r < n && mask[r]) r++;
      if (r >= n) break;
      // we got one, let's follow it until we are back to the seed
      Index k0 = r++;
      mask.coeffRef(k0) = true;
      // A cycle of length L contributes (-1)^(L-1): flip the sign once per
      // element after the seed.
      for (Index k = indices().coeff(k0); k != k0; k = indices().coeff(k)) {
        mask.coeffRef(k) = true;
        res = -res;
      }
    }
    return res;
  }
protected:
};
namespace internal {
// Traits for PermutationMatrix: inherits dense-matrix traits for the
// equivalent square index matrix, but marks the storage as PermutationStorage.
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_>
struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_> >
    : traits<Matrix<StorageIndex_, SizeAtCompileTime, SizeAtCompileTime, 0, MaxSizeAtCompileTime,
                    MaxSizeAtCompileTime> > {
  typedef PermutationStorage StorageKind;
  // The permutation itself is stored as a column vector of indices.
  typedef Matrix<StorageIndex_, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
  typedef StorageIndex_ StorageIndex;
  typedef void Scalar;
};
}  // namespace internal
/** \class PermutationMatrix
 * \ingroup Core_Module
 *
 * \brief Permutation matrix
 *
 * \tparam SizeAtCompileTime the number of rows/cols, or Dynamic
 * \tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to
 * SizeAtCompileTime. Most of the time, you should not have to specify it. \tparam StorageIndex_ the integer type of the
 * indices
 *
 * This class represents a permutation matrix, internally stored as a vector of integers.
 *
 * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix
 */
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_>
class PermutationMatrix
    : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_> > {
  typedef PermutationBase<PermutationMatrix> Base;
  typedef internal::traits<PermutationMatrix> Traits;

 public:
  typedef const PermutationMatrix& Nested;

#ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef typename Traits::IndicesType IndicesType;
  typedef typename Traits::StorageIndex StorageIndex;
#endif

  // Default constructor: empty (size-0) permutation.
  inline PermutationMatrix() {}

  /** Constructs an uninitialized permutation matrix of given size.
   */
  explicit inline PermutationMatrix(Index size) : m_indices(size) {
    // The size must be representable in the index type.
    eigen_internal_assert(size <= NumTraits<StorageIndex>::highest());
  }

  /** Copy constructor. */
  template <typename OtherDerived>
  inline PermutationMatrix(const PermutationBase<OtherDerived>& other) : m_indices(other.indices()) {}

  /** Generic constructor from expression of the indices. The indices
   * array has the meaning that the permutations sends each integer i to indices[i].
   *
   * \warning It is your responsibility to check that the indices array that you passes actually
   * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the
   * array's size.
   */
  template <typename Other>
  explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices) {}

  /** Convert the Transpositions \a tr to a permutation matrix */
  template <typename Other>
  explicit PermutationMatrix(const TranspositionsBase<Other>& tr) : m_indices(tr.size()) {
    // Delegates to PermutationBase::operator=(TranspositionsBase).
    *this = tr;
  }

  /** Copies the other permutation into *this */
  template <typename Other>
  PermutationMatrix& operator=(const PermutationBase<Other>& other) {
    m_indices = other.indices();
    return *this;
  }

  /** Assignment from the Transpositions \a tr */
  template <typename Other>
  PermutationMatrix& operator=(const TranspositionsBase<Other>& tr) {
    return Base::operator=(tr.derived());
  }

  /** const version of indices(). */
  const IndicesType& indices() const { return m_indices; }
  /** \returns a reference to the stored array representing the permutation. */
  IndicesType& indices() { return m_indices; }

  /**** multiplication helpers to hopefully get RVO ****/

#ifndef EIGEN_PARSED_BY_DOXYGEN
  // Materializes the inverse of a permutation: fills m_indices such that
  // m_indices[other[i]] = i.
  template <typename Other>
  PermutationMatrix(const InverseImpl<Other, PermutationStorage>& other)
      : m_indices(other.derived().nestedExpression().size()) {
    eigen_internal_assert(m_indices.size() <= NumTraits<StorageIndex>::highest());
    StorageIndex end = StorageIndex(m_indices.size());
    for (StorageIndex i = 0; i < end; ++i)
      m_indices.coeffRef(other.derived().nestedExpression().indices().coeff(i)) = i;
  }
  // Materializes the product of two permutations (tag-dispatched).
  template <typename Lhs, typename Rhs>
  PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) : m_indices(lhs.indices().size()) {
    Base::assignProduct(lhs, rhs);
  }
#endif

 protected:
  IndicesType m_indices;
};
namespace internal {
// Traits for Map<PermutationMatrix>: same as PermutationMatrix, except the
// index vector is itself a (const) Map over user-provided memory.
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_, int PacketAccess_>
struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_>, PacketAccess_> >
    : traits<Matrix<StorageIndex_, SizeAtCompileTime, SizeAtCompileTime, 0, MaxSizeAtCompileTime,
                    MaxSizeAtCompileTime> > {
  typedef PermutationStorage StorageKind;
  typedef Map<const Matrix<StorageIndex_, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, PacketAccess_>
      IndicesType;
  typedef StorageIndex_ StorageIndex;
  typedef void Scalar;
};
}  // namespace internal
// Map specialization viewing a user-owned array of indices as a permutation
// matrix, without copying the data.
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_, int PacketAccess_>
class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_>, PacketAccess_>
    : public PermutationBase<
          Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_>, PacketAccess_> > {
  typedef PermutationBase<Map> Base;
  typedef internal::traits<Map> Traits;

 public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef typename Traits::IndicesType IndicesType;
  typedef typename IndicesType::Scalar StorageIndex;
#endif

  // Maps a fixed-size permutation over \a indicesPtr.
  inline Map(const StorageIndex* indicesPtr) : m_indices(indicesPtr) {}

  // Maps a dynamic-size permutation of \a size entries over \a indicesPtr.
  inline Map(const StorageIndex* indicesPtr, Index size) : m_indices(indicesPtr, size) {}

  /** Copies the other permutation into *this */
  template <typename Other>
  Map& operator=(const PermutationBase<Other>& other) {
    return Base::operator=(other.derived());
  }

  /** Assignment from the Transpositions \a tr */
  template <typename Other>
  Map& operator=(const TranspositionsBase<Other>& tr) {
    return Base::operator=(tr.derived());
  }

#ifndef EIGEN_PARSED_BY_DOXYGEN
  /** This is a special case of the templated operator=. Its purpose is to
   * prevent a default operator= from hiding the templated operator=.
   */
  Map& operator=(const Map& other) {
    m_indices = other.m_indices;
    return *this;
  }
#endif

  /** const version of indices(). */
  const IndicesType& indices() const { return m_indices; }
  /** \returns a reference to the stored array representing the permutation. */
  IndicesType& indices() { return m_indices; }

 protected:
  IndicesType m_indices;
};
// Forward declaration (defined elsewhere).
template <typename IndicesType_>
class TranspositionsWrapper;

namespace internal {
// Traits for PermutationWrapper: geometry is derived from the wrapped
// integer-vector expression.
template <typename IndicesType_>
struct traits<PermutationWrapper<IndicesType_> > {
  typedef PermutationStorage StorageKind;
  typedef void Scalar;
  typedef typename IndicesType_::Scalar StorageIndex;
  typedef IndicesType_ IndicesType;
  enum {
    RowsAtCompileTime = IndicesType_::SizeAtCompileTime,
    ColsAtCompileTime = IndicesType_::SizeAtCompileTime,
    MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime,
    MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime,
    Flags = 0
  };
};
}  // namespace internal
/** \class PermutationWrapper
 * \ingroup Core_Module
 *
 * \brief Class to view a vector of integers as a permutation matrix
 *
 * \tparam IndicesType_ the type of the vector of integer (can be any compatible expression)
 *
 * This class allows to view any vector expression of integers as a permutation matrix.
 *
 * \sa class PermutationBase, class PermutationMatrix
 */
template <typename IndicesType_>
class PermutationWrapper : public PermutationBase<PermutationWrapper<IndicesType_> > {
  typedef PermutationBase<PermutationWrapper> Base;
  typedef internal::traits<PermutationWrapper> Traits;

 public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef typename Traits::IndicesType IndicesType;
#endif

  // Non-owning view: only a nested reference/copy of the expression is kept.
  inline PermutationWrapper(const IndicesType& indices) : m_indices(indices) {}

  /** const version of indices(). */
  const internal::remove_all_t<typename IndicesType::Nested>& indices() const { return m_indices; }

 protected:
  typename IndicesType::Nested m_indices;
};
/** \returns the matrix with the permutation applied to the columns.
 */
template <typename MatrixDerived, typename PermutationDerived>
EIGEN_DEVICE_FUNC const Product<MatrixDerived, PermutationDerived, AliasFreeProduct> operator*(
    const MatrixBase<MatrixDerived>& matrix, const PermutationBase<PermutationDerived>& permutation) {
  // Lazy product expression; evaluation is alias-free by construction.
  return Product<MatrixDerived, PermutationDerived, AliasFreeProduct>(matrix.derived(), permutation.derived());
}
/** \returns the matrix with the permutation applied to the rows.
 */
template <typename PermutationDerived, typename MatrixDerived>
EIGEN_DEVICE_FUNC const Product<PermutationDerived, MatrixDerived, AliasFreeProduct> operator*(
    const PermutationBase<PermutationDerived>& permutation, const MatrixBase<MatrixDerived>& matrix) {
  // Lazy product expression; evaluation is alias-free by construction.
  return Product<PermutationDerived, MatrixDerived, AliasFreeProduct>(permutation.derived(), matrix.derived());
}
// Implementation of Inverse<> for permutation matrices: the inverse is again
// a permutation, and products against dense matrices remain lazy expressions.
template <typename PermutationType>
class InverseImpl<PermutationType, PermutationStorage> : public EigenBase<Inverse<PermutationType> > {
  typedef typename PermutationType::PlainPermutationType PlainPermutationType;
  typedef internal::traits<PermutationType> PermTraits;

 protected:
  InverseImpl() {}

 public:
  typedef Inverse<PermutationType> InverseType;
  using EigenBase<Inverse<PermutationType> >::derived;

#ifndef EIGEN_PARSED_BY_DOXYGEN
  typedef typename PermutationType::DenseMatrixType DenseMatrixType;
  enum {
    RowsAtCompileTime = PermTraits::RowsAtCompileTime,
    ColsAtCompileTime = PermTraits::ColsAtCompileTime,
    MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime
  };
#endif

#ifndef EIGEN_PARSED_BY_DOXYGEN
  // Dense form of the inverse: row i has its 1 at column perm[i]
  // (i.e. the transpose of the dense form of the underlying permutation).
  template <typename DenseDerived>
  void evalTo(MatrixBase<DenseDerived>& other) const {
    other.setZero();
    for (Index i = 0; i < derived().rows(); ++i)
      other.coeffRef(i, derived().nestedExpression().indices().coeff(i)) = typename DenseDerived::Scalar(1);
  }
#endif

  /** \return the equivalent permutation matrix */
  PlainPermutationType eval() const { return derived(); }

  DenseMatrixType toDenseMatrix() const { return derived(); }

  /** \returns the matrix with the inverse permutation applied to the columns.
   */
  template <typename OtherDerived>
  friend const Product<OtherDerived, InverseType, AliasFreeProduct> operator*(const MatrixBase<OtherDerived>& matrix,
                                                                             const InverseType& trPerm) {
    return Product<OtherDerived, InverseType, AliasFreeProduct>(matrix.derived(), trPerm.derived());
  }

  /** \returns the matrix with the inverse permutation applied to the rows.
   */
  template <typename OtherDerived>
  const Product<InverseType, OtherDerived, AliasFreeProduct> operator*(const MatrixBase<OtherDerived>& matrix) const {
    return Product<InverseType, OtherDerived, AliasFreeProduct>(derived(), matrix.derived());
  }
};
// Views *this (a vector of integers) as a permutation matrix, without copying.
template <typename Derived>
const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const {
  return derived();
}
namespace internal {
// Assigning a permutation to a dense object goes through the generic
// EigenBase-to-EigenBase path (which calls evalTo()).
template <>
struct AssignmentKind<DenseShape, PermutationShape> {
  typedef EigenBase2EigenBase Kind;
};
}  // end namespace internal
}
// end namespace Eigen
#endif // EIGEN_PERMUTATIONMATRIX_H
eigen-master/Eigen/src/Core/PlainObjectBase.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DENSESTORAGEBASE_H
#define EIGEN_DENSESTORAGEBASE_H
#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)
#define EIGEN_INITIALIZE_COEFFS
#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED \
for (Index i = 0; i < base().size(); ++i) coeffRef(i) = Scalar(0);
#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)
#define EIGEN_INITIALIZE_COEFFS
#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED \
for (Index i = 0; i < base().size(); ++i) coeffRef(i) = std::numeric_limits<Scalar>::quiet_NaN();
#else
#undef EIGEN_INITIALIZE_COEFFS
#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#endif
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace Eigen {

namespace internal {

#ifndef EIGEN_NO_DEBUG
// Primary template: both max dimensions are fixed at compile time, so the
// product is validated once by a static assertion and the runtime check
// is a no-op.
template <int MaxSizeAtCompileTime, int MaxRowsAtCompileTime, int MaxColsAtCompileTime>
struct check_rows_cols_for_overflow {
  EIGEN_STATIC_ASSERT(MaxRowsAtCompileTime* MaxColsAtCompileTime == MaxSizeAtCompileTime,
                      YOU MADE A PROGRAMMING MISTAKE)
  template <typename Index>
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index, Index) {}
};
// Rows fixed, columns dynamic: overflow iff cols > MaxIndex / MaxRows.
template <int MaxRowsAtCompileTime>
struct check_rows_cols_for_overflow<Dynamic, MaxRowsAtCompileTime, Dynamic> {
  template <typename Index>
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index, Index cols) {
    constexpr Index MaxIndex = NumTraits<Index>::highest();
    // Division avoids computing the (possibly overflowing) product itself.
    bool error = cols > (MaxIndex / MaxRowsAtCompileTime);
    if (error) throw_std_bad_alloc();
  }
};
// Columns fixed, rows dynamic: overflow iff rows > MaxIndex / MaxCols.
template <int MaxColsAtCompileTime>
struct check_rows_cols_for_overflow<Dynamic, Dynamic, MaxColsAtCompileTime> {
  template <typename Index>
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index rows, Index) {
    constexpr Index MaxIndex = NumTraits<Index>::highest();
    // Division avoids computing the (possibly overflowing) product itself.
    bool error = rows > (MaxIndex / MaxColsAtCompileTime);
    if (error) throw_std_bad_alloc();
  }
};
// Fully dynamic: both dimensions are only known at run time.
template <>
struct check_rows_cols_for_overflow<Dynamic, Dynamic, Dynamic> {
  template <typename Index>
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index rows, Index cols) {
    constexpr Index MaxIndex = NumTraits<Index>::highest();
    // Guard cols == 0 to avoid dividing by zero; an empty matrix never overflows.
    bool error = cols == 0 ? false : (rows > (MaxIndex / cols));
    if (error) throw_std_bad_alloc();
  }
};
#endif
// Helper selecting between vector-wise and matrix-wise conservative resizing.
template <typename Derived, typename OtherDerived = Derived,
          bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)>
struct conservative_resize_like_impl;

// Helper swapping two dense objects, possibly by just swapping data pointers.
template <typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
struct matrix_swap_impl;

}  // end namespace internal
/** \class PlainObjectBase
* \ingroup Core_Module
* \brief %Dense storage base class for matrices and arrays.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN.
*
* \tparam Derived is the derived type, e.g., a Matrix or Array
*
* \sa \ref TopicClassHierarchy
*/
template
<
typename
Derived
>
class
PlainObjectBase
:
public
internal
::
dense_xpr_base
<
Derived
>::
type
{
 public:
  enum { Options = internal::traits<Derived>::Options };
  typedef typename internal::dense_xpr_base<Derived>::type Base;

  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::Scalar Scalar;

  // SIMD packet type matching Scalar.
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef Derived DenseType;

  using Base::ColsAtCompileTime;
  using Base::Flags;
  using Base::IsVectorAtCompileTime;
  using Base::MaxColsAtCompileTime;
  using Base::MaxRowsAtCompileTime;
  using Base::MaxSizeAtCompileTime;
  using Base::RowsAtCompileTime;
  using Base::SizeAtCompileTime;

  // Convenience Map types over this plain object, with and without
  // alignment guarantees, and const variants.
  typedef Eigen::Map<Derived, Unaligned> MapType;
  typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;
  typedef Eigen::Map<Derived, AlignedMax> AlignedMapType;
  typedef const Eigen::Map<const Derived, AlignedMax> ConstAlignedMapType;
  // Strided variants of the Map types above.
  template <typename StrideType>
  struct StridedMapType {
    typedef Eigen::Map<Derived, Unaligned, StrideType> type;
  };
  template <typename StrideType>
  struct StridedConstMapType {
    typedef Eigen::Map<const Derived, Unaligned, StrideType> type;
  };
  template <typename StrideType>
  struct StridedAlignedMapType {
    typedef Eigen::Map<Derived, AlignedMax, StrideType> type;
  };
  template <typename StrideType>
  struct StridedConstAlignedMapType {
    typedef Eigen::Map<const Derived, AlignedMax, StrideType> type;
  };
 protected:
  // The actual coefficient storage (static array or heap buffer, plus the
  // run-time dimensions when they are dynamic).
  DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
 public:
  // Fixed-size objects with over-aligned scalars need an aligned operator new.
  enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits<Derived>::Alignment > 0) };
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)

  // Sanity checks on the template parameters: compile-time vectors must use
  // the matching major-ness, sizes must be non-negative or Dynamic, max sizes
  // must be consistent with the sizes, and Options may only contain
  // DontAlign/RowMajor bits.
  EIGEN_STATIC_ASSERT(internal::check_implication(MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1,
                                                  (int(Options) & RowMajor) == RowMajor),
                      INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT(internal::check_implication(MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1,
                                                  (int(Options) & RowMajor) == 0),
                      INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0),
                      INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0),
                      INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime == Dynamic),
                      INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT((MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime == Dynamic),
                      INVALID_MATRIX_TEMPLATE_PARAMETERS)
  EIGEN_STATIC_ASSERT(((Options & (DontAlign | RowMajor)) == Options), INVALID_MATRIX_TEMPLATE_PARAMETERS)
  // \returns *this viewed as its dense expression base type.
  EIGEN_DEVICE_FUNC Base& base() { return *static_cast<Base*>(this); }
  // const version of base().
  EIGEN_DEVICE_FUNC const Base& base() const { return *static_cast<const Base*>(this); }
  // \returns the number of rows, as stored alongside the data.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_storage.rows(); }
  // \returns the number of columns, as stored alongside the data.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_storage.cols(); }
  /** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index,Index) const
   * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.
   *
   * See DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const for details. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeff(Index rowId, Index colId) const {
    // Linearize the (row, col) pair according to the storage order.
    if (Flags & RowMajorBit)
      return m_storage.data()[colId + rowId * m_storage.cols()];
    else  // column-major
      return m_storage.data()[rowId + colId * m_storage.rows()];
  }
  /** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const
   * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.
   *
   * See DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const for details. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeff(Index index) const {
    return m_storage.data()[index];
  }
  /** This is an overloaded version of DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index,Index) const
   * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.
   *
   * See DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index,Index) const for details. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index rowId, Index colId) {
    // Linearize the (row, col) pair according to the storage order.
    if (Flags & RowMajorBit)
      return m_storage.data()[colId + rowId * m_storage.cols()];
    else  // column-major
      return m_storage.data()[rowId + colId * m_storage.rows()];
  }
  /** This is an overloaded version of DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index) const
   * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.
   *
   * See DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index) const for details. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index index) { return m_storage.data()[index]; }
  /** This is the const version of coeffRef(Index,Index) which is thus synonym of coeff(Index,Index).
   * It is provided for convenience. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeffRef(Index rowId, Index colId) const {
    if (Flags & RowMajorBit)
      return m_storage.data()[colId + rowId * m_storage.cols()];
    else  // column-major
      return m_storage.data()[rowId + colId * m_storage.rows()];
  }
  /** This is the const version of coeffRef(Index) which is thus synonym of coeff(Index).
   * It is provided for convenience. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeffRef(Index index) const {
    return m_storage.data()[index];
  }
  /** \internal Loads a SIMD packet at (rowId, colId); LoadMode selects the
   * alignment assumption. */
  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const {
    return internal::ploadt<PacketScalar, LoadMode>(
        m_storage.data() +
        (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()));
  }
  /** \internal Loads a SIMD packet at linear position \a index. */
  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketScalar packet(Index index) const {
    return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index);
  }
  /** \internal Stores the SIMD packet \a val at (rowId, colId); StoreMode
   * selects the alignment assumption. */
  template <int StoreMode>
  EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) {
    internal::pstoret<Scalar, PacketScalar, StoreMode>(
        m_storage.data() +
            (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()),
        val);
  }
  /** \internal Stores the SIMD packet \a val at linear position \a index. */
  template <int StoreMode>
  EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) {
    internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val);
  }
  /** \returns a const pointer to the data array of this matrix */
  EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_storage.data(); }
  /** \returns a pointer to the data array of this matrix */
  EIGEN_DEVICE_FUNC constexpr Scalar* data() { return m_storage.data(); }
  /** Resizes \c *this to a \a rows x \a cols matrix.
   *
   * This method is intended for dynamic-size matrices, although it is legal to call it on any
   * matrix as long as fixed dimensions are left unchanged. If you only want to change the number
   * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
   *
   * If the current number of coefficients of \c *this exactly matches the
   * product \a rows * \a cols, then no memory allocation is performed and
   * the current values are left unchanged. In all other cases, including
   * shrinking, the data is reallocated and all previous values are lost.
   *
   * Example: \include Matrix_resize_int_int.cpp
   * Output: \verbinclude Matrix_resize_int_int.out
   *
   * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index rows, Index cols) {
    // The requested sizes must respect all compile-time constraints.
    eigen_assert(internal::check_implication(RowsAtCompileTime != Dynamic, rows == RowsAtCompileTime) &&
                 internal::check_implication(ColsAtCompileTime != Dynamic, cols == ColsAtCompileTime) &&
                 internal::check_implication(RowsAtCompileTime == Dynamic && MaxRowsAtCompileTime != Dynamic,
                                             rows <= MaxRowsAtCompileTime) &&
                 internal::check_implication(ColsAtCompileTime == Dynamic && MaxColsAtCompileTime != Dynamic,
                                             cols <= MaxColsAtCompileTime) &&
                 rows >= 0 && cols >= 0 && "Invalid sizes when resizing a matrix or array.");
#ifndef EIGEN_NO_DEBUG
    internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime>::run(
        rows, cols);
#endif
#ifdef EIGEN_INITIALIZE_COEFFS
    // Remember whether the element count changed so freshly exposed
    // coefficients can be initialized (debug option).
    Index size = rows * cols;
    bool size_changed = size != this->size();
    m_storage.resize(size, rows, cols);
    if (size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#else
    m_storage.resize(rows * cols, rows, cols);
#endif
  }
  /** Resizes \c *this to a vector of length \a size
   *
   * \only_for_vectors. This method does not work for
   * partially dynamic matrices when the static dimension is anything other
   * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
   *
   * Example: \include Matrix_resize_int.cpp
   * Output: \verbinclude Matrix_resize_int.out
   *
   * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
   */
  EIGEN_DEVICE_FUNC constexpr void resize(Index size) {
    EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
    eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime == Dynamic || size <= MaxSizeAtCompileTime)) ||
                  SizeAtCompileTime == size) &&
                 size >= 0);
#ifdef EIGEN_INITIALIZE_COEFFS
    bool size_changed = size != this->size();
#endif
    // Keep the vector orientation: row-vectors stay 1 x size, everything
    // else becomes size x 1.
    if (RowsAtCompileTime == 1)
      m_storage.resize(size, 1, size);
    else
      m_storage.resize(size, size, 1);
#ifdef EIGEN_INITIALIZE_COEFFS
    if (size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#endif
  }
  /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the
   * special value \c NoChange as in the example below.
   *
   * Example: \include Matrix_resize_NoChange_int.cpp
   * Output: \verbinclude Matrix_resize_NoChange_int.out
   *
   * \sa resize(Index,Index)
   */
  EIGEN_DEVICE_FUNC constexpr void resize(NoChange_t, Index cols) { resize(rows(), cols); }
  /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special
   * value \c NoChange as in the example below.
   *
   * Example: \include Matrix_resize_int_NoChange.cpp
   * Output: \verbinclude Matrix_resize_int_NoChange.out
   *
   * \sa resize(Index,Index)
   */
  EIGEN_DEVICE_FUNC constexpr void resize(Index rows, NoChange_t) { resize(rows, cols()); }
  /** Resizes \c *this to have the same dimensions as \a other.
   * Takes care of doing all the checking that's needed.
   *
   * Note that copying a row-vector into a vector (and conversely) is allowed.
   * The resizing, if any, is then done in the appropriate way so that row-vectors
   * remain row-vectors and vectors remain vectors.
   */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other) {
    const OtherDerived& other = _other.derived();
#ifndef EIGEN_NO_DEBUG
    internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime>::run(
        other.rows(), other.cols());
#endif
    const Index othersize = other.rows() * other.cols();
    if (RowsAtCompileTime == 1) {
      // *this is a compile-time row-vector: other must be some vector too.
      eigen_assert(other.rows() == 1 || other.cols() == 1);
      resize(1, othersize);
    } else if (ColsAtCompileTime == 1) {
      // *this is a compile-time column-vector: other must be some vector too.
      eigen_assert(other.rows() == 1 || other.cols() == 1);
      resize(othersize, 1);
    } else
      resize(other.rows(), other.cols());
  }
/** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
 *
 * The method is intended for matrices of dynamic size. If you only want to change the number
 * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
 * conservativeResize(Index, NoChange_t).
 *
 * Matrices are resized relative to the top-left element. In case values need to be
 * appended to the matrix they will be uninitialized.
 */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) {
  // All the real work (relocation vs. block copy) happens in the impl helper.
  internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);
}
/** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
 *
 * As opposed to conservativeResize(Index rows, Index cols), this version leaves
 * the number of columns unchanged.
 *
 * In case the matrix is growing, new rows will be uninitialized.
 */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) {
  // Note: see the comment in conservativeResize(Index,Index)
  conservativeResize(rows, cols());
}
/** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
 *
 * As opposed to conservativeResize(Index rows, Index cols), this version leaves
 * the number of rows unchanged.
 *
 * In case the matrix is growing, new columns will be uninitialized.
 */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) {
  // Note: see the comment in conservativeResize(Index,Index)
  conservativeResize(rows(), cols);
}
/** Resizes the vector to \a size while retaining old values.
 *
 * \only_for_vectors. This method does not work for
 * partially dynamic matrices when the static dimension is anything other
 * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
 *
 * When values are appended, they will be uninitialized.
 */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index size) {
  internal::conservative_resize_like_impl<Derived>::run(*this, size);
}
/** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched.
 *
 * The method is intended for matrices of dynamic size. If you only want to change the number
 * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
 * conservativeResize(Index, NoChange_t).
 *
 * Matrices are resized relative to the top-left element. In case values need to be
 * appended to the matrix they will copied from \c other.
 */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other) {
  // Dispatch on <Derived, OtherDerived> so vector/matrix cases pick the right strategy.
  internal::conservative_resize_like_impl<Derived, OtherDerived>::run(*this, other);
}
/** This is a special case of the templated operator=. Its purpose is to
 * prevent a default operator= from hiding the templated operator=.
 */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Derived& operator=(const PlainObjectBase& other) {
  // _set handles potential aliasing and resizing.
  return _set(other);
}
/** \sa MatrixBase::lazyAssign() */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other) {
  // Resize first so the base-class lazy assignment can assume matching shapes.
  _resize_to_match(other);
  return Base::lazyAssign(other.derived());
}
/** Assignment from an expression that evaluates into *this via evalTo (ReturnByValue). */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func) {
  resize(func.rows(), func.cols());
  return Base::operator=(func);
}
// Prevent user from trying to instantiate PlainObjectBase objects
// by making all its constructors protected. See bug 1074.
protected:
/** \brief Default constructor: leaves the storage empty/uninitialized. */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr PlainObjectBase() = default;
/** \brief Move constructor */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr PlainObjectBase(PlainObjectBase&&) = default;
/** \brief Move assignment operator */
EIGEN_DEVICE_FUNC constexpr PlainObjectBase& operator=(PlainObjectBase&& other) noexcept {
  // Moving the storage transfers (or swaps) the underlying buffer.
  m_storage = std::move(other.m_storage);
  return *this;
}
/** Copy constructor */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr PlainObjectBase(const PlainObjectBase&) = default;
/** \brief Constructs storage for \a size coefficients shaped \a rows x \a cols (coefficients uninitialized). */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)
    : m_storage(size, rows, cols) {}
/** \brief Construct a row of column vector with fixed size from an arbitrary number of coefficients.
 *
 * \only_for_vectors
 *
 * This constructor is for 1D array or vectors with more than 4 coefficients.
 *
 * \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
 * constructor must match the the fixed number of rows (resp. columns) of \c *this.
 */
template <typename... ArgTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2,
                                                      const Scalar& a3, const ArgTypes&... args)
    : m_storage() {
  // The fixed vector size must equal 4 explicit coefficients plus the pack size.
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, sizeof...(args) + 4);
  m_storage.data()[0] = a0;
  m_storage.data()[1] = a1;
  m_storage.data()[2] = a2;
  m_storage.data()[3] = a3;
  Index idx = 4;
  // Expand the pack in order: each element of the dummy initializer list
  // stores one trailing coefficient as a side effect of the comma expression.
  auto expander = {(m_storage.data()[idx++] = args, 0)...};
  static_cast<void>(expander);
}
/** \brief Constructs a Matrix or Array and initializes it by elements given by an initializer list of initializer
 * lists
 */
EIGEN_DEVICE_FUNC explicit constexpr EIGEN_STRONG_INLINE PlainObjectBase(
    const std::initializer_list<std::initializer_list<Scalar>>& list)
    : m_storage() {
  // Width of the first row; every row is checked against it below.
  size_t row_width = 0;
  if (list.begin() != list.end()) {
    row_width = list.begin()->size();
  }
  // This is to allow syntax like VectorXi {{1, 2, 3, 4}}
  if (ColsAtCompileTime == 1 && list.size() == 1) {
    // Single inner list fills a column vector.
    eigen_assert(row_width == static_cast<size_t>(RowsAtCompileTime) || RowsAtCompileTime == Dynamic);
    resize(row_width, ColsAtCompileTime);
    if (list.begin()->begin() != nullptr) {
      Index k = 0;
      for (const Scalar& elem : *list.begin()) {
        coeffRef(k++) = elem;
      }
    }
  } else {
    // General 2D case: outer list = rows, inner lists = columns.
    eigen_assert(list.size() == static_cast<size_t>(RowsAtCompileTime) || RowsAtCompileTime == Dynamic);
    eigen_assert(row_width == static_cast<size_t>(ColsAtCompileTime) || ColsAtCompileTime == Dynamic);
    resize(list.size(), row_width);
    Index r = 0;
    for (const std::initializer_list<Scalar>& row : list) {
      // All rows must have the same number of columns.
      eigen_assert(row_width == row.size());
      Index c = 0;
      for (const Scalar& elem : row) {
        coeffRef(r, c) = elem;
        ++c;
      }
      ++r;
    }
  }
}
/** \sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived>& other) : m_storage() {
  // Fresh object: no aliasing possible, so a no-alias assignment is safe.
  resizeLike(other);
  _set_noalias(other);
}
/** \sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived>& other) : m_storage() {
  // Generic expressions go through the full assignment operator.
  resizeLike(other);
  *this = other.derived();
}
/** \brief Copy constructor with in-place evaluation */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const ReturnByValue<OtherDerived>& other) {
  // FIXME this does not automatically transpose vectors if necessary
  resize(other.rows(), other.cols());
  other.evalTo(this->derived());
}
public:
/** \brief Copies the generic expression \a other into *this.
 * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
 */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived>& other) {
  // Adjust our dimensions to the expression's before delegating the copy.
  _resize_to_match(other);
  Base::operator=(other.derived());
  return this->derived();
}
/** \name Map
 * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
 * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned
 * \a data pointers.
 *
 * Here is an example using strides:
 * \include Matrix_Map_stride.cpp
 * Output: \verbinclude Matrix_Map_stride.out
 *
 * \see class Map
 */
///@{
// --- Unaligned maps: fixed-size, vector-sized, and matrix-sized variants ---
static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); }
static inline MapType Map(Scalar* data) { return MapType(data); }
static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); }
static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); }
static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); }
static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); }
// --- Aligned maps: same shapes, but the pointer must satisfy the alignment contract ---
static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); }
static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); }
static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) {
  return ConstAlignedMapType(data, size);
}
static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); }
static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) {
  return ConstAlignedMapType(data, rows, cols);
}
static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) {
  return AlignedMapType(data, rows, cols);
}
// --- Strided unaligned maps ---
template <int Outer, int Inner>
static inline typename StridedConstMapType<Stride<Outer, Inner>>::type Map(const Scalar* data,
                                                                           const Stride<Outer, Inner>& stride) {
  return typename StridedConstMapType<Stride<Outer, Inner>>::type(data, stride);
}
template <int Outer, int Inner>
static inline typename StridedMapType<Stride<Outer, Inner>>::type Map(Scalar* data,
                                                                      const Stride<Outer, Inner>& stride) {
  return typename StridedMapType<Stride<Outer, Inner>>::type(data, stride);
}
template <int Outer, int Inner>
static inline typename StridedConstMapType<Stride<Outer, Inner>>::type Map(const Scalar* data, Index size,
                                                                           const Stride<Outer, Inner>& stride) {
  return typename StridedConstMapType<Stride<Outer, Inner>>::type(data, size, stride);
}
template <int Outer, int Inner>
static inline typename StridedMapType<Stride<Outer, Inner>>::type Map(Scalar* data, Index size,
                                                                      const Stride<Outer, Inner>& stride) {
  return typename StridedMapType<Stride<Outer, Inner>>::type(data, size, stride);
}
template <int Outer, int Inner>
static inline typename StridedConstMapType<Stride<Outer, Inner>>::type Map(const Scalar* data, Index rows, Index cols,
                                                                           const Stride<Outer, Inner>& stride) {
  return typename StridedConstMapType<Stride<Outer, Inner>>::type(data, rows, cols, stride);
}
template <int Outer, int Inner>
static inline typename StridedMapType<Stride<Outer, Inner>>::type Map(Scalar* data, Index rows, Index cols,
                                                                      const Stride<Outer, Inner>& stride) {
  return typename StridedMapType<Stride<Outer, Inner>>::type(data, rows, cols, stride);
}
// --- Strided aligned maps ---
template <int Outer, int Inner>
static inline typename StridedConstAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
    const Scalar* data, const Stride<Outer, Inner>& stride) {
  return typename StridedConstAlignedMapType<Stride<Outer, Inner>>::type(data, stride);
}
template <int Outer, int Inner>
static inline typename StridedAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
    Scalar* data, const Stride<Outer, Inner>& stride) {
  return typename StridedAlignedMapType<Stride<Outer, Inner>>::type(data, stride);
}
template <int Outer, int Inner>
static inline typename StridedConstAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
    const Scalar* data, Index size, const Stride<Outer, Inner>& stride) {
  return typename StridedConstAlignedMapType<Stride<Outer, Inner>>::type(data, size, stride);
}
template <int Outer, int Inner>
static inline typename StridedAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
    Scalar* data, Index size, const Stride<Outer, Inner>& stride) {
  return typename StridedAlignedMapType<Stride<Outer, Inner>>::type(data, size, stride);
}
template <int Outer, int Inner>
static inline typename StridedConstAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
    const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) {
  return typename StridedConstAlignedMapType<Stride<Outer, Inner>>::type(data, rows, cols, stride);
}
template <int Outer, int Inner>
static inline typename StridedAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
    Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) {
  return typename StridedAlignedMapType<Stride<Outer, Inner>>::type(data, rows, cols, stride);
}
///@}
// Resizing variants of the coefficient-filling setters. Each overload first
// resizes *this (full size, rows-only, or cols-only) and then fills it.
// Definitions live in CwiseNullaryOp.h / Random.h.
using Base::setConstant;
EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val);
EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val);
EIGEN_DEVICE_FUNC Derived& setConstant(NoChange_t, Index cols, const Scalar& val);
EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, NoChange_t, const Scalar& val);
using Base::setZero;
EIGEN_DEVICE_FUNC Derived& setZero(Index size);
EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols);
EIGEN_DEVICE_FUNC Derived& setZero(NoChange_t, Index cols);
EIGEN_DEVICE_FUNC Derived& setZero(Index rows, NoChange_t);
using Base::setOnes;
EIGEN_DEVICE_FUNC Derived& setOnes(Index size);
EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols);
EIGEN_DEVICE_FUNC Derived& setOnes(NoChange_t, Index cols);
EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, NoChange_t);
using Base::setRandom;
Derived& setRandom(Index size);
Derived& setRandom(Index rows, Index cols);
Derived& setRandom(NoChange_t, Index cols);
Derived& setRandom(Index rows, NoChange_t);
#ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
#include EIGEN_PLAINOBJECTBASE_PLUGIN
#endif
protected:
/** \internal Resizes *this in preparation for assigning \a other to it.
 * Takes care of doing all the checking that's needed.
 *
 * Note that copying a row-vector into a vector (and conversely) is allowed.
 * The resizing, if any, is then done in the appropriate way so that row-vectors
 * remain row-vectors and vectors remain vectors.
 */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other) {
#ifdef EIGEN_NO_AUTOMATIC_RESIZING
  // With automatic resizing disabled, assignment is only legal into an empty
  // object or one whose dimensions already match.
  eigen_assert((this->size() == 0 || (IsVectorAtCompileTime ? (this->size() == other.size())
                                                            : (rows() == other.rows() && cols() == other.cols()))) &&
               "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
  EIGEN_ONLY_USED_FOR_DEBUG(other);
#else
  resizeLike(other);
#endif
}
/**
 * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
 *
 * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
 * it will be initialized.
 *
 * Note that copying a row-vector into a vector (and conversely) is allowed.
 * The resizing, if any, is then done in the appropriate way so that row-vectors
 * remain row-vectors and vectors remain vectors.
 *
 * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
 *
 * \internal
 */
// aliasing is dealt once in internal::call_assignment
// so at this stage we have to assume aliasing... and resising has to be done later.
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Derived& _set(const DenseBase<OtherDerived>& other) {
  internal::call_assignment(this->derived(), other.derived());
  return this->derived();
}
/** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
 * is the case when creating a new matrix) so one can enforce lazy evaluation.
 *
 * \sa operator=(const MatrixBase<OtherDerived>&), _set()
 */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Derived& _set_noalias(const DenseBase<OtherDerived>& other) {
  // We rely on call_assignment_no_alias to perform any resizing; the explicit
  // assign_op enforces lazy evaluation while still allowing row<->column
  // vector copies (plain lazyAssign would not).
  internal::call_assignment_no_alias(this->derived(), other.derived(),
                                     internal::assign_op<Scalar, typename OtherDerived::Scalar>());
  return this->derived();
}
/** \internal Two-argument init, size flavor: the arguments are (rows, cols).
 * Enabled only when the fixed size is not 2, so there is no ambiguity with the
 * two-coefficient flavor below. */
template <typename T0, typename T1>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(Index rows, Index cols,
                                                  std::enable_if_t<Base::SizeAtCompileTime != 2, T0>* = 0) {
  EIGEN_STATIC_ASSERT(internal::is_valid_index_type<T0>::value && internal::is_valid_index_type<T1>::value,
                      T0 AND T1 MUST BE INTEGER TYPES)
  resize(rows, cols);
}
/** \internal Two-argument init, coefficient flavor: a fixed-size 2-vector gets
 * its two coefficients directly. */
template <typename T0, typename T1>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1,
                                                  std::enable_if_t<Base::SizeAtCompileTime == 2, T0>* = 0) {
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
  m_storage.data()[0] = Scalar(val0);
  m_storage.data()[1] = Scalar(val1);
}
/** \internal Two-argument init for a fixed-size 2-vector whose Scalar is not
 * Index but the arguments are: interpret them as the two coefficients. */
template <typename T0, typename T1>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(
    const Index& val0, const Index& val1,
    std::enable_if_t<(!internal::is_same<Index, Scalar>::value) && (internal::is_same<T0, Index>::value) &&
                         (internal::is_same<T1, Index>::value) && Base::SizeAtCompileTime == 2,
                     T1>* = 0) {
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
  m_storage.data()[0] = Scalar(val0);
  m_storage.data()[1] = Scalar(val1);
}
// The argument is convertible to the Index type and we either have a non 1x1 Matrix, or a dynamic-sized Array,
// then the argument is meant to be the size of the object.
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(
    Index size,
    std::enable_if_t<(Base::SizeAtCompileTime != 1 || !internal::is_convertible<T, Scalar>::value) &&
                         ((!internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value ||
                           Base::SizeAtCompileTime == Dynamic)),
                     T>* = 0) {
  // NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.
  const bool is_integer_alike = internal::is_valid_index_type<T>::value;
  EIGEN_UNUSED_VARIABLE(is_integer_alike);
  EIGEN_STATIC_ASSERT(is_integer_alike, FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
  resize(size);
}
// We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar
// type can be implicitly converted)
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(
    const Scalar& val0,
    std::enable_if_t<Base::SizeAtCompileTime == 1 && internal::is_convertible<T, Scalar>::value, T>* = 0) {
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
  m_storage.data()[0] = val0;
}
// We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar
// type match the index type)
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(
    const Index& val0,
    std::enable_if_t<(!internal::is_same<Index, Scalar>::value) && (internal::is_same<Index, T>::value) &&
                         Base::SizeAtCompileTime == 1 && internal::is_convertible<T, Scalar>::value,
                     T*>* = 0) {
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
  m_storage.data()[0] = Scalar(val0);
}
// Initialize a fixed size matrix from a pointer to raw data
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar* data) {
  this->_set_noalias(ConstMapType(data));
}
// Initialize an arbitrary matrix from a dense expression
template <typename T, typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const DenseBase<OtherDerived>& other) {
  this->_set_noalias(other);
}
// Initialize an arbitrary matrix from an object convertible to the Derived type.
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Derived& other) {
  this->_set_noalias(other);
}
// Initialize an arbitrary matrix from a generic Eigen expression
template <typename T, typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const EigenBase<OtherDerived>& other) {
  this->derived() = other;
}
// Initialize from an expression that evaluates into *this via evalTo.
template <typename T, typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const ReturnByValue<OtherDerived>& other) {
  resize(other.rows(), other.cols());
  other.evalTo(this->derived());
}
// Initialize from a rotation representation (Geometry module).
template <typename T, typename OtherDerived, int ColsAtCompileTime>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const RotationBase<OtherDerived, ColsAtCompileTime>& r) {
  this->derived() = r;
}
// For fixed-size Array<Scalar,...>
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(
    const Scalar& val0,
    std::enable_if_t<Base::SizeAtCompileTime != Dynamic && Base::SizeAtCompileTime != 1 &&
                         internal::is_convertible<T, Scalar>::value &&
                         internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value,
                     T>* = 0) {
  Base::setConstant(val0);
}
// For fixed-size Array<Index,...>
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(
    const Index& val0,
    std::enable_if_t<(!internal::is_same<Index, Scalar>::value) && (internal::is_same<Index, T>::value) &&
                         Base::SizeAtCompileTime != Dynamic && Base::SizeAtCompileTime != 1 &&
                         internal::is_convertible<T, Scalar>::value &&
                         internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value,
                     T*>* = 0) {
  Base::setConstant(val0);
}
// Grant the swap implementation access to m_storage.
template <typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
friend struct internal::matrix_swap_impl;

public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal
 * \brief Override DenseBase::swap() since for dynamic-sized matrices
 * of same type it is enough to swap the data pointers.
 */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(DenseBase<OtherDerived>& other) {
  enum {
    // Pointer swap is only valid for identical dynamic-sized types.
    SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime == Dynamic
  };
  internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.derived());
}
/** \internal
 * \brief const version forwarded to DenseBase::swap
 */
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(DenseBase<OtherDerived> const& other) {
  Base::swap(other.derived());
}
enum { IsPlainObjectBase = 1 };
#endif
public:
// These apparently need to be down here for nvcc+icc to prevent duplicate
// Map symbol.
template <typename PlainObjectType, int MapOptions, typename StrideType>
friend class Eigen::Map;
friend class Eigen::Map<Derived, Unaligned>;
friend class Eigen::Map<const Derived, Unaligned>;
#if EIGEN_MAX_ALIGN_BYTES > 0
// for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class
// twice.
friend class Eigen::Map<Derived, AlignedMax>;
friend class Eigen::Map<const Derived, AlignedMax>;
#endif
};
namespace
internal
{
/** \internal Implements conservativeResize()/conservativeResizeLike() for the
 * general (non-vector) case. Reallocates in place when the storage order
 * permits, otherwise copies the overlapping block into a fresh object. */
template <typename Derived, typename OtherDerived, bool IsVector>
struct conservative_resize_like_impl {
  // In-place reallocation is only sound for trivially copyable scalars.
  static constexpr bool IsRelocatable = std::is_trivially_copyable<typename Derived::Scalar>::value;

  static void run(DenseBase<Derived>& _this, Index rows, Index cols) {
    if (_this.rows() == rows && _this.cols() == cols) return;
    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
    if (IsRelocatable &&
        ((Derived::IsRowMajor && _this.cols() == cols) ||  // row-major and we change only the number of rows
         (!Derived::IsRowMajor && _this.rows() == rows)))  // column-major and we change only the number of columns
    {
#ifndef EIGEN_NO_DEBUG
      internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime, Derived::MaxRowsAtCompileTime,
                                             Derived::MaxColsAtCompileTime>::run(rows, cols);
#endif
      _this.derived().m_storage.conservativeResize(rows * cols, rows, cols);
    } else {
      // The storage order does not allow us to use reallocation.
      Derived tmp(rows, cols);
      const Index common_rows = numext::mini(rows, _this.rows());
      const Index common_cols = numext::mini(cols, _this.cols());
      tmp.block(0, 0, common_rows, common_cols) = _this.block(0, 0, common_rows, common_cols);
      _this.derived().swap(tmp);
    }
  }

  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other) {
    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
    // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index),
    // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the
    // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
    // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
    // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
    if (IsRelocatable &&
        ((Derived::IsRowMajor && _this.cols() == other.cols()) ||   // row-major, only the row count changes
         (!Derived::IsRowMajor && _this.rows() == other.rows())))   // column-major, only the column count changes
    {
      const Index new_rows = other.rows() - _this.rows();
      const Index new_cols = other.cols() - _this.cols();
      _this.derived().m_storage.conservativeResize(other.size(), other.rows(), other.cols());
      // Copy the appended rows/columns from `other` into the freshly grown area.
      if (new_rows > 0)
        _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
      else if (new_cols > 0)
        _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);
    } else {
      // The storage order does not allow us to use reallocation.
      Derived tmp(other);
      const Index common_rows = numext::mini(tmp.rows(), _this.rows());
      const Index common_cols = numext::mini(tmp.cols(), _this.cols());
      tmp.block(0, 0, common_rows, common_cols) = _this.block(0, 0, common_rows, common_cols);
      _this.derived().swap(tmp);
    }
  }
};
// Here, the specialization for vectors inherits from the general matrix case
// to allow calling .conservativeResize(rows,cols) on vectors.
template <typename Derived, typename OtherDerived>
struct conservative_resize_like_impl<Derived, OtherDerived, true>
    : conservative_resize_like_impl<Derived, OtherDerived, false> {
  typedef conservative_resize_like_impl<Derived, OtherDerived, false> Base;
  using Base::IsRelocatable;
  using Base::run;

  /** \internal Resize a vector to \a size, preserving existing coefficients. */
  static void run(DenseBase<Derived>& _this, Index size) {
    // Map the flat size onto (rows, cols) according to the vector's orientation.
    const Index new_rows = Derived::RowsAtCompileTime == 1 ? 1 : size;
    const Index new_cols = Derived::RowsAtCompileTime == 1 ? size : 1;
    if (IsRelocatable)
      _this.derived().m_storage.conservativeResize(size, new_rows, new_cols);
    else
      Base::run(_this.derived(), new_rows, new_cols);
  }

  /** \internal Resize a vector like \a other, copying appended coefficients from it. */
  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other) {
    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
    const Index num_new_elements = other.size() - _this.size();
    const Index new_rows = Derived::RowsAtCompileTime == 1 ? 1 : other.rows();
    const Index new_cols = Derived::RowsAtCompileTime == 1 ? other.cols() : 1;
    if (IsRelocatable)
      _this.derived().m_storage.conservativeResize(other.size(), new_rows, new_cols);
    else
      Base::run(_this.derived(), new_rows, new_cols);
    // If we grew, fill the tail with the corresponding coefficients of `other`.
    if (num_new_elements > 0) _this.tail(num_new_elements) = other.tail(num_new_elements);
  }
};
/** \internal Generic swap: element-wise through DenseBase::swap. */
template <typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
struct matrix_swap_impl {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(MatrixTypeA& a, MatrixTypeB& b) { a.base().swap(b); }
};
/** \internal Pointer swap: identical dynamic-sized types just exchange storage. */
template <typename MatrixTypeA, typename MatrixTypeB>
struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true> {
  EIGEN_DEVICE_FUNC static inline void run(MatrixTypeA& a, MatrixTypeB& b) {
    // Access m_storage through the PlainObjectBase layer of each type.
    static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);
  }
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_DENSESTORAGEBASE_H
eigen-master/Eigen/src/Core/Product.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PRODUCT_H
#define EIGEN_PRODUCT_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
// Forward declaration: the storage-kind-specific implementation layer of Product.
template <typename Lhs, typename Rhs, int Option, typename StorageKind>
class ProductImpl;
namespace
internal
{
/** \internal Traits of the Product expression: scalar/storage promotion and
 * compile-time dimensions derived from the two operands. */
template <typename Lhs, typename Rhs, int Option>
struct traits<Product<Lhs, Rhs, Option>> {
  typedef remove_all_t<Lhs> LhsCleaned;
  typedef remove_all_t<Rhs> RhsCleaned;
  typedef traits<LhsCleaned> LhsTraits;
  typedef traits<RhsCleaned> RhsTraits;

  typedef MatrixXpr XprKind;

  // Result scalar type follows the usual binary-op promotion rules.
  typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar,
                                        typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
  typedef typename product_promote_storage_type<typename LhsTraits::StorageKind, typename RhsTraits::StorageKind,
                                                internal::product_type<Lhs, Rhs>::ret>::ret StorageKind;
  typedef typename promote_index_type<typename LhsTraits::StorageIndex, typename RhsTraits::StorageIndex>::type
      StorageIndex;

  enum {
    RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
    ColsAtCompileTime = RhsTraits::ColsAtCompileTime,
    MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,

    // FIXME: only needed by GeneralMatrixMatrixTriangular
    InnerSize = min_size_prefer_fixed(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),

    // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
    Flags = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1)   ? RowMajorBit
            : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
            : (((LhsTraits::Flags & NoPreferredStorageOrderBit) && (RhsTraits::Flags & RowMajorBit)) ||
               ((RhsTraits::Flags & NoPreferredStorageOrderBit) && (LhsTraits::Flags & RowMajorBit)))
                ? RowMajorBit
                : NoPreferredStorageOrderBit
  };
};
struct TransposeProductEnum {
  // convenience enumerations to specialize transposed products
  // Operand kinds occupy one byte each; a product kind packs (lhs << 8) | rhs.
  enum : int {
    Default = 0x00,
    Matrix = 0x01,
    Permutation = 0x02,
    MatrixMatrix = (Matrix << 8) | Matrix,
    MatrixPermutation = (Matrix << 8) | Permutation,
    PermutationMatrix = (Permutation << 8) | Matrix
  };
};
// Classifies one operand for transposed-product dispatch: Matrix for
// MatrixBase expressions, Permutation for PermutationBase expressions,
// Default for anything else (see TransposeProductEnum).
template <typename Xpr>
struct TransposeKind {
  static constexpr int Kind = is_matrix_base_xpr<Xpr>::value        ? TransposeProductEnum::Matrix
                              : is_permutation_base_xpr<Xpr>::value ? TransposeProductEnum::Permutation
                                                                    : TransposeProductEnum::Default;
};
// Combines the classification of both product operands into a single dispatch
// tag: the lhs kind goes in the high byte, the rhs kind in the low byte,
// matching the composite values of TransposeProductEnum.
template <typename Lhs, typename Rhs>
struct TransposeProductKind {
  static constexpr int LhsKind = TransposeKind<Lhs>::Kind;
  static constexpr int RhsKind = TransposeKind<Rhs>::Kind;
  static constexpr int Kind = (LhsKind << 8) | RhsKind;
};
// Default (non-optimized) transpose/adjoint of a product expression: simply
// wraps the whole Product in a Transpose (plus a conjugate for complex
// scalars). Specializations below expand the transpose into a swapped product
// when both operands are matrices and/or permutations.
template <typename Lhs, typename Rhs, int Option, int Kind = TransposeProductKind<Lhs, Rhs>::Kind>
struct product_transpose_helper {
  // by default, don't optimize the transposed product
  using Derived = Product<Lhs, Rhs, Option>;
  using Scalar = typename Derived::Scalar;

  using TransposeType = Transpose<const Derived>;
  using ConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<Scalar>, TransposeType>;
  // Adjoint degenerates to plain transpose for real scalars.
  using AdjointType = std::conditional_t<NumTraits<Scalar>::IsComplex, ConjugateTransposeType, TransposeType>;

  // return (lhs * rhs)^T
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
    return TransposeType(derived);
  }

  // return (lhs * rhs)^H
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
    return AdjointType(TransposeType(derived));
  }
};
// Matrix-matrix specialization: (A*B)^T is rewritten as B^T * A^T (and
// (A*B)^H as B^H * A^H), avoiding a Transpose wrapper around the product.
template <typename Lhs, typename Rhs, int Option>
struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::MatrixMatrix> {
  // expand the transposed matrix-matrix product
  using Derived = Product<Lhs, Rhs, Option>;

  using LhsScalar = typename traits<Lhs>::Scalar;
  using LhsTransposeType = typename DenseBase<Lhs>::ConstTransposeReturnType;
  using LhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<LhsScalar>, LhsTransposeType>;
  // For a real lhs, its adjoint is just its transpose.
  using LhsAdjointType = std::conditional_t<NumTraits<LhsScalar>::IsComplex, LhsConjugateTransposeType, LhsTransposeType>;

  using RhsScalar = typename traits<Rhs>::Scalar;
  using RhsTransposeType = typename DenseBase<Rhs>::ConstTransposeReturnType;
  using RhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<RhsScalar>, RhsTransposeType>;
  using RhsAdjointType = std::conditional_t<NumTraits<RhsScalar>::IsComplex, RhsConjugateTransposeType, RhsTransposeType>;

  // Note the swapped operand order in the result types.
  using TransposeType = Product<RhsTransposeType, LhsTransposeType, Option>;
  using AdjointType = Product<RhsAdjointType, LhsAdjointType, Option>;

  // return rhs^T * lhs^T
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
    return TransposeType(RhsTransposeType(derived.rhs()), LhsTransposeType(derived.lhs()));
  }

  // return rhs^H * lhs^H
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
    return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())),
                       LhsAdjointType(LhsTransposeType(derived.lhs())));
  }
};
// Permutation-matrix specialization: for a permutation P, P^T == P^-1, so
// (P*M)^T is rewritten as M^T * P^-1 (and (P*M)^H as M^H * P^-1).
template <typename Lhs, typename Rhs, int Option>
struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::PermutationMatrix> {
  // expand the transposed permutation-matrix product
  using Derived = Product<Lhs, Rhs, Option>;

  // The transpose of a permutation is its inverse.
  using LhsInverseType = typename PermutationBase<Lhs>::InverseReturnType;

  using RhsScalar = typename traits<Rhs>::Scalar;
  using RhsTransposeType = typename DenseBase<Rhs>::ConstTransposeReturnType;
  using RhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<RhsScalar>, RhsTransposeType>;
  using RhsAdjointType = std::conditional_t<NumTraits<RhsScalar>::IsComplex, RhsConjugateTransposeType, RhsTransposeType>;

  using TransposeType = Product<RhsTransposeType, LhsInverseType, Option>;
  using AdjointType = Product<RhsAdjointType, LhsInverseType, Option>;

  // return rhs^T * lhs^-1
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
    return TransposeType(RhsTransposeType(derived.rhs()), LhsInverseType(derived.lhs()));
  }

  // return rhs^H * lhs^-1
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
    return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())), LhsInverseType(derived.lhs()));
  }
};
// Matrix-permutation specialization: for a permutation P, P^T == P^-1, so
// (M*P)^T is rewritten as P^-1 * M^T (and (M*P)^H as P^-1 * M^H).
template <typename Lhs, typename Rhs, int Option>
struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::MatrixPermutation> {
  // expand the transposed matrix-permutation product
  using Derived = Product<Lhs, Rhs, Option>;

  using LhsScalar = typename traits<Lhs>::Scalar;
  using LhsTransposeType = typename DenseBase<Lhs>::ConstTransposeReturnType;
  using LhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<LhsScalar>, LhsTransposeType>;
  using LhsAdjointType = std::conditional_t<NumTraits<LhsScalar>::IsComplex, LhsConjugateTransposeType, LhsTransposeType>;

  // The transpose of a permutation is its inverse.
  using RhsInverseType = typename PermutationBase<Rhs>::InverseReturnType;

  using TransposeType = Product<RhsInverseType, LhsTransposeType, Option>;
  using AdjointType = Product<RhsInverseType, LhsAdjointType, Option>;

  // return rhs^-1 * lhs^T
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
    return TransposeType(RhsInverseType(derived.rhs()), LhsTransposeType(derived.lhs()));
  }

  // return rhs^-1 * lhs^H
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
    return AdjointType(RhsInverseType(derived.rhs()), LhsAdjointType(LhsTransposeType(derived.lhs())));
  }
};
}
// end namespace internal
/** \class Product
 * \ingroup Core_Module
 *
 * \brief Expression of the product of two arbitrary matrices or vectors
 *
 * \tparam Lhs_ the type of the left-hand side expression
 * \tparam Rhs_ the type of the right-hand side expression
 *
 * This class represents an expression of the product of two arbitrary matrices.
 * Evaluation is deferred: the operands are merely stored (by reference or by
 * value, as decided by internal::ref_selector), and the actual computation is
 * performed by the product evaluators when the expression is assigned.
 *
 * The other template parameters are:
 * \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct
 *
 */
template <typename Lhs_, typename Rhs_, int Option>
class Product : public ProductImpl<Lhs_, Rhs_, Option,
                                   typename internal::product_promote_storage_type<
                                       typename internal::traits<Lhs_>::StorageKind,
                                       typename internal::traits<Rhs_>::StorageKind,
                                       internal::product_type<Lhs_, Rhs_>::ret>::ret> {
 public:
  typedef Lhs_ Lhs;
  typedef Rhs_ Rhs;

  typedef typename ProductImpl<Lhs, Rhs, Option,
                               typename internal::product_promote_storage_type<
                                   typename internal::traits<Lhs>::StorageKind,
                                   typename internal::traits<Rhs>::StorageKind,
                                   internal::product_type<Lhs, Rhs>::ret>::ret>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(Product)

  // Operands are nested by reference or by value as ref_selector decides.
  typedef typename internal::ref_selector<Lhs>::type LhsNested;
  typedef typename internal::ref_selector<Rhs>::type RhsNested;
  typedef internal::remove_all_t<LhsNested> LhsNestedCleaned;
  typedef internal::remove_all_t<RhsNested> RhsNestedCleaned;

  // transpose()/adjoint() return types, possibly rewritten as a swapped
  // product by product_transpose_helper.
  using TransposeReturnType = typename internal::product_transpose_helper<Lhs, Rhs, Option>::TransposeType;
  using AdjointReturnType = typename internal::product_transpose_helper<Lhs, Rhs, Option>::AdjointType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {
    eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" &&
                 "if you wanted a coeff-wise or a dot product use the respective explicit functions");
  }

  // Result dimensions: (lhs rows) x (rhs cols).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_lhs.rows(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_rhs.cols(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNestedCleaned& lhs() const { return m_lhs; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNestedCleaned& rhs() const { return m_rhs; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeReturnType transpose() const {
    return internal::product_transpose_helper<Lhs, Rhs, Option>::run_transpose(*this);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointReturnType adjoint() const {
    return internal::product_transpose_helper<Lhs, Rhs, Option>::run_adjoint(*this);
  }

 protected:
  LhsNested m_lhs;
  RhsNested m_rhs;
};
namespace
internal
{
// Base of the dense ProductImpl; the primary template just forwards to the
// usual dense expression base. It is specialized below for inner products to
// provide an implicit conversion to scalar.
template <typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs, Rhs>::ret>
class dense_product_base : public internal::dense_xpr_base<Product<Lhs, Rhs, Option>>::type {};
/** Conversion to scalar for inner-products */
template <typename Lhs, typename Rhs, int Option>
class dense_product_base<Lhs, Rhs, Option, InnerProduct>
    : public internal::dense_xpr_base<Product<Lhs, Rhs, Option>>::type {
  typedef Product<Lhs, Rhs, Option> ProductXpr;
  typedef typename internal::dense_xpr_base<ProductXpr>::type Base;

 public:
  using Base::derived;
  typedef typename Base::Scalar Scalar;

  // An inner product is 1x1, so the expression can implicitly convert to its
  // single coefficient; evaluation happens here.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const {
    return internal::evaluator<ProductXpr>(derived()).coeff(0, 0);
  }
};
}
// namespace internal
// Generic API dispatcher: for any non-dense storage kind, Product simply
// inherits the API of the matching generic expression base.
template <typename Lhs, typename Rhs, int Option, typename StorageKind>
class ProductImpl : public internal::generic_xpr_base<Product<Lhs, Rhs, Option>, MatrixXpr, StorageKind>::type {
 public:
  using Base = typename internal::generic_xpr_base<Product<Lhs, Rhs, Option>, MatrixXpr, StorageKind>::type;
};
// Dense specialization of ProductImpl. Coefficient access is only allowed for
// lazy products (where it is well defined) or 1x1 (inner) products; for a
// DefaultProduct this would otherwise recompute a full dot product per coeff.
template <typename Lhs, typename Rhs, int Option>
class ProductImpl<Lhs, Rhs, Option, Dense> : public internal::dense_product_base<Lhs, Rhs, Option> {
  typedef Product<Lhs, Rhs, Option> Derived;

 public:
  typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Derived)

 protected:
  enum {
    // "May be 1x1" at compile time; the runtime assert below checks the actual sizes.
    IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
                 (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),
    EnableCoeff = IsOneByOne || Option == LazyProduct
  };

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const {
    EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
    eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1));

    return internal::evaluator<Derived>(derived()).coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const {
    EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
    eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1));

    return internal::evaluator<Derived>(derived()).coeff(i);
  }
};
}
// end namespace Eigen
#endif // EIGEN_PRODUCT_H
eigen-master/Eigen/src/Core/ProductEvaluators.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PRODUCTEVALUATORS_H
#define EIGEN_PRODUCTEVALUATORS_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
/** \internal
 * Evaluator of a product expression.
 * Since products require special treatments to handle all possible cases,
 * we simply defer the evaluation logic to a product_evaluator class
 * which offers more partial specialization possibilities.
 *
 * \sa class product_evaluator
 */
template <typename Lhs, typename Rhs, int Options>
struct evaluator<Product<Lhs, Rhs, Options>> : public product_evaluator<Product<Lhs, Rhs, Options>> {
  typedef Product<Lhs, Rhs, Options> XprType;
  typedef product_evaluator<XprType> Base;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
// Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B"
// TODO we should apply that rule only if that's really helpful
// Marking the expression as aliasing forces assignment through a temporary
// unless .noalias() is used, just like a plain DefaultProduct.
template <typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1>
struct evaluator_assume_aliasing<
    CwiseBinaryOp<internal::scalar_product_op<Scalar1, Scalar2>,
                  const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>,
                  const Product<Lhs, Rhs, DefaultProduct>>> {
  static const bool value = true;
};
// Evaluator for "scalar * (A * B)": folds the scalar into the left factor and
// evaluates "(scalar*A) * B" instead, so the scaling is performed as part of
// the product kernel rather than as a separate pass.
template <typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1>
struct evaluator<CwiseBinaryOp<internal::scalar_product_op<Scalar1, Scalar2>,
                               const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>,
                               const Product<Lhs, Rhs, DefaultProduct>>>
    : public evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1, Lhs, product), Rhs, DefaultProduct>> {
  typedef CwiseBinaryOp<internal::scalar_product_op<Scalar1, Scalar2>,
                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>,
                        const Product<Lhs, Rhs, DefaultProduct>>
      XprType;
  typedef evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1, Lhs, product), Rhs, DefaultProduct>> Base;

  // xpr.lhs() is the constant expression; its functor holds the scalar in m_other.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
      : Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs()) {}
};
// Taking the diagonal of a product only needs the diagonal coefficients, so
// rather than materializing the full DefaultProduct result, re-route through
// the coefficient-wise LazyProduct evaluator.
template <typename Lhs, typename Rhs, int DiagIndex>
struct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex>>
    : public evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>> {
  typedef Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> XprType;
  typedef evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>> Base;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
      : Base(Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>(
            Product<Lhs, Rhs, LazyProduct>(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()),
            xpr.index())) {}
};
// Helper class to perform a matrix product with the destination at hand.
// Depending on the sizes of the factors, there are different evaluation strategies
// as controlled by internal::product_type.
// Specializations (by operand shapes and product type) provide evalTo/addTo/subTo
// and, for some strategies, scaleAndAddTo.
template <typename Lhs, typename Rhs, typename LhsShape = typename evaluator_traits<Lhs>::Shape,
          typename RhsShape = typename evaluator_traits<Rhs>::Shape,
          int ProductType = internal::product_type<Lhs, Rhs>::value>
struct generic_product_impl;
// A plain DefaultProduct must be assumed to alias its destination (e.g.
// A = A*B), so assignment goes through a temporary unless .noalias() is used.
template <typename Lhs, typename Rhs>
struct evaluator_assume_aliasing<Product<Lhs, Rhs, DefaultProduct>> {
  static const bool value = true;
};
// This is the default evaluator implementation for products:
// It creates a temporary and call generic_product_impl
template <typename Lhs, typename Rhs, int Options, int ProductTag, typename LhsShape, typename RhsShape>
struct product_evaluator<Product<Lhs, Rhs, Options>, ProductTag, LhsShape, RhsShape>
    : public evaluator<typename Product<Lhs, Rhs, Options>::PlainObject> {
  typedef Product<Lhs, Rhs, Options> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;
  enum {
    // The temporary must be computed before the expression is nested anywhere else.
    Flags = Base::Flags | EvalBeforeNestingBit
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr)
      : m_result(xpr.rows(), xpr.cols()) {
    // Re-run the base-evaluator constructor now that m_result's storage exists.
    internal::construct_at<Base>(this, m_result);

    // FIXME shall we handle nested_eval here?,
    // if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in
    // permutation_matrix_product, transposition_matrix_product, etc.)
    // typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
    // typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
    // typedef internal::remove_all_t<LhsNested> LhsNestedCleaned;
    // typedef internal::remove_all_t<RhsNested> RhsNestedCleaned;
    //
    // const LhsNested lhs(xpr.lhs());
    // const RhsNested rhs(xpr.rhs());
    //
    // generic_product_impl<LhsNestedCleaned, RhsNestedCleaned>::evalTo(m_result, lhs, rhs);

    generic_product_impl<Lhs, Rhs, LhsShape, RhsShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
  }

 protected:
  PlainObject m_result;
};
// The following three shortcuts are enabled only if the scalar types match exactly.
// TODO: we could enable them for different scalar types when the product is not vectorized.

// Dense = Product
// Bypasses the temporary of the default product_evaluator: resizes dst if
// needed and evaluates the product directly into it.
template <typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>
struct Assignment<DstXprType, Product<Lhs, Rhs, Options>, internal::assign_op<Scalar, Scalar>, Dense2Dense,
                  std::enable_if_t<(Options == DefaultProduct || Options == AliasFreeProduct)>> {
  typedef Product<Lhs, Rhs, Options> SrcXprType;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const internal::assign_op<Scalar, Scalar>&) {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);
    // FIXME shall we handle nested_eval here?
    generic_product_impl<Lhs, Rhs>::evalTo(dst, src.lhs(), src.rhs());
  }
};
// Dense += Product
// Accumulates the product directly into dst (no temporary); dst must already
// have the product's dimensions.
template <typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>
struct Assignment<DstXprType, Product<Lhs, Rhs, Options>, internal::add_assign_op<Scalar, Scalar>, Dense2Dense,
                  std::enable_if_t<(Options == DefaultProduct || Options == AliasFreeProduct)>> {
  typedef Product<Lhs, Rhs, Options> SrcXprType;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const internal::add_assign_op<Scalar, Scalar>&) {
    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    // FIXME shall we handle nested_eval here?
    generic_product_impl<Lhs, Rhs>::addTo(dst, src.lhs(), src.rhs());
  }
};
// Dense -= Product
// Subtracts the product directly from dst (no temporary); dst must already
// have the product's dimensions.
template <typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>
struct Assignment<DstXprType, Product<Lhs, Rhs, Options>, internal::sub_assign_op<Scalar, Scalar>, Dense2Dense,
                  std::enable_if_t<(Options == DefaultProduct || Options == AliasFreeProduct)>> {
  typedef Product<Lhs, Rhs, Options> SrcXprType;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const internal::sub_assign_op<Scalar, Scalar>&) {
    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    // FIXME shall we handle nested_eval here?
    generic_product_impl<Lhs, Rhs>::subTo(dst, src.lhs(), src.rhs());
  }
};
// Dense ?= scalar * Product
// TODO we should apply that rule if that's really helpful
// for instance, this is not good for inner products
// Folds the scalar into the left factor and re-dispatches the assignment on
// "(scalar * lhs) * rhs" without aliasing checks.
template <typename DstXprType, typename Lhs, typename Rhs, typename AssignFunc, typename Scalar, typename ScalarBis,
          typename Plain>
struct Assignment<DstXprType,
                  CwiseBinaryOp<internal::scalar_product_op<ScalarBis, Scalar>,
                                const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>, Plain>,
                                const Product<Lhs, Rhs, DefaultProduct>>,
                  AssignFunc, Dense2Dense> {
  typedef CwiseBinaryOp<internal::scalar_product_op<ScalarBis, Scalar>,
                        const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>, Plain>,
                        const Product<Lhs, Rhs, DefaultProduct>>
      SrcXprType;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const AssignFunc& func) {
    // src.lhs() is the constant expression; its functor holds the scalar in m_other.
    call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs()) * src.rhs().rhs(), func);
  }
};
//----------------------------------------
// Catch "Dense ?= xpr + Product<>" expression to save one temporary
// FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct
template <typename OtherXpr, typename Lhs, typename Rhs>
struct evaluator_assume_aliasing<
    CwiseBinaryOp<
        internal::scalar_sum_op<typename OtherXpr::Scalar, typename Product<Lhs, Rhs, DefaultProduct>::Scalar>,
        const OtherXpr, const Product<Lhs, Rhs, DefaultProduct>>,
    DenseShape> {
  static const bool value = true;
};
// Same as above for "Dense ?= xpr - Product<>".
template <typename OtherXpr, typename Lhs, typename Rhs>
struct evaluator_assume_aliasing<
    CwiseBinaryOp<
        internal::scalar_difference_op<typename OtherXpr::Scalar, typename Product<Lhs, Rhs, DefaultProduct>::Scalar>,
        const OtherXpr, const Product<Lhs, Rhs, DefaultProduct>>,
    DenseShape> {
  static const bool value = true;
};
// Common implementation for "dst ?= xpr (+|-) product": first assigns the
// non-product operand with Func1, then folds the product in with Func2, thus
// avoiding a temporary for the sum/difference. The initial functor is ignored
// because it is replaced by the Func1/Func2 pair chosen by the macro below.
template <typename DstXprType, typename OtherXpr, typename ProductType, typename Func1, typename Func2>
struct assignment_from_xpr_op_product {
  template <typename SrcXprType, typename InitialFunc>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const InitialFunc& /*func*/) {
    call_assignment_no_alias(dst, src.lhs(), Func1());
    call_assignment_no_alias(dst, src.rhs(), Func2());
  }
};
// Instantiates an Assignment specialization that routes
// "dst ASSIGN_OP (xpr BINOP product)" through assignment_from_xpr_op_product,
// applying ASSIGN_OP to the xpr part and ASSIGN_OP2 to the product part.
#define EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(ASSIGN_OP, BINOP, ASSIGN_OP2) \
  template <typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, \
            typename SrcScalar, typename OtherScalar, typename ProdScalar> \
  struct Assignment<DstXprType, \
                    CwiseBinaryOp<internal::BINOP<OtherScalar, ProdScalar>, const OtherXpr, \
                                  const Product<Lhs, Rhs, DefaultProduct>>, \
                    internal::ASSIGN_OP<DstScalar, SrcScalar>, Dense2Dense> \
      : assignment_from_xpr_op_product<DstXprType, OtherXpr, Product<Lhs, Rhs, DefaultProduct>, \
                                       internal::ASSIGN_OP<DstScalar, OtherScalar>, \
                                       internal::ASSIGN_OP2<DstScalar, ProdScalar>> {}

// dst  = x + P  ->  dst  = x; dst += P
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_sum_op, add_assign_op);
// dst += x + P  ->  dst += x; dst += P
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op, scalar_sum_op, add_assign_op);
// dst -= x + P  ->  dst -= x; dst -= P
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op, scalar_sum_op, sub_assign_op);
// dst  = x - P  ->  dst  = x; dst -= P
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_difference_op, sub_assign_op);
// dst += x - P  ->  dst += x; dst -= P
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op, scalar_difference_op, sub_assign_op);
// dst -= x - P  ->  dst -= x; dst += P
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op, scalar_difference_op, add_assign_op);
//----------------------------------------

// Inner (dot) product strategy: the result is a single coefficient, computed
// by default_inner_product_impl and written/accumulated into dst(0,0).
template <typename Lhs, typename Rhs>
struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, InnerProduct> {
  using impl = default_inner_product_impl<Lhs, Rhs, false>;

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    dst.coeffRef(0, 0) = impl::run(lhs, rhs);
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    dst.coeffRef(0, 0) += impl::run(lhs, rhs);
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    dst.coeffRef(0, 0) -= impl::run(lhs, rhs);
  }
};
/***********************************************************************
 * Implementation of outer dense * dense vector product
 ***********************************************************************/

// Column major result: for each column j of dst, apply func (set/add/sub/
// scaled-add) with rhs(j) * lhs, i.e. one scaled copy of the lhs vector.
template <typename Dst, typename Lhs, typename Rhs, typename Func>
void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func& func,
                                                  const false_type&) {
  evaluator<Rhs> rhsEval(rhs);
  // Evaluate lhs once up front if nesting rules say so (it is reused per column).
  ei_declare_local_nested_eval(Lhs, lhs, Rhs::SizeAtCompileTime, actual_lhs);
  // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored
  // FIXME not very good if rhs is real and lhs complex while alpha is real too
  const Index cols = dst.cols();
  for (Index j = 0; j < cols; ++j) func(dst.col(j), rhsEval.coeff(Index(0), j) * actual_lhs);
}
// Row major result: for each row i of dst, apply func with lhs(i) * rhs,
// i.e. one scaled copy of the rhs vector.
template <typename Dst, typename Lhs, typename Rhs, typename Func>
void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func& func,
                                                  const true_type&) {
  evaluator<Lhs> lhsEval(lhs);
  // Evaluate rhs once up front if nesting rules say so (it is reused per row).
  ei_declare_local_nested_eval(Rhs, rhs, Lhs::SizeAtCompileTime, actual_rhs);
  // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored
  // FIXME not very good if lhs is real and rhs complex while alpha is real too
  const Index rows = dst.rows();
  for (Index i = 0; i < rows; ++i) func(dst.row(i), lhsEval.coeff(i, Index(0)) * actual_rhs);
}
// Outer product strategy (column vector * row vector): dispatches to the
// column-major or row-major outer_product_selector_run above, selecting the
// per-slice update functor (set/add/sub/adds) according to the operation.
template <typename Lhs, typename Rhs>
struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, OuterProduct> {
  template <typename T>
  struct is_row_major : bool_constant<(int(T::Flags) & RowMajorBit)> {};
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose

  // dst-slice = src
  struct set {
    template <typename Dst, typename Src>
    EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const {
      dst.const_cast_derived() = src;
    }
  };

  struct add {
    /** Add to dst. */
    template <typename Dst, typename Src>
    EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const {
      dst.const_cast_derived() += src;
    }
  };

  // dst-slice -= src
  struct sub {
    template <typename Dst, typename Src>
    EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const {
      dst.const_cast_derived() -= src;
    }
  };

  /** Scaled add. */
  struct adds {
    Scalar m_scale;
    /** Constructor */
    explicit adds(const Scalar& s) : m_scale(s) {}
    /** Scaled add to dst. */
    template <typename Dst, typename Src>
    void EIGEN_DEVICE_FUNC operator()(const Dst& dst, const Src& src) const {
      dst.const_cast_derived() += m_scale * src;
    }
  };

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major<Dst>());
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major<Dst>());
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major<Dst>());
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs,
                                                                  const Scalar& alpha) {
    internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major<Dst>());
  }
};
// This base class provides default implementations for evalTo, addTo, subTo, in terms of scaleAndAddTo
// (which Derived must supply): evalTo zeroes dst then accumulates with
// alpha=1, addTo uses alpha=1, subTo uses alpha=-1.
template <typename Lhs, typename Rhs, typename Derived>
struct generic_product_impl_base {
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    dst.setZero();
    scaleAndAddTo(dst, lhs, rhs, Scalar(1));
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    scaleAndAddTo(dst, lhs, rhs, Scalar(1));
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs,
                                                                  const Scalar& alpha) {
    Derived::scaleAndAddTo(dst, lhs, rhs, alpha);
  }
};
// Matrix * vector (GEMV) strategy: forwards to gemv_dense_selector, choosing
// the side on which the matrix operand sits, its storage order, and whether
// its data can be accessed directly (BLAS-style).
template <typename Lhs, typename Rhs>
struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, GemvProduct>
    : generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, GemvProduct>> {
  typedef typename nested_eval<Lhs, 1>::type LhsNested;
  typedef typename nested_eval<Rhs, 1>::type RhsNested;
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;
  enum {
    // If the lhs is the (compile-time) vector, the matrix is on the right.
    Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight
  };
  // The matrix operand (as opposed to the vector operand).
  typedef internal::remove_all_t<std::conditional_t<int(Side) == OnTheRight, LhsNested, RhsNested>> MatrixType;

  template <typename Dest>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs,
                                                                  const Scalar& alpha) {
    // Fallback to inner product if both the lhs and rhs is a runtime vector.
    if (lhs.rows() == 1 && rhs.cols() == 1) {
      dst.coeffRef(0, 0) += alpha * lhs.row(0).conjugate().dot(rhs.col(0));
      return;
    }
    LhsNested actual_lhs(lhs);
    RhsNested actual_rhs(rhs);
    internal::gemv_dense_selector<Side, (int(MatrixType::Flags) & RowMajorBit) ? RowMajor : ColMajor,
                                  bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)>::run(actual_lhs,
                                                                                                       actual_rhs, dst,
                                                                                                       alpha);
  }
};
// Coefficient-based (small product) strategy: evaluates via lazyProduct with
// the appropriate assign/add/sub functor, avoiding any temporary.
template <typename Lhs, typename Rhs>
struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode> {
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // Same as: dst.noalias() = lhs.lazyProduct(rhs);
    // but easier on the compiler side
    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar, Scalar>());
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // dst.noalias() += lhs.lazyProduct(rhs);
    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar, Scalar>());
  }

  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // dst.noalias() -= lhs.lazyProduct(rhs);
    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar, Scalar>());
  }

  // This is a special evaluation path called from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h
  // This variant tries to extract scalar multiples from both the LHS and RHS and factor them out. For instance:
  //   dst {,+,-}= (s1*A)*(B*s2)
  // will be rewritten as:
  //   dst {,+,-}= (s1*s2) * (A.lazyProduct(B))
  // There are at least four benefits of doing so:
  //  1 - huge performance gain for heap-allocated matrix types as it save costly allocations.
  //  2 - it is faster than simply by-passing the heap allocation through stack allocation.
  //  3 - it makes this fallback consistent with the heavy GEMM routine.
  //  4 - it fully by-passes huge stack allocation attempts when multiplying huge fixed-size matrices.
  //      (see https://stackoverflow.com/questions/54738495)
  // For small fixed sizes matrices, however, the gains are less obvious, it is sometimes x2 faster, but sometimes x3
  // slower, and the behavior depends also a lot on the compiler... This is why this re-writing strategy is currently
  // enabled only when falling back from the main GEMM.
  template <typename Dst, typename Func>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic(Dst& dst, const Lhs& lhs, const Rhs& rhs,
                                                                 const Func& func) {
    enum {
      HasScalarFactor = blas_traits<Lhs>::HasScalarFactor || blas_traits<Rhs>::HasScalarFactor,
      ConjLhs = blas_traits<Lhs>::NeedToConjugate,
      ConjRhs = blas_traits<Rhs>::NeedToConjugate
    };
    // FIXME: in c++11 this should be auto, and extractScalarFactor should also return auto
    //        this is important for real*complex_mat
    Scalar actualAlpha = combine_scalar_factors<Scalar>(lhs, rhs);

    // Strip the scalar factors/conjugations off the operands, then dispatch on
    // whether a non-trivial scalar factor remains to be applied.
    eval_dynamic_impl(dst, blas_traits<Lhs>::extract(lhs).template conjugateIf<ConjLhs>(),
                      blas_traits<Rhs>::extract(rhs).template conjugateIf<ConjRhs>(), func, actualAlpha,
                      bool_constant<HasScalarFactor>());
  }

 protected:
  // No scalar factor (s is known to be exactly 1): plain lazy product.
  template <typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs,
                                                                      const Func& func, const Scalar& s /* == 1 */,
                                                                      false_type) {
    EIGEN_UNUSED_VARIABLE(s);
    eigen_internal_assert(numext::is_exactly_one(s));
    call_restricted_packet_assignment_no_alias(dst, lhs.lazyProduct(rhs), func);
  }

  // With a scalar factor: apply s to the lazy product during assignment.
  template <typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs,
                                                                      const Func& func, const Scalar& s, true_type) {
    call_restricted_packet_assignment_no_alias(dst, s * lhs.lazyProduct(rhs), func);
  }
};
// This specialization enforces the use of a coefficient-based evaluation strategy
// (it simply inherits the CoeffBasedProductMode implementation unchanged).
template <typename Lhs, typename Rhs>
struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, LazyCoeffBasedProductMode>
    : generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode> {};
// Case 2: Evaluate coeff by coeff
//
// This is mostly taken from CoeffBasedProduct.h
// The main difference is that we add an extra argument to the etor_product_*_impl::run() function
// for the inner dimension of the product, because evaluator object do not know their size.
template
<
int
Traversal
,
int
UnrollingIndex
,
typename
Lhs
,
typename
Rhs
,
typename
RetScalar
>
struct
etor_product_coeff_impl
;
template
<
int
StorageOrder
,
int
UnrollingIndex
,
typename
Lhs
,
typename
Rhs
,
typename
Packet
,
int
LoadMode
>
struct
etor_product_packet_impl
;
// Evaluator for a lazy (coefficient-based) dense*dense product: each coefficient of the
// result is computed on demand as a dot product of one row of lhs with one column of rhs.
template <typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, DenseShape>
    : evaluator_base<Product<Lhs, Rhs, LazyProduct>> {
  typedef Product<Lhs, Rhs, LazyProduct> XprType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr)
      : m_lhs(xpr.lhs()),
        m_rhs(xpr.rhs()),
        m_lhsImpl(m_lhs),  // FIXME the creation of the evaluator objects should result in a no-op, but check that!
        m_rhsImpl(m_rhs),  // Moreover, they are only useful for the packet path, so we could completely disable
                           // them when not needed, or perhaps declare them on the fly on the packet method... We
                           // have experiment to check what's best.
        m_innerDim(xpr.lhs().cols()) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::AddCost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
#if 0
    std::cerr << "LhsOuterStrideBytes= " << LhsOuterStrideBytes << "\n";
    std::cerr << "RhsOuterStrideBytes= " << RhsOuterStrideBytes << "\n";
    std::cerr << "LhsAlignment= " << LhsAlignment << "\n";
    std::cerr << "RhsAlignment= " << RhsAlignment << "\n";
    std::cerr << "CanVectorizeLhs= " << CanVectorizeLhs << "\n";
    std::cerr << "CanVectorizeRhs= " << CanVectorizeRhs << "\n";
    std::cerr << "CanVectorizeInner= " << CanVectorizeInner << "\n";
    std::cerr << "EvalToRowMajor= " << EvalToRowMajor << "\n";
    std::cerr << "Alignment= " << Alignment << "\n";
    std::cerr << "Flags= " << Flags << "\n";
#endif
  }

  // Everything below here is taken from CoeffBasedProduct.h

  // Nested copies/references of the operands; nesting decision depends on how many
  // times each operand coefficient is reused (cols of Rhs for Lhs, rows of Lhs for Rhs).
  typedef typename internal::nested_eval<Lhs, Rhs::ColsAtCompileTime>::type LhsNested;
  typedef typename internal::nested_eval<Rhs, Lhs::RowsAtCompileTime>::type RhsNested;

  typedef internal::remove_all_t<LhsNested> LhsNestedCleaned;
  typedef internal::remove_all_t<RhsNested> RhsNestedCleaned;

  typedef evaluator<LhsNestedCleaned> LhsEtorType;
  typedef evaluator<RhsNestedCleaned> RhsEtorType;

  enum {
    RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime,
    ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime,
    // Inner (contraction) dimension; a fixed size is preferred over Dynamic when known.
    InnerSize = min_size_prefer_fixed(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime),
    MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime
  };

  typedef typename find_best_packet<Scalar, RowsAtCompileTime>::type LhsVecPacketType;
  typedef typename find_best_packet<Scalar, ColsAtCompileTime>::type RhsVecPacketType;

  enum {
    LhsCoeffReadCost = LhsEtorType::CoeffReadCost,
    RhsCoeffReadCost = RhsEtorType::CoeffReadCost,
    // Cost of computing one result coefficient: InnerSize muls + operand reads + (InnerSize-1) adds.
    CoeffReadCost = InnerSize == 0        ? NumTraits<Scalar>::ReadCost
                    : InnerSize == Dynamic ? HugeCost
                                           : InnerSize * (NumTraits<Scalar>::MulCost + int(LhsCoeffReadCost) +
                                                          int(RhsCoeffReadCost)) +
                                                 (InnerSize - 1) * NumTraits<Scalar>::AddCost,

    Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,

    LhsFlags = LhsEtorType::Flags,
    RhsFlags = RhsEtorType::Flags,

    LhsRowMajor = LhsFlags & RowMajorBit,
    RhsRowMajor = RhsFlags & RowMajorBit,

    LhsVecPacketSize = unpacket_traits<LhsVecPacketType>::size,
    RhsVecPacketSize = unpacket_traits<RhsVecPacketType>::size,

    // Here, we don't care about alignment larger than the usable packet size.
    LhsAlignment =
        plain_enum_min(LhsEtorType::Alignment, LhsVecPacketSize * int(sizeof(typename LhsNestedCleaned::Scalar))),
    RhsAlignment =
        plain_enum_min(RhsEtorType::Alignment, RhsVecPacketSize * int(sizeof(typename RhsNestedCleaned::Scalar))),

    SameType = is_same<typename LhsNestedCleaned::Scalar, typename RhsNestedCleaned::Scalar>::value,

    CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime != 1),
    CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime != 1),

    EvalToRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1)   ? 1
                     : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
                                                                                : (bool(RhsRowMajor) && !CanVectorizeLhs),

    Flags = ((int(LhsFlags) | int(RhsFlags)) & HereditaryBits & ~RowMajorBit) | (EvalToRowMajor ? RowMajorBit : 0)
            // TODO enable vectorization for mixed types
            | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0) |
            (XprType::IsVectorAtCompileTime ? LinearAccessBit : 0),

    LhsOuterStrideBytes =
        int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)),
    RhsOuterStrideBytes =
        int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)),

    // Alignment is only kept if the vectorizable side's outer stride preserves it.
    Alignment = bool(CanVectorizeLhs)
                    ? (LhsOuterStrideBytes <= 0 || (int(LhsOuterStrideBytes) % plain_enum_max(1, LhsAlignment)) != 0
                           ? 0
                           : LhsAlignment)
                : bool(CanVectorizeRhs)
                    ? (RhsOuterStrideBytes <= 0 || (int(RhsOuterStrideBytes) % plain_enum_max(1, RhsAlignment)) != 0
                           ? 0
                           : RhsAlignment)
                    : 0,

    /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
     * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
     * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
     * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI.
     */
    CanVectorizeInner = SameType && LhsRowMajor && (!RhsRowMajor) &&
                        (int(LhsFlags) & int(RhsFlags) & ActualPacketAccessBit) &&
                        (int(InnerSize) % packet_traits<Scalar>::size == 0)
  };

  // coeff(i,j) == dot(lhs.row(i), rhs.col(j))
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const {
    return (m_lhs.row(row).transpose().cwiseProduct(m_rhs.col(col))).sum();
  }

  /* Allow index-based non-packet access. It is impossible though to allow index-based packed access,
   * which is why we don't set the LinearAccessBit.
   * TODO: this seems possible when the result is a vector
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index index) const {
    // Map the linear index onto (row, col) depending on which dimension is 1.
    const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? 0 : index;
    const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? index : 0;
    return (m_lhs.row(row).transpose().cwiseProduct(m_rhs.col(col))).sum();
  }

  // Packet access: delegate to the etor_product_packet_impl kernel selected from the
  // result's storage order and the unrolling decision.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packet(Index row, Index col) const {
    PacketType res;
    typedef etor_product_packet_impl<bool(int(Flags) & RowMajorBit) ? RowMajor : ColMajor,
                                     Unroll ? int(InnerSize) : Dynamic, LhsEtorType, RhsEtorType, PacketType, LoadMode>
        PacketImpl;
    PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
    return res;
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packet(Index index) const {
    const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? 0 : index;
    const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? index : 0;
    return packet<LoadMode, PacketType>(row, col);
  }

  // Partial-packet access: same as packet() but only lanes [begin, begin+count) are valid.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packetSegment(Index row, Index col, Index begin,
                                                                       Index count) const {
    PacketType res;
    typedef etor_product_packet_impl<bool(int(Flags) & RowMajorBit) ? RowMajor : ColMajor,
                                     Unroll ? int(InnerSize) : Dynamic, LhsEtorType, RhsEtorType, PacketType, LoadMode>
        PacketImpl;
    PacketImpl::run_segment(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res, begin, count);
    return res;
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packetSegment(Index index, Index begin, Index count) const {
    const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? 0 : index;
    const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? index : 0;
    return packetSegment<LoadMode, PacketType>(row, col, begin, count);
  }

 protected:
  add_const_on_value_type_t<LhsNested> m_lhs;
  add_const_on_value_type_t<RhsNested> m_rhs;

  LhsEtorType m_lhsImpl;
  RhsEtorType m_rhsImpl;

  // TODO: Get rid of m_innerDim if known at compile time
  Index m_innerDim;
};
// Evaluator for a DefaultProduct handled in LazyCoeffBasedProductMode: forwards to the
// LazyProduct evaluator, additionally forcing evaluation before nesting.
template <typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProductMode, DenseShape, DenseShape>
    : product_evaluator<Product<Lhs, Rhs, LazyProduct>, CoeffBasedProductMode, DenseShape, DenseShape> {
  typedef Product<Lhs, Rhs, DefaultProduct> XprType;
  typedef Product<Lhs, Rhs, LazyProduct> BaseProduct;
  typedef product_evaluator<BaseProduct, CoeffBasedProductMode, DenseShape, DenseShape> Base;
  enum {
    // Same flags as the lazy evaluator, plus EvalBeforeNestingBit.
    Flags = Base::Flags | EvalBeforeNestingBit
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr)
      : Base(BaseProduct(xpr.lhs(), xpr.rhs())) {}
};
/****************************************
*** Coeff based product, Packet path ***
****************************************/
// Row-major, fully unrolled packet kernel: recursively accumulates term k = UnrollingIndex-1
// of the inner dimension, i.e. res += lhs(row,k) * rhs.packet(k,col).
template <int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                        Index innerDim, Packet& res) {
    // Accumulate all previous terms first (compile-time recursion down to 0).
    etor_product_packet_impl<RowMajor, UnrollingIndex - 1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs,
                                                                                            innerDim, res);
    res = pmadd(pset1<Packet>(lhs.coeff(row, Index(UnrollingIndex - 1))),
                rhs.template packet<LoadMode, Packet>(Index(UnrollingIndex - 1), col), res);
  }
  // Same as run() but using partial (segment) packet loads on the rhs.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                                Index innerDim, Packet& res, Index begin, Index count) {
    etor_product_packet_impl<RowMajor, UnrollingIndex - 1, Lhs, Rhs, Packet, LoadMode>::run_segment(
        row, col, lhs, rhs, innerDim, res, begin, count);
    res = pmadd(pset1<Packet>(lhs.coeff(row, Index(UnrollingIndex - 1))),
                rhs.template packetSegment<LoadMode, Packet>(Index(UnrollingIndex - 1), col, begin, count), res);
  }
};
// Column-major, fully unrolled packet kernel: recursively accumulates term k = UnrollingIndex-1
// of the inner dimension, i.e. res += lhs.packet(row,k) * rhs(k,col).
template <int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                        Index innerDim, Packet& res) {
    // Accumulate all previous terms first (compile-time recursion down to 0).
    etor_product_packet_impl<ColMajor, UnrollingIndex - 1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs,
                                                                                            innerDim, res);
    res = pmadd(lhs.template packet<LoadMode, Packet>(row, Index(UnrollingIndex - 1)),
                pset1<Packet>(rhs.coeff(Index(UnrollingIndex - 1), col)), res);
  }
  // Same as run() but using partial (segment) packet loads on the lhs.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                                Index innerDim, Packet& res, Index begin, Index count) {
    etor_product_packet_impl<ColMajor, UnrollingIndex - 1, Lhs, Rhs, Packet, LoadMode>::run_segment(
        row, col, lhs, rhs, innerDim, res, begin, count);
    res = pmadd(lhs.template packetSegment<LoadMode, Packet>(row, Index(UnrollingIndex - 1), begin, count),
                pset1<Packet>(rhs.coeff(Index(UnrollingIndex - 1), col)), res);
  }
};
// Row-major recursion base case (inner dimension of exactly 1): a single multiply,
// res = lhs(row,0) * rhs.packet(0,col), with no accumulation.
template <typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                        Index /*innerDim*/, Packet& res) {
    res = pmul(pset1<Packet>(lhs.coeff(row, Index(0))), rhs.template packet<LoadMode, Packet>(Index(0), col));
  }
  // Segment variant: only lanes [begin, begin+count) of the rhs packet are loaded.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                                Index /*innerDim*/, Packet& res, Index begin,
                                                                Index count) {
    res = pmul(pset1<Packet>(lhs.coeff(row, Index(0))),
               rhs.template packetSegment<LoadMode, Packet>(Index(0), col, begin, count));
  }
};
// Column-major recursion base case (inner dimension of exactly 1): a single multiply,
// res = lhs.packet(row,0) * rhs(0,col), with no accumulation.
template <typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                        Index /*innerDim*/, Packet& res) {
    res = pmul(lhs.template packet<LoadMode, Packet>(row, Index(0)), pset1<Packet>(rhs.coeff(Index(0), col)));
  }
  // Segment variant: only lanes [begin, begin+count) of the lhs packet are loaded.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                                Index /*innerDim*/, Packet& res, Index begin,
                                                                Index count) {
    res = pmul(lhs.template packetSegment<LoadMode, Packet>(row, Index(0), begin, count),
               pset1<Packet>(rhs.coeff(Index(0), col)));
  }
};
// Row-major, empty inner dimension: the product sum is empty, so the result packet is zero.
template <typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/,
                                                        const Rhs& /*rhs*/, Index /*innerDim*/, Packet& res) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
  }
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/,
                                                                const Rhs& /*rhs*/, Index /*innerDim*/, Packet& res,
                                                                Index /*begin*/, Index /*count*/) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
  }
};
// Column-major, empty inner dimension: the product sum is empty, so the result packet is zero.
template <typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/,
                                                        const Rhs& /*rhs*/, Index /*innerDim*/, Packet& res) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
  }
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/,
                                                                const Rhs& /*rhs*/, Index /*innerDim*/, Packet& res,
                                                                Index /*begin*/, Index /*count*/) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
  }
};
// Row-major, runtime-sized inner dimension: accumulate with an explicit loop,
// res = sum_i lhs(row,i) * rhs.packet(i,col).
template <typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                        Index innerDim, Packet& res) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
    for (Index i = 0; i < innerDim; ++i)
      res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode, Packet>(i, col), res);
  }
  // Segment variant: only lanes [begin, begin+count) of each rhs packet are loaded.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                                Index innerDim, Packet& res, Index begin, Index count) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
    for (Index i = 0; i < innerDim; ++i)
      res = pmadd(pset1<Packet>(lhs.coeff(row, i)),
                  rhs.template packetSegment<LoadMode, Packet>(i, col, begin, count), res);
  }
};
// Column-major, runtime-sized inner dimension: accumulate with an explicit loop,
// res = sum_i lhs.packet(row,i) * rhs(i,col).
template <typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                        Index innerDim, Packet& res) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
    for (Index i = 0; i < innerDim; ++i)
      res = pmadd(lhs.template packet<LoadMode, Packet>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
  }
  // Segment variant: only lanes [begin, begin+count) of each lhs packet are loaded.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_segment(Index row, Index col, const Lhs& lhs, const Rhs& rhs,
                                                                Index innerDim, Packet& res, Index begin, Index count) {
    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
    for (Index i = 0; i < innerDim; ++i)
      res = pmadd(lhs.template packetSegment<LoadMode, Packet>(row, i, begin, count),
                  pset1<Packet>(rhs.coeff(i, col)), res);
  }
};
/***************************************************************************
* Triangular products
***************************************************************************/
template
<
int
Mode
,
bool
LhsIsTriangular
,
typename
Lhs
,
bool
LhsIsVector
,
typename
Rhs
,
bool
RhsIsVector
>
struct
triangular_product_impl
;
// triangular * dense: unwrap the triangular view and delegate to triangular_product_impl,
// selecting the matrix-vector kernel when the rhs has one column at compile time.
template <typename Lhs, typename Rhs, int ProductTag>
struct generic_product_impl<Lhs, Rhs, TriangularShape, DenseShape, ProductTag>
    : generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, TriangularShape, DenseShape, ProductTag>> {
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  // dst += alpha * lhs * rhs
  template <typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) {
    triangular_product_impl<Lhs::Mode, true, typename Lhs::MatrixType, false, Rhs,
                            Rhs::ColsAtCompileTime == 1>::run(dst, lhs.nestedExpression(), rhs, alpha);
  }
};
// dense * triangular: unwrap the triangular view and delegate to triangular_product_impl,
// selecting the vector kernel when the lhs has one row at compile time.
template <typename Lhs, typename Rhs, int ProductTag>
struct generic_product_impl<Lhs, Rhs, DenseShape, TriangularShape, ProductTag>
    : generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, DenseShape, TriangularShape, ProductTag>> {
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  // dst += alpha * lhs * rhs
  template <typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) {
    triangular_product_impl<Rhs::Mode, false, Lhs, Lhs::RowsAtCompileTime == 1, typename Rhs::MatrixType, false>::run(
        dst, lhs, rhs.nestedExpression(), alpha);
  }
};
/***************************************************************************
* SelfAdjoint products
***************************************************************************/
template
<
typename
Lhs
,
int
LhsMode
,
bool
LhsIsVector
,
typename
Rhs
,
int
RhsMode
,
bool
RhsIsVector
>
struct
selfadjoint_product_impl
;
// selfadjoint * dense: unwrap the self-adjoint view and delegate to selfadjoint_product_impl.
template <typename Lhs, typename Rhs, int ProductTag>
struct generic_product_impl<Lhs, Rhs, SelfAdjointShape, DenseShape, ProductTag>
    : generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, SelfAdjointShape, DenseShape, ProductTag>> {
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  // dst += alpha * lhs * rhs
  template <typename Dest>
  static EIGEN_DEVICE_FUNC void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) {
    selfadjoint_product_impl<typename Lhs::MatrixType, Lhs::Mode, false, Rhs, 0, Rhs::IsVectorAtCompileTime>::run(
        dst, lhs.nestedExpression(), rhs, alpha);
  }
};
// dense * selfadjoint: unwrap the self-adjoint view and delegate to selfadjoint_product_impl.
template <typename Lhs, typename Rhs, int ProductTag>
struct generic_product_impl<Lhs, Rhs, DenseShape, SelfAdjointShape, ProductTag>
    : generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, DenseShape, SelfAdjointShape, ProductTag>> {
  typedef typename Product<Lhs, Rhs>::Scalar Scalar;

  // dst += alpha * lhs * rhs
  template <typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) {
    selfadjoint_product_impl<Lhs, 0, Lhs::IsVectorAtCompileTime, typename Rhs::MatrixType, Rhs::Mode, false>::run(
        dst, lhs, rhs.nestedExpression(), alpha);
  }
};
/***************************************************************************
* Diagonal products
***************************************************************************/
// Common evaluator machinery for diagonal*dense and dense*diagonal products.
// ProductOrder tells on which side the diagonal sits (OnTheLeft / OnTheRight).
template <typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder>
struct diagonal_product_evaluator_base : evaluator_base<Derived> {
  typedef typename ScalarBinaryOpTraits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;

 public:
  enum {
    CoeffReadCost = int(NumTraits<Scalar>::MulCost) + int(evaluator<MatrixType>::CoeffReadCost) +
                    int(evaluator<DiagonalType>::CoeffReadCost),

    MatrixFlags = evaluator<MatrixType>::Flags,
    DiagFlags = evaluator<DiagonalType>::Flags,

    // Storage order of the product result: forced by a 1-row/1-column shape,
    // otherwise inherited from the matrix operand.
    StorageOrder_ = (Derived::MaxRowsAtCompileTime == 1 && Derived::MaxColsAtCompileTime != 1)   ? RowMajor
                    : (Derived::MaxColsAtCompileTime == 1 && Derived::MaxRowsAtCompileTime != 1) ? ColMajor
                    : MatrixFlags & RowMajorBit                                                  ? RowMajor
                                                                                                 : ColMajor,
    SameStorageOrder_ = int(StorageOrder_) == ((MatrixFlags & RowMajorBit) ? RowMajor : ColMajor),

    // True when a whole packet of the matrix maps to a single diagonal coefficient
    // (so the diagonal side only needs scalar access).
    ScalarAccessOnDiag_ = !((int(StorageOrder_) == ColMajor && int(ProductOrder) == OnTheLeft) ||
                            (int(StorageOrder_) == RowMajor && int(ProductOrder) == OnTheRight)),
    SameTypes_ = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
    // FIXME currently we need same types, but in the future the next rule should be the one
    // Vectorizable_ = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (SameTypes_ &&
    // bool(int(DiagFlags)&PacketAccessBit))),
    Vectorizable_ = bool(int(MatrixFlags) & PacketAccessBit) && SameTypes_ &&
                    (SameStorageOrder_ || (MatrixFlags & LinearAccessBit) == LinearAccessBit) &&
                    (ScalarAccessOnDiag_ || (bool(int(DiagFlags) & PacketAccessBit))),
    LinearAccessMask_ =
        (MatrixType::RowsAtCompileTime == 1 || MatrixType::ColsAtCompileTime == 1) ? LinearAccessBit : 0,
    Flags = ((HereditaryBits | LinearAccessMask_) & (unsigned int)(MatrixFlags)) |
            (Vectorizable_ ? PacketAccessBit : 0),
    Alignment = evaluator<MatrixType>::Alignment,

    // True when the "diagonal" degenerates to a single coefficient, i.e. the whole
    // product is just a scalar multiple of the matrix operand.
    AsScalarProduct =
        (DiagonalType::SizeAtCompileTime == 1) ||
        (DiagonalType::SizeAtCompileTime == Dynamic && MatrixType::RowsAtCompileTime == 1 &&
         ProductOrder == OnTheLeft) ||
        (DiagonalType::SizeAtCompileTime == Dynamic && MatrixType::ColsAtCompileTime == 1 &&
         ProductOrder == OnTheRight)
  };

  EIGEN_DEVICE_FUNC diagonal_product_evaluator_base(const MatrixType& mat, const DiagonalType& diag)
      : m_diagImpl(diag), m_matImpl(mat) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // Linear-index coefficient access (only valid when LinearAccessMask_ is set).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const {
    if (AsScalarProduct)
      return m_diagImpl.coeff(0) * m_matImpl.coeff(idx);
    else
      return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);
  }

 protected:
  // Packet path, scalar access on the diagonal: one diagonal coefficient scales the whole packet.
  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const {
    return internal::pmul(m_matImpl.template packet<LoadMode, PacketType>(row, col),
                          internal::pset1<PacketType>(m_diagImpl.coeff(id)));
  }

  // Packet path, packet access on the diagonal: load a packet of diagonal coefficients too.
  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const {
    enum {
      InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
      DiagonalPacketLoadMode = plain_enum_min(
          LoadMode,
          ((InnerSize % 16) == 0) ? int(Aligned16) : int(evaluator<DiagonalType>::Alignment))  // FIXME hardcoded 16!!
    };
    return internal::pmul(m_matImpl.template packet<LoadMode, PacketType>(row, col),
                          m_diagImpl.template packet<DiagonalPacketLoadMode, PacketType>(id));
  }

  // Segment variants of the two packet paths above; only lanes [begin, begin+count) are valid.
  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet_segment_impl(Index row, Index col, Index id, Index begin, Index count,
                                                     internal::true_type) const {
    return internal::pmul(m_matImpl.template packetSegment<LoadMode, PacketType>(row, col, begin, count),
                          internal::pset1<PacketType>(m_diagImpl.coeff(id)));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet_segment_impl(Index row, Index col, Index id, Index begin, Index count,
                                                     internal::false_type) const {
    enum {
      InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
      DiagonalPacketLoadMode = plain_enum_min(
          LoadMode,
          ((InnerSize % 16) == 0) ? int(Aligned16) : int(evaluator<DiagonalType>::Alignment))  // FIXME hardcoded 16!!
    };
    return internal::pmul(m_matImpl.template packetSegment<LoadMode, PacketType>(row, col, begin, count),
                          m_diagImpl.template packetSegment<DiagonalPacketLoadMode, PacketType>(id, begin, count));
  }

  evaluator<DiagonalType> m_diagImpl;
  evaluator<MatrixType> m_matImpl;
};
// diagonal * dense
// The diagonal coefficient is indexed by the row of the result.
template <typename Lhs, typename Rhs, int ProductKind, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalShape, DenseShape>
    : diagonal_product_evaluator_base<Rhs, typename Lhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>,
                                      OnTheLeft> {
  typedef diagonal_product_evaluator_base<Rhs, typename Lhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>,
                                          OnTheLeft>
      Base;
  using Base::coeff;
  using Base::m_diagImpl;
  using Base::m_matImpl;
  typedef typename Base::Scalar Scalar;

  typedef Product<Lhs, Rhs, ProductKind> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef typename Lhs::DiagonalVectorType DiagonalType;

  static constexpr int StorageOrder = Base::StorageOrder_;
  // Tag selecting scalar vs packet access on the diagonal in the base class.
  using IsRowMajor_t = bool_constant<StorageOrder == RowMajor>;

  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {}

  // coeff(i,j) = diag(i) * mat(i,j)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const {
    return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col);
  }

#ifndef EIGEN_GPUCC
  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case.
    // See also similar calls below.
    return this->template packet_impl<LoadMode, PacketType>(row, col, row, IsRowMajor_t());
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet(Index idx) const {
    // Map the linear index onto (row, col) according to the result's storage order.
    return packet<LoadMode, PacketType>(int(StorageOrder) == ColMajor ? idx : 0,
                                        int(StorageOrder) == ColMajor ? 0 : idx);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case.
    // See also similar calls below.
    return this->template packet_segment_impl<LoadMode, PacketType>(row, col, row, begin, count, IsRowMajor_t());
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packetSegment(Index idx, Index begin, Index count) const {
    return packetSegment<LoadMode, PacketType>(StorageOrder == ColMajor ? idx : 0,
                                               StorageOrder == ColMajor ? 0 : idx, begin, count);
  }
#endif
};
// dense * diagonal
// The diagonal coefficient is indexed by the column of the result.
template <typename Lhs, typename Rhs, int ProductKind, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape, DiagonalShape>
    : diagonal_product_evaluator_base<Lhs, typename Rhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>,
                                      OnTheRight> {
  typedef diagonal_product_evaluator_base<Lhs, typename Rhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>,
                                          OnTheRight>
      Base;
  using Base::coeff;
  using Base::m_diagImpl;
  using Base::m_matImpl;
  typedef typename Base::Scalar Scalar;

  typedef Product<Lhs, Rhs, ProductKind> XprType;
  typedef typename XprType::PlainObject PlainObject;

  static constexpr int StorageOrder = Base::StorageOrder_;
  // Tag selecting scalar vs packet access on the diagonal in the base class.
  using IsColMajor_t = bool_constant<StorageOrder == ColMajor>;

  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal()) {}

  // coeff(i,j) = mat(i,j) * diag(j)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const {
    return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col);
  }

#ifndef EIGEN_GPUCC
  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return this->template packet_impl<LoadMode, PacketType>(row, col, col, IsColMajor_t());
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet(Index idx) const {
    // Map the linear index onto (row, col) according to the result's storage order.
    return packet<LoadMode, PacketType>(StorageOrder == ColMajor ? idx : 0, StorageOrder == ColMajor ? 0 : idx);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    return this->template packet_segment_impl<LoadMode, PacketType>(row, col, col, begin, count, IsColMajor_t());
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packetSegment(Index idx, Index begin, Index count) const {
    return packetSegment<LoadMode, PacketType>(StorageOrder == ColMajor ? idx : 0,
                                               StorageOrder == ColMajor ? 0 : idx, begin, count);
  }
#endif
};
/***************************************************************************
 * Products with permutation matrices
 ***************************************************************************/

/** \internal
 * \class permutation_matrix_product
 * Internal helper class implementing the product between a permutation matrix and a matrix.
 * This class is specialized for DenseShape below and for SparseShape in SparseCore/SparsePermutation.h
 */
template <typename ExpressionType, int Side, bool Transposed, typename ExpressionShape>
struct permutation_matrix_product;

template <typename ExpressionType, int Side, bool Transposed>
struct permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape> {
  typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
  typedef remove_all_t<MatrixType> MatrixTypeCleaned;

  // Applies permutation `perm` to `xpr`, writing into `dst`.
  // Side selects rows (OnTheLeft) or columns (OnTheRight); Transposed applies
  // the inverse permutation instead.
  template <typename Dest, typename PermutationType>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const PermutationType& perm,
                                                        const ExpressionType& xpr) {
    MatrixType mat(xpr);
    const Index n = Side == OnTheLeft ? mat.rows() : mat.cols();
    // FIXME we need an is_same for expression that is not sensitive to constness. For instance
    // is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
    // if(is_same<MatrixTypeCleaned,Dest>::value && extract_data(dst) == extract_data(mat))
    if (is_same_dense(dst, mat)) {
      // apply the permutation inplace
      // In-place path: decompose the permutation into cycles and swap rows/cols
      // along each cycle. `mask` tracks indices already placed.
      Matrix<bool, PermutationType::RowsAtCompileTime, 1, 0, PermutationType::MaxRowsAtCompileTime> mask(perm.size());
      mask.fill(false);
      Index r = 0;
      while (r < perm.size()) {
        // search for the next seed
        while (r < perm.size() && mask[r]) r++;
        if (r >= perm.size()) break;
        // we got one, let's follow it until we are back to the seed
        Index k0 = r++;
        Index kPrev = k0;
        mask.coeffRef(k0) = true;
        for (Index k = perm.indices().coeff(k0); k != k0; k = perm.indices().coeff(k)) {
          // Swap the k-th row/col with the seed (or the previous element of the
          // cycle, depending on the effective permutation direction).
          Block<Dest, Side == OnTheLeft ? 1 : Dest::RowsAtCompileTime,
                Side == OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
              .swap(Block<Dest, Side == OnTheLeft ? 1 : Dest::RowsAtCompileTime,
                          Side == OnTheRight ? 1 : Dest::ColsAtCompileTime>(
                  dst, ((Side == OnTheLeft) ^ Transposed) ? k0 : kPrev));
          mask.coeffRef(k) = true;
          kPrev = k;
        }
      }
    } else {
      // Out-of-place path: copy each row/col of `mat` to its permuted position
      // in `dst` (or gather from the permuted position, depending on direction).
      for (Index i = 0; i < n; ++i) {
        Block<Dest, Side == OnTheLeft ? 1 : Dest::RowsAtCompileTime,
              Side == OnTheRight ? 1 : Dest::ColsAtCompileTime>(
            dst, ((Side == OnTheLeft) ^ Transposed) ? perm.indices().coeff(i) : i) =
            Block<const MatrixTypeCleaned, Side == OnTheLeft ? 1 : MatrixTypeCleaned::RowsAtCompileTime,
                  Side == OnTheRight ? 1 : MatrixTypeCleaned::ColsAtCompileTime>(
                mat, ((Side == OnTheRight) ^ Transposed) ? perm.indices().coeff(i) : i);
      }
    }
  }
};
// Product of a permutation matrix (lhs) by a matrix (rhs): P * M permutes the rows of M.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, PermutationShape, MatrixShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // Delegate to the shape-specialized helper; the permutation acts on the left,
    // non-transposed.
    typedef permutation_matrix_product<Rhs, OnTheLeft, false, MatrixShape> ProductImpl;
    ProductImpl::run(dst, lhs, rhs);
  }
};
// Product of a matrix (lhs) by a permutation matrix (rhs): M * P permutes the columns of M.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, MatrixShape, PermutationShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // Delegate to the shape-specialized helper with the permutation acting on the
    // right; note the swapped argument order (permutation first, matrix second).
    typedef permutation_matrix_product<Lhs, OnTheRight, false, MatrixShape> ProductImpl;
    ProductImpl::run(dst, rhs, lhs);
  }
};
// Product of an inverted permutation (lhs) by a matrix (rhs): P^-1 * M.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Inverse<Lhs>, Rhs, PermutationShape, MatrixShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Inverse<Lhs>& lhs, const Rhs& rhs) {
    // Unwrap the Inverse and ask the helper for the transposed (= inverse)
    // permutation acting on the left.
    typedef permutation_matrix_product<Rhs, OnTheLeft, true, MatrixShape> ProductImpl;
    ProductImpl::run(dst, lhs.nestedExpression(), rhs);
  }
};
// Product of a matrix (lhs) by an inverted permutation (rhs): M * P^-1.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Inverse<Rhs>, MatrixShape, PermutationShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Inverse<Rhs>& rhs) {
    // Unwrap the Inverse and apply the transposed (= inverse) permutation on the
    // right; argument order is (permutation, matrix).
    typedef permutation_matrix_product<Lhs, OnTheRight, true, MatrixShape> ProductImpl;
    ProductImpl::run(dst, rhs.nestedExpression(), lhs);
  }
};
/***************************************************************************
 * Products with transpositions matrices
 ***************************************************************************/

// FIXME could we unify Transpositions and Permutation into a single "shape"??

/** \internal
 * \class transposition_matrix_product
 * Internal helper class implementing the product between a permutation matrix and a matrix.
 */
template <typename ExpressionType, int Side, bool Transposed, typename ExpressionShape>
struct transposition_matrix_product {
  typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
  typedef remove_all_t<MatrixType> MatrixTypeCleaned;

  // Applies the sequence of transpositions `tr` to `xpr`, writing into `dst`.
  // Side selects row swaps (OnTheLeft) or column swaps (OnTheRight).
  // Transposed replays the transpositions in reverse order, which yields the
  // inverse product.
  template <typename Dest, typename TranspositionType>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const TranspositionType& tr,
                                                        const ExpressionType& xpr) {
    MatrixType mat(xpr);
    typedef typename TranspositionType::StorageIndex StorageIndex;
    const Index size = tr.size();
    StorageIndex j = 0;

    // Work in place when dst already aliases mat; otherwise start from a copy.
    if (!is_same_dense(dst, mat)) dst = mat;

    // Walk the transpositions forward, or backward when Transposed, swapping
    // the k-th row/column with the j-th whenever the pair is non-trivial.
    for (Index k = (Transposed ? size - 1 : 0); Transposed ? k >= 0 : k < size; Transposed ? --k : ++k)
      if (Index(j = tr.coeff(k)) != k) {
        if (Side == OnTheLeft)
          dst.row(k).swap(dst.row(j));
        else if (Side == OnTheRight)
          dst.col(k).swap(dst.col(j));
      }
  }
};
// Product of a transpositions matrix (lhs) by a matrix (rhs): swaps rows of rhs.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // Delegate to the transposition helper, acting on the left in forward order.
    typedef transposition_matrix_product<Rhs, OnTheLeft, false, MatrixShape> ProductImpl;
    ProductImpl::run(dst, lhs, rhs);
  }
};
// Product of a matrix (lhs) by a transpositions matrix (rhs): swaps columns of lhs.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    // Delegate to the transposition helper, acting on the right; note the
    // swapped argument order (transpositions first, matrix second).
    typedef transposition_matrix_product<Lhs, OnTheRight, false, MatrixShape> ProductImpl;
    ProductImpl::run(dst, rhs, lhs);
  }
};
// Product of a transposed transpositions matrix (lhs) by a matrix (rhs): T^T * M.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Transpose<Lhs>, Rhs, TranspositionsShape, MatrixShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Transpose<Lhs>& lhs, const Rhs& rhs) {
    // Unwrap the Transpose and replay the transpositions in reverse order
    // (Transposed = true) on the left.
    typedef transposition_matrix_product<Rhs, OnTheLeft, true, MatrixShape> ProductImpl;
    ProductImpl::run(dst, lhs.nestedExpression(), rhs);
  }
};
// Product of a matrix (lhs) by a transposed transpositions matrix (rhs): M * T^T.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Transpose<Rhs>, MatrixShape, TranspositionsShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Transpose<Rhs>& rhs) {
    // Unwrap the Transpose and replay the transpositions in reverse order
    // (Transposed = true) on the right; argument order is (transpositions, matrix).
    typedef transposition_matrix_product<Lhs, OnTheRight, true, MatrixShape> ProductImpl;
    ProductImpl::run(dst, rhs.nestedExpression(), lhs);
  }
};
/***************************************************************************
 * skew symmetric products
 * for now we just call the generic implementation
 ***************************************************************************/

// Skew-symmetric (lhs) times a matrix (rhs): materialize the skew-symmetric
// operand as a dense matrix and reuse the dense product path.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, SkewSymmetricShape, MatrixShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    typedef generic_product_impl<typename Lhs::DenseMatrixType, Rhs, DenseShape, MatrixShape, ProductTag> DenseImpl;
    DenseImpl::evalTo(dst, lhs, rhs);
  }
};
// Matrix (lhs) times a skew-symmetric operand (rhs): materialize the
// skew-symmetric operand as dense and reuse the dense product path.
template <typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, MatrixShape, SkewSymmetricShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    typedef generic_product_impl<Lhs, typename Rhs::DenseMatrixType, MatrixShape, DenseShape, ProductTag> DenseImpl;
    DenseImpl::evalTo(dst, lhs, rhs);
  }
};
// Skew-symmetric times skew-symmetric: treat both operands as dense matrices
// and reuse the dense-dense product path.
template <typename Lhs, typename Rhs, int ProductTag>
struct generic_product_impl<Lhs, Rhs, SkewSymmetricShape, SkewSymmetricShape, ProductTag> {
  template <typename Dst>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    typedef generic_product_impl<typename Lhs::DenseMatrixType, typename Rhs::DenseMatrixType, DenseShape, DenseShape,
                                 ProductTag>
        DenseImpl;
    DenseImpl::evalTo(dst, lhs, rhs);
  }
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_PRODUCT_EVALUATORS_H
eigen-master/Eigen/src/Core/Random.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_RANDOM_H
#define EIGEN_RANDOM_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Nullary functor producing random scalars; each call draws a fresh value via
// internal::random<Scalar>().
template <typename Scalar>
struct scalar_random_op {
  inline const Scalar operator()() const { return random<Scalar>(); }
};
// Traits for scalar_random_op: no packet path, and IsRepeatable=false because
// successive calls return different values (so the expression must not be
// re-evaluated coefficient-wise).
template <typename Scalar>
struct functor_traits<scalar_random_op<Scalar> > {
  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false };
};
}
// end namespace internal
/** \returns a random matrix expression
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* \not_reentrant
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
* instead.
*
*
* Example: \include MatrixBase_random_int_int.cpp
* Output: \verbinclude MatrixBase_random_int_int.out
*
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
* See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators.
*
* \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random()
*/
// Dynamic-size variant: build a rows-by-cols nullary expression whose functor
// draws uniformly distributed random values.
template <typename Derived>
inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random(Index rows, Index cols) {
  // The functor is stateless; name it locally before wrapping it.
  internal::scalar_random_op<Scalar> randomOp;
  return NullaryExpr(rows, cols, randomOp);
}
/** \returns a random vector expression
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* The parameter \a size is the size of the returned vector.
* Must be compatible with this MatrixBase type.
*
* \only_for_vectors
* \not_reentrant
*
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
* it is redundant to pass \a size as argument, so Random() should be used
* instead.
*
* Example: \include MatrixBase_random_int.cpp
* Output: \verbinclude MatrixBase_random_int.out
*
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
* a temporary vector whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
* \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random()
*/
// Dynamic-size vector variant: build a size-element nullary expression whose
// functor draws uniformly distributed random values.
template <typename Derived>
inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random(Index size) {
  // The functor is stateless; name it locally before wrapping it.
  internal::scalar_random_op<Scalar> randomOp;
  return NullaryExpr(size, randomOp);
}
/** \returns a fixed-size random matrix or vector expression
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* Example: \include MatrixBase_random.cpp
* Output: \verbinclude MatrixBase_random.out
*
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
* \not_reentrant
*
* \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index)
*/
// Fixed-size variant: dimensions come from the compile-time sizes of Derived.
template <typename Derived>
inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random() {
  // The functor is stateless; name it locally before wrapping it.
  internal::scalar_random_op<Scalar> randomOp;
  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, randomOp);
}
/** Sets all coefficients in this expression to random values.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \not_reentrant
*
* Example: \include MatrixBase_setRandom.cpp
* Output: \verbinclude MatrixBase_setRandom.out
*
* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
*/
// Overwrites *this with a random expression of the current dimensions and
// returns the derived object (via the assignment operator's return value).
template <typename Derived>
EIGEN_DEVICE_FUNC inline Derived& DenseBase<Derived>::setRandom() {
  return *this = Random(rows(), cols());
}
/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \only_for_vectors
* \not_reentrant
*
* Example: \include Matrix_setRandom_int.cpp
* Output: \verbinclude Matrix_setRandom_int.out
*
* \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random()
*/
// Vector-only variant: resize to newSize, then fill with random values.
template <typename Derived>
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index newSize) {
  resize(newSize);
  return setRandom();
}
/** Resizes to the given size, and sets all coefficients in this expression to random values.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \not_reentrant
*
* \param rows the new number of rows
* \param cols the new number of columns
*
* Example: \include Matrix_setRandom_int_int.cpp
* Output: \verbinclude Matrix_setRandom_int_int.out
*
* \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random()
*/
// Resize to rows-by-cols, then fill with random values.
template <typename Derived>
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index rows, Index cols) {
  resize(rows, cols);
  return setRandom();
}
/** Resizes to the given size, changing only the number of columns, and sets all
* coefficients in this expression to random values. For the parameter of type
* NoChange_t, just pass the special value \c NoChange.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \not_reentrant
*
* \sa DenseBase::setRandom(), setRandom(Index), setRandom(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Random()
*/
// Keep the current row count, change the column count, then fill with random values.
template <typename Derived>
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(NoChange_t, Index cols) {
  return setRandom(rows(), cols);
}
/** Resizes to the given size, changing only the number of rows, and sets all
* coefficients in this expression to random values. For the parameter of type
* NoChange_t, just pass the special value \c NoChange.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \not_reentrant
*
* \sa DenseBase::setRandom(), setRandom(Index), setRandom(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Random()
*/
// Change the row count, keep the current column count, then fill with random values.
template <typename Derived>
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index rows, NoChange_t) {
  return setRandom(rows, cols());
}
}
// end namespace Eigen
#endif // EIGEN_RANDOM_H
eigen-master/Eigen/src/Core/RandomImpl.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2024 Charles Schlosser <cs.schlosser@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_RANDOM_IMPL_H
#define EIGEN_RANDOM_IMPL_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
/****************************************************************************
* Implementation of random *
****************************************************************************/
template
<
typename
Scalar
,
bool
IsComplex
,
bool
IsInteger
>
struct
random_default_impl
{};
// Dispatches to the default implementation matching the scalar's traits
// (complex vs real, integer vs floating point). Specialized for bool below.
template <typename Scalar>
struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
template
<
typename
Scalar
>
struct
random_retval
{
typedef
Scalar
type
;
};
// Draws a random value in the range [x, y] (integer) or [x, y) (floating
// point), forwarding to the scalar-specific implementation.
template <typename Scalar>
inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) {
  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
}
// Draws a random value over the scalar's default range, forwarding to the
// scalar-specific implementation.
template <typename Scalar>
inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() {
  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
}
// TODO: replace or provide alternatives to this, e.g. std::random_device
// Thin wrapper around std::rand() describing the entropy source:
// - Entropy: number of uniformly random bits per draw (log2(RAND_MAX + 1)).
// - Highest: largest value run() can return.
struct eigen_random_device {
  using ReturnType = int;
  static constexpr int Entropy = meta_floor_log2<(unsigned int)(RAND_MAX) + 1>::value;
  static constexpr ReturnType Highest = RAND_MAX;
  static EIGEN_DEVICE_FUNC inline ReturnType run() { return std::rand(); }
};
// Fill a built-in unsigned integer with numRandomBits beginning with the least significant bit
template <typename Scalar>
struct random_bits_impl {
  EIGEN_STATIC_ASSERT(std::is_unsigned<Scalar>::value, SCALAR MUST BE A BUILT-IN UNSIGNED INTEGER)
  using RandomDevice = eigen_random_device;
  using RandomReturnType = typename RandomDevice::ReturnType;
  // Bits of entropy delivered per device draw, and total bits in Scalar.
  static constexpr int kEntropy = RandomDevice::Entropy;
  static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
  // return a Scalar filled with numRandomBits beginning from the least significant bit
  static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
    eigen_assert((numRandomBits >= 0) && (numRandomBits <= kTotalBits));
    // Mask keeping the low numRandomBits; the `& (kTotalBits - 1)` keeps the
    // shift amount in range when numRandomBits == kTotalBits (a full-width
    // shift would be undefined behavior).
    const Scalar mask = Scalar(-1) >> ((kTotalBits - numRandomBits) & (kTotalBits - 1));
    Scalar randomBits = 0;
    // Accumulate kEntropy bits per draw until numRandomBits are covered.
    for (int shift = 0; shift < numRandomBits; shift += kEntropy) {
      RandomReturnType r = RandomDevice::run();
      randomBits |= static_cast<Scalar>(r) << shift;
    }
    // clear the excess bits
    randomBits &= mask;
    return randomBits;
  }
};
// Convenience wrapper: returns a BitsType whose low numRandomBits bits are random.
template <typename BitsType>
EIGEN_DEVICE_FUNC inline BitsType getRandomBits(int numRandomBits) {
  return random_bits_impl<BitsType>::run(numRandomBits);
}
// random implementation for a built-in floating point type
// Generates a value in [-1, 1) by filling the mantissa of a number in [2, 4)
// with random bits and subtracting 3 (bit-level trick avoiding division).
template <typename Scalar, bool BuiltIn = std::is_floating_point<Scalar>::value>
struct random_float_impl {
  // Unsigned integer type with the same byte size as Scalar, for bit_cast.
  using BitsType = typename numext::get_integer_by_size<sizeof(Scalar)>::unsigned_type;
  // Number of explicit mantissa bits (digits includes the implicit leading 1).
  static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() {
    const int digits = NumTraits<Scalar>::digits();
    return digits - 1;
  }
  static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
    eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
    BitsType randomBits = getRandomBits<BitsType>(numRandomBits);
    // if fewer than MantissaBits is requested, shift them to the left
    randomBits <<= (mantissaBits() - numRandomBits);
    // randomBits is in the half-open interval [2,4)
    randomBits |= numext::bit_cast<BitsType>(Scalar(2));
    // result is in the half-open interval [-1,1)
    Scalar result = numext::bit_cast<Scalar>(randomBits) - Scalar(3);
    return result;
  }
};
// random implementation for a custom floating point type
// uses double as the implementation with a mantissa with a size equal to either the target scalar's mantissa or that of
// double, whichever is smaller
template <typename Scalar>
struct random_float_impl<Scalar, false> {
  // Usable mantissa bits: min of the custom type's and double's precision.
  static EIGEN_DEVICE_FUNC inline int mantissaBits() {
    const int digits = NumTraits<Scalar>::digits();
    constexpr int kDoubleDigits = NumTraits<double>::digits();
    return numext::mini(digits, kDoubleDigits) - 1;
  }
  static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
    eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
    // Generate in double precision, then convert to the custom type.
    Scalar result = static_cast<Scalar>(random_float_impl<double>::run(numRandomBits));
    return result;
  }
};
#if !EIGEN_COMP_NVCC
// random implementation for long double
// this specialization is not compatible with double-double scalars
// Specialize = true when long double is a 16-byte type that is NOT
// double-double (i.e. x87 80-bit extended or IEEE quad), in which case the
// mantissa spans two uint64_t words and is filled by direct bit manipulation.
template <bool Specialize = (sizeof(long double) == 2 * sizeof(uint64_t)) &&
                            ((std::numeric_limits<long double>::digits != (2 * std::numeric_limits<double>::digits)))>
struct random_longdouble_impl {
  static constexpr int Size = sizeof(long double);
  // Explicit mantissa bits of long double.
  static constexpr EIGEN_DEVICE_FUNC int mantissaBits() { return NumTraits<long double>::digits() - 1; }
  static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) {
    eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
    EIGEN_USING_STD(memcpy);
    // Split the requested bits between the low and high 64-bit halves.
    int numLowBits = numext::mini(numRandomBits, 64);
    int numHighBits = numext::maxi(numRandomBits - 64, 0);
    uint64_t randomBits[2];
    // Same [2,4) -> subtract 3 trick as random_float_impl, via memcpy instead
    // of bit_cast because long double may contain padding bits.
    long double result = 2.0L;
    memcpy(&randomBits, &result, Size);
    randomBits[0] |= getRandomBits<uint64_t>(numLowBits);
    randomBits[1] |= getRandomBits<uint64_t>(numHighBits);
    memcpy(&result, &randomBits, Size);
    result -= 3.0L;
    return result;
  }
};

// Fallback when long double cannot be handled bitwise: generate at double
// precision and widen.
template <>
struct random_longdouble_impl<false> {
  static constexpr EIGEN_DEVICE_FUNC int mantissaBits() { return NumTraits<double>::digits() - 1; }
  static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) {
    return static_cast<long double>(random_float_impl<double>::run(numRandomBits));
  }
};

template <>
struct random_float_impl<long double> : random_longdouble_impl<> {};
#endif
// Default random implementation for real floating-point scalars.
template <typename Scalar>
struct random_default_impl<Scalar, false, false> {
  using Impl = random_float_impl<Scalar>;
  // Maps a base draw in [-1,1) into [x,y). Half-values are used so that
  // (half_x + half_y) and (half_y - half_x) cannot overflow even when x and y
  // are near the representable extremes.
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) {
    Scalar half_x = Scalar(0.5) * x;
    Scalar half_y = Scalar(0.5) * y;
    Scalar result = (half_x + half_y) + (half_y - half_x) * run(numRandomBits);
    // result is in the half-open interval [x, y) -- provided that x < y
    return result;
  }
  // Range draw using the full mantissa precision.
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
    return run(x, y, Impl::mantissaBits());
  }
  // Base draw in [-1,1) with the requested number of random mantissa bits.
  static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { return Impl::run(numRandomBits); }
  // Base draw in [-1,1) using full precision.
  static EIGEN_DEVICE_FUNC inline Scalar run() { return run(Impl::mantissaBits()); }
};
// Integer random implementation, specialized on signedness and on whether the
// scalar is a built-in integral type.
template <typename Scalar, bool IsSigned = NumTraits<Scalar>::IsSigned,
          bool BuiltIn = std::is_integral<Scalar>::value>
struct random_int_impl;
// random implementation for a built-in unsigned integer type
template <typename Scalar>
struct random_int_impl<Scalar, false, true> {
  static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
  // Uniform draw in the closed interval [x, y]; returns x when the interval is
  // empty or degenerate (y <= x).
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
    if (y <= x) return x;
    Scalar range = y - x;
    // handle edge case where [x,y] spans the entire range of Scalar
    if (range == NumTraits<Scalar>::highest()) return run();
    Scalar count = range + 1;
    // calculate the number of random bits needed to fill range
    int numRandomBits = log2_ceil(count);
    Scalar randomBits;
    do {
      randomBits = getRandomBits<Scalar>(numRandomBits);
      // if the random draw is outside [0, range), try again (rejection sampling)
      // in the worst-case scenario, the probability of rejection is: 1/2 - 1/2^numRandomBits < 50%
    } while (randomBits >= count);
    Scalar result = x + randomBits;
    return result;
  }
  // Uniform draw over the full range of Scalar.
  static EIGEN_DEVICE_FUNC inline Scalar run() { return getRandomBits<Scalar>(kTotalBits); }
};
// random implementation for a built-in signed integer type
template <typename Scalar>
struct random_int_impl<Scalar, true, true> {
  static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
  // Unsigned counterpart of Scalar, used for overflow-free range arithmetic.
  using BitsType = typename make_unsigned<Scalar>::type;
  // Uniform draw in the closed interval [x, y]; returns x when y <= x.
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
    if (y <= x) return x;
    // Avoid overflow by representing `range` as an unsigned type
    BitsType range = static_cast<BitsType>(y) - static_cast<BitsType>(x);
    BitsType randomBits = random_int_impl<BitsType>::run(0, range);
    // Avoid overflow in the case where `x` is negative and there is a large range so
    // `randomBits` would also be negative if cast to `Scalar` first.
    Scalar result = static_cast<Scalar>(static_cast<BitsType>(x) + randomBits);
    return result;
  }
  // Uniform draw over the full range of Scalar (all bit patterns equally likely).
  static EIGEN_DEVICE_FUNC inline Scalar run() { return static_cast<Scalar>(getRandomBits<BitsType>(kTotalBits)); }
};
// todo: custom integers
// Placeholder for non-built-in integer types: always asserts at runtime.
template <typename Scalar, bool IsSigned>
struct random_int_impl<Scalar, IsSigned, false> {
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar&, const Scalar&) { return run(); }
  static EIGEN_DEVICE_FUNC inline Scalar run() {
    eigen_assert(std::false_type::value && "RANDOM FOR CUSTOM INTEGERS NOT YET SUPPORTED");
    return Scalar(0);
  }
};
// Default random implementation for real integer scalars: forward to random_int_impl.
template <typename Scalar>
struct random_default_impl<Scalar, false, true> : random_int_impl<Scalar> {};
// Specialization for bool: a single random bit.
template <>
struct random_impl<bool> {
  // Draw in [x, y]; returns x when the interval is degenerate (y <= x).
  static EIGEN_DEVICE_FUNC inline bool run(const bool& x, const bool& y) {
    if (y <= x) return x;
    return run();
  }
  static EIGEN_DEVICE_FUNC inline bool run() { return getRandomBits<unsigned>(1) ? true : false; }
};
// Default random implementation for complex scalars: draw real and imaginary
// parts independently using the real-scalar implementation.
template <typename Scalar>
struct random_default_impl<Scalar, true, false> {
  typedef typename NumTraits<Scalar>::Real RealScalar;
  using Impl = random_impl<RealScalar>;
  // Component-wise draw in the box [x.real(), y.real()] x [x.imag(), y.imag()],
  // with a specified number of random bits per component.
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) {
    return Scalar(Impl::run(x.real(), y.real(), numRandomBits), Impl::run(x.imag(), y.imag(), numRandomBits));
  }
  // Component-wise draw at full precision.
  static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
    return Scalar(Impl::run(x.real(), y.real()), Impl::run(x.imag(), y.imag()));
  }
  // Default-range draw with a specified number of random bits per component.
  static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
    return Scalar(Impl::run(numRandomBits), Impl::run(numRandomBits));
  }
  // Default-range draw at full precision.
  static EIGEN_DEVICE_FUNC inline Scalar run() { return Scalar(Impl::run(), Impl::run()); }
};
}
// namespace internal
}
// namespace Eigen
#endif // EIGEN_RANDOM_IMPL_H
eigen-master/Eigen/src/Core/Redux.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REDUX_H
#define EIGEN_REDUX_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// TODO
// * implement other kind of vectorization
// * factorize code
/***************************************************************************
* Part 1 : the logic deciding a strategy for vectorization and unrolling
***************************************************************************/
// Compile-time policy for reductions: given a functor and an evaluator, decide
// the traversal strategy (linear/sliced vectorized, linear, default) and
// whether the reduction loop should be fully unrolled.
template <typename Func, typename Evaluator>
struct redux_traits {
 public:
  typedef typename find_best_packet<typename Evaluator::Scalar, Evaluator::SizeAtCompileTime>::type PacketType;

  enum {
    PacketSize = unpacket_traits<PacketType>::size,
    // Inner/outer sizes follow the storage order of the evaluator.
    InnerMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxColsAtCompileTime : Evaluator::MaxRowsAtCompileTime,
    OuterMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxRowsAtCompileTime : Evaluator::MaxColsAtCompileTime,
    // Number of full packets processed by slice-vectorization; 0 means "not
    // worth it", Dynamic means "unknown but possibly worthwhile".
    SliceVectorizedWork = int(InnerMaxSize) == Dynamic ? Dynamic
                          : int(OuterMaxSize) == Dynamic
                              ? (int(InnerMaxSize) >= int(PacketSize) ? Dynamic : 0)
                              : (int(InnerMaxSize) / int(PacketSize)) * int(OuterMaxSize)
  };

  enum {
    MayLinearize = (int(Evaluator::Flags) & LinearAccessBit),
    // Vectorization needs packet access on both the evaluator and the functor.
    MightVectorize = (int(Evaluator::Flags) & ActualPacketAccessBit) && (functor_traits<Func>::PacketAccess),
    MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize),
    MaySliceVectorize =
        bool(MightVectorize) && (int(SliceVectorizedWork) == Dynamic || int(SliceVectorizedWork) >= 3)
  };

 public:
  enum {
    // Preference order: linear-vectorized > slice-vectorized > linear > default.
    Traversal = int(MayLinearVectorize)  ? int(LinearVectorizedTraversal)
                : int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
                : int(MayLinearize)      ? int(LinearTraversal)
                                         : int(DefaultTraversal)
  };

 public:
  enum {
    // Estimated cost of the whole reduction (coefficient reads + functor applications).
    Cost = Evaluator::SizeAtCompileTime == Dynamic
               ? HugeCost
               : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) +
                     (Evaluator::SizeAtCompileTime - 1) * functor_traits<Func>::Cost,
    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
  };

 public:
  enum { Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling };

#ifdef EIGEN_DEBUG_ASSIGN
  // Prints the selected strategy and the quantities it was derived from.
  static void debug() {
    std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl;
    std::cerr.setf(std::ios::hex, std::ios::basefield);
    EIGEN_DEBUG_VAR(Evaluator::Flags)
    std::cerr.unsetf(std::ios::hex);
    EIGEN_DEBUG_VAR(InnerMaxSize)
    EIGEN_DEBUG_VAR(OuterMaxSize)
    EIGEN_DEBUG_VAR(SliceVectorizedWork)
    EIGEN_DEBUG_VAR(PacketSize)
    EIGEN_DEBUG_VAR(MightVectorize)
    EIGEN_DEBUG_VAR(MayLinearVectorize)
    EIGEN_DEBUG_VAR(MaySliceVectorize)
    std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
    EIGEN_DEBUG_VAR(UnrollingLimit)
    std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl;
    std::cerr << std::endl;
  }
#endif
};
/***************************************************************************
* Part 2 : unrollers
***************************************************************************/
/*** no vectorization ***/
// Compile-time unroller for non-vectorized reductions using outer/inner indexing.
// Recursively splits the coefficient range [Start, Start+Length) in half and
// combines the two partial reductions with `func`, producing a balanced
// reduction tree instead of a linear fold.
template <typename Func, typename Evaluator, Index Start, Index Length>
struct redux_novec_unroller {
  // Split point: first half covers [Start, Start+HalfLength).
  static constexpr Index HalfLength = Length / 2;

  typedef typename Evaluator::Scalar Scalar;

  // Reduce the whole range by combining the two recursively reduced halves.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
    return func(redux_novec_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
                redux_novec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
  }
};
// Base case of the recursion: a single coefficient. The linear index `Start`
// is converted to (outer, inner) coordinates at compile time.
template <typename Func, typename Evaluator, Index Start>
struct redux_novec_unroller<Func, Evaluator, Start, 1> {
  static constexpr Index outer = Start / Evaluator::InnerSizeAtCompileTime;
  static constexpr Index inner = Start % Evaluator::InnerSizeAtCompileTime;

  typedef typename Evaluator::Scalar Scalar;

  // The functor is unused for a single element; just fetch the coefficient.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
    return eval.coeffByOuterInner(outer, inner);
  }
};
// This is actually dead code and will never be called. It is required
// to prevent false warnings regarding failed inlining though
// for 0 length run() will never be called at all.
template <typename Func, typename Evaluator, Index Start>
struct redux_novec_unroller<Func, Evaluator, Start, 0> {
  typedef typename Evaluator::Scalar Scalar;
  // Returns a value-initialized Scalar; never reached at runtime.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
};
// Compile-time unroller for non-vectorized reductions using *linear* indexing
// (Evaluator::coeff(Index)). Same half-splitting strategy as
// redux_novec_unroller, but addresses coefficients with a single flat index.
template <typename Func, typename Evaluator, Index Start, Index Length>
struct redux_novec_linear_unroller {
  // Split point: first half covers [Start, Start+HalfLength).
  static constexpr Index HalfLength = Length / 2;

  typedef typename Evaluator::Scalar Scalar;

  // Combine the two recursively reduced halves with `func`.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
    return func(redux_novec_linear_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
                redux_novec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
  }
};
// Base case: a single coefficient fetched by its flat index `Start`.
template <typename Func, typename Evaluator, Index Start>
struct redux_novec_linear_unroller<Func, Evaluator, Start, 1> {
  typedef typename Evaluator::Scalar Scalar;
  // The functor is unused for a single element.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
    return eval.coeff(Start);
  }
};
// This is actually dead code and will never be called. It is required
// to prevent false warnings regarding failed inlining though
// for 0 length run() will never be called at all.
template <typename Func, typename Evaluator, Index Start>
struct redux_novec_linear_unroller<Func, Evaluator, Start, 0> {
  typedef typename Evaluator::Scalar Scalar;
  // Returns a value-initialized Scalar; never reached at runtime.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
};
/*** vectorization ***/
// Compile-time unroller for the vectorized reduction with outer/inner
// indexing. `Start`/`Length` are expressed in *packets*, not scalars:
// the range [Start, Start+Length) of packets is split in half recursively
// and the halves are combined with the functor's packetOp.
template <typename Func, typename Evaluator, Index Start, Index Length>
struct redux_vec_unroller {
  template <typename PacketType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
    constexpr Index HalfLength = Length / 2;

    return func.packetOp(
        redux_vec_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
        redux_vec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(eval,
                                                                                                               func));
  }
};
// Base case: load one packet. The packet index `Start` is converted to a
// scalar index and then to (outer, inner) coordinates at compile time.
template <typename Func, typename Evaluator, Index Start>
struct redux_vec_unroller<Func, Evaluator, Start, 1> {
  template <typename PacketType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
    constexpr Index PacketSize = unpacket_traits<PacketType>::size;
    // Flat scalar index of the first element of this packet.
    constexpr Index index = Start * PacketSize;
    constexpr Index outer = index / int(Evaluator::InnerSizeAtCompileTime);
    constexpr Index inner = index % int(Evaluator::InnerSizeAtCompileTime);
    constexpr int alignment = Evaluator::Alignment;

    return eval.template packetByOuterInner<alignment, PacketType>(outer, inner);
  }
};
// Compile-time unroller for the vectorized reduction with *linear* packet
// indexing. Same half-splitting strategy as redux_vec_unroller, combining
// the halves with the functor's packetOp.
template <typename Func, typename Evaluator, Index Start, Index Length>
struct redux_vec_linear_unroller {
  template <typename PacketType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
    constexpr Index HalfLength = Length / 2;

    return func.packetOp(
        redux_vec_linear_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
        redux_vec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(
            eval, func));
  }
};
// Base case: load a single packet at flat scalar index Start*PacketSize.
template <typename Func, typename Evaluator, Index Start>
struct redux_vec_linear_unroller<Func, Evaluator, Start, 1> {
  template <typename PacketType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
    constexpr Index PacketSize = unpacket_traits<PacketType>::size;
    constexpr Index index = (Start * PacketSize);
    constexpr int alignment = Evaluator::Alignment;
    return eval.template packet<alignment, PacketType>(index);
  }
};
/***************************************************************************
 * Part 3 : implementation of all cases
 ***************************************************************************/

// Primary declaration of the reduction kernel; specializations below are
// selected by the Traversal/Unrolling decisions computed in redux_traits.
template <typename Func, typename Evaluator, int Traversal = redux_traits<Func, Evaluator>::Traversal,
          int Unrolling = redux_traits<Func, Evaluator>::Unrolling>
struct redux_impl;
// Scalar reduction with a runtime double loop over (outer, inner) coordinates.
// Used when neither vectorization nor linear access is available.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling> {
  typedef typename Evaluator::Scalar Scalar;

  // Folds every coefficient of `xpr` into one Scalar using `func`.
  // The matrix must be non-empty (asserted).
  template <typename XprType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
                                                          const XprType& xpr) {
    eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
    // Seed the accumulator with coefficient (0,0), then fold in the rest of
    // the first outer slice, followed by all remaining outer slices.
    Scalar res = eval.coeffByOuterInner(0, 0);
    for (Index innerIdx = 1; innerIdx < xpr.innerSize(); ++innerIdx)
      res = func(res, eval.coeffByOuterInner(0, innerIdx));
    for (Index outerIdx = 1; outerIdx < xpr.outerSize(); ++outerIdx)
      for (Index innerIdx = 0; innerIdx < xpr.innerSize(); ++innerIdx)
        res = func(res, eval.coeffByOuterInner(outerIdx, innerIdx));
    return res;
  }
};
// Scalar reduction with a single runtime loop over flat indices.
// Used when the evaluator supports linear access but not vectorization.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, LinearTraversal, NoUnrolling> {
  typedef typename Evaluator::Scalar Scalar;

  // Folds every coefficient of `xpr` into one Scalar using `func`.
  // The expression must be non-empty (asserted).
  template <typename XprType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
                                                          const XprType& xpr) {
    eigen_assert(xpr.size() > 0 && "you are using an empty matrix");
    // Seed with the first coefficient, then fold in the remaining ones.
    Scalar acc = eval.coeff(0);
    const Index total = xpr.size();
    for (Index idx = 1; idx < total; ++idx) acc = func(acc, eval.coeff(idx));
    return acc;
  }
};
// Fully unrolled scalar reduction over (outer, inner) coordinates: simply
// forwards to the compile-time half-splitting unroller over the whole
// fixed-size coefficient range.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, DefaultTraversal, CompleteUnrolling>
    : redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
  typedef redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
  typedef typename Evaluator::Scalar Scalar;
  // `xpr` is unused here: the size is known at compile time.
  template <typename XprType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
                                                          const XprType& /*xpr*/) {
    return Base::run(eval, func);
  }
};
// Fully unrolled scalar reduction over flat indices: forwards to the
// compile-time half-splitting linear unroller over the whole fixed-size range.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, LinearTraversal, CompleteUnrolling>
    : redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
  typedef redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
  typedef typename Evaluator::Scalar Scalar;
  // `xpr` is unused here: the size is known at compile time.
  template <typename XprType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
                                                          const XprType& /*xpr*/) {
    return Base::run(eval, func);
  }
};
// Vectorized linear reduction with a runtime loop. Strategy:
//   1. skip a scalar prologue up to the first aligned element,
//   2. process the aligned middle with packets, two accumulators at a time
//      to expose instruction-level parallelism,
//   3. handle a possible odd trailing packet, reduce the packet accumulator
//      with predux, and finish prologue/epilogue scalars with scalar calls.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, NoUnrolling> {
  typedef typename Evaluator::Scalar Scalar;
  typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;

  template <typename XprType>
  static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
    const Index size = xpr.size();

    constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
    constexpr int packetAlignment = unpacket_traits<PacketScalar>::alignment;
    // Aligned loads are only requested when the data is directly accessible
    // and the scalar type can be aligned on scalar boundaries.
    constexpr int alignment0 =
        (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar))
            ? int(packetAlignment)
            : int(Unaligned);
    constexpr int alignment = plain_enum_max(alignment0, Evaluator::Alignment);
    // First index whose address satisfies the default alignment.
    const Index alignedStart = internal::first_default_aligned(xpr);
    // alignedSize2: multiple of 2*packetSize (for the dual-accumulator loop);
    // alignedSize : multiple of packetSize (includes a possible odd packet).
    const Index alignedSize2 = ((size - alignedStart) / (2 * packetSize)) * (2 * packetSize);
    const Index alignedSize = ((size - alignedStart) / (packetSize)) * (packetSize);
    const Index alignedEnd2 = alignedStart + alignedSize2;
    const Index alignedEnd = alignedStart + alignedSize;
    Scalar res;
    if (alignedSize) {
      PacketScalar packet_res0 = eval.template packet<alignment, PacketScalar>(alignedStart);
      if (alignedSize > packetSize)  // we have at least two packets to partly unroll the loop
      {
        PacketScalar packet_res1 = eval.template packet<alignment, PacketScalar>(alignedStart + packetSize);
        // Main loop: consume two packets per iteration into two accumulators.
        for (Index index = alignedStart + 2 * packetSize; index < alignedEnd2; index += 2 * packetSize) {
          packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(index));
          packet_res1 = func.packetOp(packet_res1, eval.template packet<alignment, PacketScalar>(index + packetSize));
        }
        // Merge the two accumulators, then fold in the odd trailing packet if any.
        packet_res0 = func.packetOp(packet_res0, packet_res1);
        if (alignedEnd > alignedEnd2)
          packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(alignedEnd2));
      }
      // Horizontal reduction of the packet accumulator.
      res = func.predux(packet_res0);

      // Scalar prologue: elements before the first aligned index.
      for (Index index = 0; index < alignedStart; ++index) res = func(res, eval.coeff(index));

      // Scalar epilogue: leftover elements after the last full packet.
      for (Index index = alignedEnd; index < size; ++index) res = func(res, eval.coeff(index));
    } else  // too small to vectorize anything.
            // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
    {
      res = eval.coeff(0);
      for (Index index = 1; index < size; ++index) res = func(res, eval.coeff(index));
    }

    return res;
  }
};
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
// Vectorized reduction over an expression that is only packet-accessible
// per inner slice (e.g. a block): each outer slice is traversed with
// unaligned packet loads up to the largest multiple of the packet size,
// then the per-slice scalar remainders are folded in afterwards.
template <typename Func, typename Evaluator, int Unrolling>
struct redux_impl<Func, Evaluator, SliceVectorizedTraversal, Unrolling> {
  typedef typename Evaluator::Scalar Scalar;
  typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;

  template <typename XprType>
  EIGEN_DEVICE_FUNC static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
    eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
    constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
    const Index innerSize = xpr.innerSize();
    const Index outerSize = xpr.outerSize();
    // Portion of each inner slice that can be covered with whole packets.
    const Index packetedInnerSize = ((innerSize) / packetSize) * packetSize;
    Scalar res;
    if (packetedInnerSize) {
      // Seed the packet accumulator with the first packet, then fold in the
      // remaining packets of every outer slice (skipping the seed on slice 0).
      PacketType packet_res = eval.template packet<Unaligned, PacketType>(0, 0);
      for (Index j = 0; j < outerSize; ++j)
        for (Index i = (j == 0 ? packetSize : 0); i < packetedInnerSize; i += Index(packetSize))
          packet_res = func.packetOp(packet_res, eval.template packetByOuterInner<Unaligned, PacketType>(j, i));

      // Horizontal reduction, then the scalar tail of each inner slice.
      res = func.predux(packet_res);
      for (Index j = 0; j < outerSize; ++j)
        for (Index i = packetedInnerSize; i < innerSize; ++i) res = func(res, eval.coeffByOuterInner(j, i));
    } else  // too small to vectorize anything.
            // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
    {
      res = redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>::run(eval, func, xpr);
    }

    return res;
  }
};
// Fully unrolled vectorized linear reduction for fixed-size expressions:
// the packet part is unrolled at compile time, reduced with predux, and a
// fully unrolled scalar part handles the Size % PacketSize remainder.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, CompleteUnrolling> {
  typedef typename Evaluator::Scalar Scalar;

  typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
  static constexpr Index PacketSize = redux_traits<Func, Evaluator>::PacketSize;
  static constexpr Index Size = Evaluator::SizeAtCompileTime;
  // Number of coefficients covered by whole packets.
  static constexpr Index VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize);

  template <typename XprType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
                                                          const XprType& xpr) {
    EIGEN_ONLY_USED_FOR_DEBUG(xpr)
    eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
    if (VectorizedSize > 0) {
      // Unrolled packet reduction followed by a horizontal predux.
      Scalar res = func.predux(
          redux_vec_linear_unroller<Func, Evaluator, 0, Size / PacketSize>::template run<PacketType>(eval, func));
      // Fold in the scalar remainder, if the size is not a packet multiple.
      if (VectorizedSize != Size)
        res = func(
            res, redux_novec_linear_unroller<Func, Evaluator, VectorizedSize, Size - VectorizedSize>::run(eval, func));
      return res;
    } else {
      // Fewer coefficients than one packet: purely scalar unrolled reduction.
      return redux_novec_linear_unroller<Func, Evaluator, 0, Size>::run(eval, func);
    }
  }
};
// evaluator adaptor
// Wraps a regular evaluator for use by the redux_impl kernels: it exposes
// (outer, inner)-indexed coefficient/packet accessors that map to the
// underlying (row, col) accessors according to the storage order.
template <typename XprType_>
class redux_evaluator : public internal::evaluator<XprType_> {
  typedef internal::evaluator<XprType_> Base;

 public:
  typedef XprType_ XprType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit redux_evaluator(const XprType& xpr) : Base(xpr) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketScalar PacketScalar;

  enum {
    MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
    // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime
    // from the evaluator
    Flags = Base::Flags & ~DirectAccessBit,
    IsRowMajor = XprType::IsRowMajor,
    SizeAtCompileTime = XprType::SizeAtCompileTime,
    InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
  };

  // Coefficient access by (outer, inner): maps to (row, col) or (col, row)
  // depending on the storage order.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const {
    return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
  }

  // Packet load by (outer, inner), same index mapping as above.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetByOuterInner(Index outer, Index inner) const {
    return Base::template packet<LoadMode, PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
  }

  // Partial packet load ([begin, begin+count) lanes) by (outer, inner).
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegmentByOuterInner(Index outer, Index inner, Index begin,
                                                                             Index count) const {
    return Base::template packetSegment<LoadMode, PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer,
                                                              begin, count);
  }
};
}
// end namespace internal
/***************************************************************************
* Part 4 : public API
***************************************************************************/
/** \returns the result of a full redux operation on the whole matrix or vector using \a func
*
* The template parameter \a BinaryOp is the type of the functor \a func which must be
* an associative operator. Both current C++98 and C++11 functor styles are handled.
*
* \warning the matrix must be not empty, otherwise an assertion is triggered.
*
* \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
*/
// Full reduction entry point: wraps the expression in a redux_evaluator and
// dispatches to the redux_impl specialization selected by redux_traits.
// The matrix must be non-empty (asserted).
template <typename Derived>
template <typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::redux(
    const Func& func) const {
  eigen_assert(this->rows() > 0 && this->cols() > 0 && "you are using an empty matrix");

  typedef typename internal::redux_evaluator<Derived> EvaluatorType;
  EvaluatorType evaluator(derived());

  // The initial expression is passed to the reducer as an additional argument
  // instead of being stored as a member of redux_evaluator, to help
  return internal::redux_impl<Func, EvaluatorType>::run(evaluator, func, derived());
}
/** \returns the minimum of all coefficients of \c *this.
* In case \c *this contains NaN, NaNPropagation determines the behavior:
* NaNPropagation == PropagateFast : undefined
* NaNPropagation == PropagateNaN : result is NaN
* NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN
* \warning the matrix must be not empty, otherwise an assertion is triggered.
*/
// Minimum coefficient of the whole expression, reduced with scalar_min_op;
// NaN handling follows the NaNPropagation template argument.
template <typename Derived>
template <int NaNPropagation>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff() const {
  typedef Eigen::internal::scalar_min_op<Scalar, Scalar, NaNPropagation> MinOp;
  return derived().redux(MinOp());
}
/** \returns the maximum of all coefficients of \c *this.
* In case \c *this contains NaN, NaNPropagation determines the behavior:
* NaNPropagation == PropagateFast : undefined
* NaNPropagation == PropagateNaN : result is NaN
* NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN
* \warning the matrix must be not empty, otherwise an assertion is triggered.
*/
// Maximum coefficient of the whole expression, reduced with scalar_max_op;
// NaN handling follows the NaNPropagation template argument.
template <typename Derived>
template <int NaNPropagation>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff() const {
  typedef Eigen::internal::scalar_max_op<Scalar, Scalar, NaNPropagation> MaxOp;
  return derived().redux(MaxOp());
}
/** \returns the sum of all coefficients of \c *this
*
* If \c *this is empty, then the value 0 is returned.
*
* \sa trace(), prod(), mean()
*/
// Sum of all coefficients. An empty expression (detected at compile time or,
// for dynamic sizes, at runtime) yields Scalar(0) without invoking redux.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::sum() const {
  const bool isEmpty = SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0);
  if (isEmpty) return Scalar(0);
  return derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>());
}
/** \returns the mean of all coefficients of *this
*
* \sa trace(), prod(), sum()
*/
// Arithmetic mean: sum of all coefficients divided by the runtime size.
// (No empty-matrix guard here; an empty expression would divide by zero.)
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::mean() const {
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning(disable : 2259)
#endif
  typedef Eigen::internal::scalar_sum_op<Scalar, Scalar> SumOp;
  const Scalar total = Scalar(derived().redux(SumOp()));
  return total / Scalar(this->size());
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
}
/** \returns the product of all coefficients of *this
*
* Example: \include MatrixBase_prod.cpp
* Output: \verbinclude MatrixBase_prod.out
*
* \sa sum(), mean(), trace()
*/
// Product of all coefficients. An empty expression (detected at compile time
// or, for dynamic sizes, at runtime) yields Scalar(1) without invoking redux.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::prod() const {
  const bool isEmpty = SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0);
  if (isEmpty) return Scalar(1);
  return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
}
/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
*
* \c *this can be any matrix, not necessarily square.
*
* \sa diagonal(), sum()
*/
// Trace: the sum of the main-diagonal coefficients, implemented by reducing
// the diagonal() expression with sum().
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar MatrixBase<Derived>::trace() const {
  return derived().diagonal().sum();
}
}
// end namespace Eigen
#endif // EIGEN_REDUX_H
eigen-master/Eigen/src/Core/Ref.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REF_H
#define EIGEN_REF_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Traits for Ref<>: inherits the traits of the equivalent Map<> type and
// marks the expression NestByRef. The nested `match` metafunction decides at
// compile time whether a given Derived expression can be referenced without
// a temporary (direct access, compatible storage order, strides, alignment
// and scalar type).
template <typename PlainObjectType_, int Options_, typename StrideType_>
struct traits<Ref<PlainObjectType_, Options_, StrideType_> >
    : public traits<Map<PlainObjectType_, Options_, StrideType_> > {
  typedef PlainObjectType_ PlainObjectType;
  typedef StrideType_ StrideType;
  enum {
    Options = Options_,
    Flags = traits<Map<PlainObjectType_, Options_, StrideType_> >::Flags | NestByRefBit,
    Alignment = traits<Map<PlainObjectType_, Options_, StrideType_> >::Alignment,
    InnerStrideAtCompileTime = traits<Map<PlainObjectType_, Options_, StrideType_> >::InnerStrideAtCompileTime,
    OuterStrideAtCompileTime = traits<Map<PlainObjectType_, Options_, StrideType_> >::OuterStrideAtCompileTime
  };

  template <typename Derived>
  struct match {
    enum {
      IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime,
      HasDirectAccess = internal::has_direct_access<Derived>::ret,
      // Vectors are storage-order agnostic; otherwise the RowMajor bits must agree.
      StorageOrderMatch =
          IsVectorAtCompileTime || ((PlainObjectType::Flags & RowMajorBit) == (Derived::Flags & RowMajorBit)),
      // An inner stride of 0 in the Ref means "default", which matches 1.
      InnerStrideMatch = int(InnerStrideAtCompileTime) == int(Dynamic) ||
                         int(InnerStrideAtCompileTime) == int(Derived::InnerStrideAtCompileTime) ||
                         (int(InnerStrideAtCompileTime) == 0 && int(Derived::InnerStrideAtCompileTime) == 1),
      OuterStrideMatch = IsVectorAtCompileTime || int(OuterStrideAtCompileTime) == int(Dynamic) ||
                         int(OuterStrideAtCompileTime) == int(Derived::OuterStrideAtCompileTime),
      // NOTE, this indirection of evaluator<Derived>::Alignment is needed
      // to workaround a very strange bug in MSVC related to the instantiation
      // of has_*ary_operator in evaluator<CwiseNullaryOp>.
      // This line is surprisingly very sensitive. For instance, simply adding parenthesis
      // as "DerivedAlignment = (int(evaluator<Derived>::Alignment))," will make MSVC fail...
      DerivedAlignment = int(evaluator<Derived>::Alignment),
      AlignmentMatch = (int(traits<PlainObjectType>::Alignment) == int(Unaligned)) ||
                       (DerivedAlignment >= int(Alignment)),  // FIXME the first condition is not very clear, it should
                                                              // be replaced by the required alignment
      ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch &&
                           AlignmentMatch && ScalarTypeMatch
    };
    typedef std::conditional_t<MatchAtCompileTime, internal::true_type, internal::false_type> type;
  };
};
// RefBase<Derived> simply forwards the traits of its Derived type.
template <typename Derived>
struct traits<RefBase<Derived> > : public traits<Derived> {};
}
// namespace internal
// Common base of all Ref<> specializations: a MapBase over externally-owned
// data plus a runtime Stride object. `construct()` binds the map to an
// existing expression, resolving default (zero) strides and verifying that
// the expression's layout is compatible with the compile-time stride
// requirements.
template <typename Derived>
class RefBase : public MapBase<Derived> {
  typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
  typedef typename internal::traits<Derived>::StrideType StrideType;

 public:
  typedef MapBase<Derived> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)

  // A compile-time inner stride of 0 means "default", i.e. contiguous (1).
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const {
    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
  }

  // A compile-time outer stride of 0 means "default": the full size for
  // vectors, otherwise the inner dimension given the storage order.
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const {
    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
           : IsVectorAtCompileTime                   ? this->size()
           : int(Flags) & RowMajorBit                ? this->cols()
                                                     : this->rows();
  }

  EIGEN_DEVICE_FUNC RefBase()
      : Base(0, RowsAtCompileTime == Dynamic ? 0 : RowsAtCompileTime,
             ColsAtCompileTime == Dynamic ? 0 : ColsAtCompileTime),
        // Stride<> does not allow default ctor for Dynamic strides, so let's initialize it with dummy values:
        m_stride(StrideType::OuterStrideAtCompileTime == Dynamic ? 0 : StrideType::OuterStrideAtCompileTime,
                 StrideType::InnerStrideAtCompileTime == Dynamic ? 0 : StrideType::InnerStrideAtCompileTime) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)

 protected:
  typedef Stride<StrideType::OuterStrideAtCompileTime, StrideType::InnerStrideAtCompileTime> StrideBase;

  // Resolves inner stride if default 0.
  static EIGEN_DEVICE_FUNC constexpr Index resolveInnerStride(Index inner) { return inner == 0 ? 1 : inner; }

  // Resolves outer stride if default 0.
  static EIGEN_DEVICE_FUNC constexpr Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols,
                                                              bool isVectorAtCompileTime, bool isRowMajor) {
    return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer;
  }

  // Returns true if construction is valid, false if there is a stride mismatch,
  // and fails if there is a size mismatch.
  template <typename Expression>
  EIGEN_DEVICE_FUNC bool construct(Expression& expr) {
    // Check matrix sizes.  If this is a compile-time vector, we do allow
    // implicitly transposing.
    EIGEN_STATIC_ASSERT(EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression)
                            // If it is a vector, the transpose sizes might match.
                            || (PlainObjectType::IsVectorAtCompileTime &&
                                ((int(PlainObjectType::RowsAtCompileTime) == Eigen::Dynamic ||
                                  int(Expression::ColsAtCompileTime) == Eigen::Dynamic ||
                                  int(PlainObjectType::RowsAtCompileTime) == int(Expression::ColsAtCompileTime)) &&
                                 (int(PlainObjectType::ColsAtCompileTime) == Eigen::Dynamic ||
                                  int(Expression::RowsAtCompileTime) == Eigen::Dynamic ||
                                  int(PlainObjectType::ColsAtCompileTime) == int(Expression::RowsAtCompileTime)))),
                        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)

    // Determine runtime rows and columns.
    // When the Ref is a fixed row/column vector, the expression's flat size
    // is mapped onto the Ref's orientation (implicit transpose for vectors).
    Index rows = expr.rows();
    Index cols = expr.cols();
    if (PlainObjectType::RowsAtCompileTime == 1) {
      eigen_assert(expr.rows() == 1 || expr.cols() == 1);
      rows = 1;
      cols = expr.size();
    } else if (PlainObjectType::ColsAtCompileTime == 1) {
      eigen_assert(expr.rows() == 1 || expr.cols() == 1);
      rows = expr.size();
      cols = 1;
    }
    // Verify that the sizes are valid.
    eigen_assert((PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows));
    eigen_assert((PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols));

    // If this is a vector, we might be transposing, which means that stride should swap.
    const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows());
    // If the storage format differs, we also need to swap the stride.
    const bool row_major = ((PlainObjectType::Flags) & RowMajorBit) != 0;
    const bool expr_row_major = (Expression::Flags & RowMajorBit) != 0;
    const bool storage_differs = (row_major != expr_row_major);

    // Swapping twice (transpose AND different storage order) cancels out.
    const bool swap_stride = (transpose != storage_differs);

    // Determine expr's actual strides, resolving any defaults if zero.
    const Index expr_inner_actual = resolveInnerStride(expr.innerStride());
    const Index expr_outer_actual = resolveOuterStride(expr_inner_actual, expr.outerStride(), expr.rows(), expr.cols(),
                                                       Expression::IsVectorAtCompileTime != 0, expr_row_major);

    // If this is a column-major row vector or row-major column vector, the inner-stride
    // is arbitrary, so set it to either the compile-time inner stride or 1.
    const bool row_vector = (rows == 1);
    const bool col_vector = (cols == 1);
    const Index inner_stride =
        ((!row_major && row_vector) || (row_major && col_vector))
            ? (StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1)
        : swap_stride ? expr_outer_actual
                      : expr_inner_actual;

    // If this is a column-major column vector or row-major row vector, the outer-stride
    // is arbitrary, so set it to either the compile-time outer stride or vector size.
    const Index outer_stride =
        ((!row_major && col_vector) || (row_major && row_vector))
            ? (StrideType::OuterStrideAtCompileTime > 0 ? Index(StrideType::OuterStrideAtCompileTime)
                                                        : rows * cols * inner_stride)
        : swap_stride ? expr_inner_actual
                      : expr_outer_actual;

    // Check if given inner/outer strides are compatible with compile-time strides.
    const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic) ||
                             (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride);
    if (!inner_valid) {
      return false;
    }

    const bool outer_valid =
        (StrideType::OuterStrideAtCompileTime == Dynamic) ||
        (resolveOuterStride(inner_stride, Index(StrideType::OuterStrideAtCompileTime), rows, cols,
                            PlainObjectType::IsVectorAtCompileTime != 0, row_major) == outer_stride);
    if (!outer_valid) {
      return false;
    }

    // Rebind the MapBase and the runtime stride in place. Compile-time (0)
    // strides are stored as 0 so the accessors keep returning the defaults.
    internal::construct_at<Base>(this, expr.data(), rows, cols);
    internal::construct_at(&m_stride, (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride,
                           (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride);
    return true;
  }

  // Runtime stride storage (only the Dynamic components are meaningful).
  StrideBase m_stride;
};
/** \class Ref
* \ingroup Core_Module
*
* \brief A matrix or vector expression mapping an existing expression
*
* \tparam PlainObjectType the equivalent matrix type of the mapped data
 * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32,
* \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned. \tparam StrideType optionally specifies
* strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), but accepts a
* variable outer stride (leading dimension). This can be overridden by specifying strides. The type passed here must be
* a specialization of the Stride template, see examples below.
*
* This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the
* number of copies. A Ref<> object can represent either a const expression or a l-value: \code
* // in-out argument:
* void foo1(Ref<VectorXf> x);
*
* // read-only const argument:
* void foo2(const Ref<const VectorXf>& x);
* \endcode
*
* In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation
* issue will be triggered. By default, a Ref<VectorXf> can reference any dense vector expression of float having a
* contiguous memory layout. Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float
* whose column's elements are contiguously stored with the possibility to have a constant space in-between each column,
* i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) can be greater than the number
* of rows.
*
* In the const case, if the input expression does not match the above requirement, then it is evaluated into a
* temporary before being passed to the function. Here are some examples: \code MatrixXf A; VectorXf a; foo1(a.head());
* // OK foo1(A.col()); // OK foo1(A.row()); // Compilation error because here innerstride!=1
* foo2(A.row()); // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object
* foo2(A.row().transpose()); // The row is copied into a contiguous temporary
* foo2(2*a); // The expression is evaluated into a temporary
* foo2(A.col().segment(2,4)); // No temporary
* \endcode
*
* The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters.
* Here is an example accepting an innerstride!=1:
* \code
* // in-out argument:
* void foo3(Ref<VectorXf,0,InnerStride<> > x);
* foo3(A.row()); // OK
* \endcode
* The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to
* exploit vectorization, and will involve more expensive address computations even if the input is contiguously stored
* in memory. To overcome this issue, one might propose to overload internally calling a template function, e.g.: \code
* // in the .h:
* void foo(const Ref<MatrixXf>& A);
* void foo(const Ref<MatrixXf,0,Stride<> >& A);
*
* // in the .cpp:
* template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
* ... // crazy code goes here
* }
* void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
* void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
* \endcode
*
* See also the following stackoverflow questions for further references:
* - <a href="http://stackoverflow.com/questions/21132538/correct-usage-of-the-eigenref-class">Correct usage of the
* Eigen::Ref<> class</a>
*
* \sa PlainObjectBase::Map(), \ref TopicStorageOrders
*/
template
<
typename
PlainObjectType
,
int
Options
,
typename
StrideType
>
class
Ref
:
public
RefBase
<
Ref
<
PlainObjectType
,
Options
,
StrideType
>
>
{
private:
typedef
internal
::
traits
<
Ref
>
Traits
;
template
<
typename
Derived
>
EIGEN_DEVICE_FUNC
inline
Ref
(
const
PlainObjectBase
<
Derived
>&
expr
,
std
::
enable_if_t
<
bool
(
Traits
::
template
match
<
Derived
>
::
MatchAtCompileTime
),
Derived
>*
=
0
);
public:
typedef
RefBase
<
Ref
>
Base
;
EIGEN_DENSE_PUBLIC_INTERFACE
(
Ref
)
#ifndef EIGEN_PARSED_BY_DOXYGEN
template
<
typename
Derived
>
EIGEN_DEVICE_FUNC
inline
Ref
(
PlainObjectBase
<
Derived
>&
expr
,
std
::
enable_if_t
<
bool
(
Traits
::
template
match
<
Derived
>
::
MatchAtCompileTime
),
Derived
>*
=
0
)
{
EIGEN_STATIC_ASSERT
(
bool
(
Traits
::
template
match
<
Derived
>
::
MatchAtCompileTime
),
STORAGE_LAYOUT_DOES_NOT_MATCH
);
// Construction must pass since we will not create temporary storage in the non-const case.
const
bool
success
=
Base
::
construct
(
expr
.
derived
());
EIGEN_UNUSED_VARIABLE
(
success
)
eigen_assert
(
success
);
}
template
<
typename
Derived
>
EIGEN_DEVICE_FUNC
inline
Ref
(
const
DenseBase
<
Derived
>&
expr
,
std
::
enable_if_t
<
bool
(
Traits
::
template
match
<
Derived
>
::
MatchAtCompileTime
),
Derived
>*
=
0
)
#else
/** Implicit constructor from any dense expression */
template
<
typename
Derived
>
inline
Ref
(
DenseBase
<
Derived
>&
expr
)
#endif
{
EIGEN_STATIC_ASSERT
(
bool
(
internal
::
is_lvalue
<
Derived
>::
value
),
THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY
);
EIGEN_STATIC_ASSERT
(
bool
(
Traits
::
template
match
<
Derived
>
::
MatchAtCompileTime
),
STORAGE_LAYOUT_DOES_NOT_MATCH
);
EIGEN_STATIC_ASSERT
(
!
Derived
::
IsPlainObjectBase
,
THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY
);
// Construction must pass since we will not create temporary storage in the non-const case.
const
bool
success
=
Base
::
construct
(
expr
.
const_cast_derived
());
EIGEN_UNUSED_VARIABLE
(
success
)
eigen_assert
(
success
);
}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS
(
Ref
)
};
// this is the const ref version
template
<
typename
TPlainObjectType
,
int
Options
,
typename
StrideType
>
class
Ref
<
const
TPlainObjectType
,
Options
,
StrideType
>
:
public
RefBase
<
Ref
<
const
TPlainObjectType
,
Options
,
StrideType
>
>
{
typedef
internal
::
traits
<
Ref
>
Traits
;
static
constexpr
bool
may_map_m_object_successfully
=
(
static_cast
<
int
>
(
StrideType
::
InnerStrideAtCompileTime
)
==
0
||
static_cast
<
int
>
(
StrideType
::
InnerStrideAtCompileTime
)
==
1
||
static_cast
<
int
>
(
StrideType
::
InnerStrideAtCompileTime
)
==
Dynamic
)
&&
(
TPlainObjectType
::
IsVectorAtCompileTime
||
static_cast
<
int
>
(
StrideType
::
OuterStrideAtCompileTime
)
==
0
||
static_cast
<
int
>
(
StrideType
::
OuterStrideAtCompileTime
)
==
Dynamic
||
static_cast
<
int
>
(
StrideType
::
OuterStrideAtCompileTime
)
==
static_cast
<
int
>
(
TPlainObjectType
::
InnerSizeAtCompileTime
)
||
static_cast
<
int
>
(
TPlainObjectType
::
InnerSizeAtCompileTime
)
==
Dynamic
);
public:
typedef
RefBase
<
Ref
>
Base
;
EIGEN_DENSE_PUBLIC_INTERFACE
(
Ref
)
template
<
typename
Derived
>
EIGEN_DEVICE_FUNC
inline
Ref
(
const
DenseBase
<
Derived
>&
expr
,
std
::
enable_if_t
<
bool
(
Traits
::
template
match
<
Derived
>
::
ScalarTypeMatch
),
Derived
>*
=
0
)
{
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << ","
// << match_helper<Derived>::InnerStrideMatch << "\n"; std::cout << int(StrideType::OuterStrideAtCompileTime)
// << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; std::cout <<
// int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
EIGEN_STATIC_ASSERT
(
Traits
::
template
match
<
Derived
>
::
type
::
value
||
may_map_m_object_successfully
,
STORAGE_LAYOUT_DOES_NOT_MATCH
);
construct
(
expr
.
derived
(),
typename
Traits
::
template
match
<
Derived
>
::
type
());
}
EIGEN_DEVICE_FUNC
inline
Ref
(
const
Ref
&
other
)
:
Base
(
other
)
{
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
EIGEN_DEVICE_FUNC
inline
Ref
(
Ref
&&
other
)
{
if
(
other
.
data
()
==
other
.
m_object
.
data
())
{
m_object
=
std
::
move
(
other
.
m_object
);
Base
::
construct
(
m_object
);
}
else
Base
::
construct
(
other
);
}
template
<
typename
OtherRef
>
EIGEN_DEVICE_FUNC
inline
Ref
(
const
RefBase
<
OtherRef
>&
other
)
{
EIGEN_STATIC_ASSERT
(
Traits
::
template
match
<
OtherRef
>
::
type
::
value
||
may_map_m_object_successfully
,
STORAGE_LAYOUT_DOES_NOT_MATCH
);
construct
(
other
.
derived
(),
typename
Traits
::
template
match
<
OtherRef
>
::
type
());
}
protected:
template
<
typename
Expression
>
EIGEN_DEVICE_FUNC
void
construct
(
const
Expression
&
expr
,
internal
::
true_type
)
{
// Check if we can use the underlying expr's storage directly, otherwise call the copy version.
if
(
!
Base
::
construct
(
expr
))
{
construct
(
expr
,
internal
::
false_type
());
}
}
template
<
typename
Expression
>
EIGEN_DEVICE_FUNC
void
construct
(
const
Expression
&
expr
,
internal
::
false_type
)
{
internal
::
call_assignment_no_alias
(
m_object
,
expr
,
internal
::
assign_op
<
Scalar
,
Scalar
>
());
const
bool
success
=
Base
::
construct
(
m_object
);
EIGEN_ONLY_USED_FOR_DEBUG
(
success
)
eigen_assert
(
success
);
}
protected:
TPlainObjectType
m_object
;
};
}
// end namespace Eigen
#endif // EIGEN_REF_H
eigen-master/Eigen/src/Core/Replicate.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REPLICATE_H
#define EIGEN_REPLICATE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
template
<
typename
MatrixType
,
int
RowFactor
,
int
ColFactor
>
struct
traits
<
Replicate
<
MatrixType
,
RowFactor
,
ColFactor
>
>
:
traits
<
MatrixType
>
{
typedef
typename
MatrixType
::
Scalar
Scalar
;
typedef
typename
traits
<
MatrixType
>::
StorageKind
StorageKind
;
typedef
typename
traits
<
MatrixType
>::
XprKind
XprKind
;
typedef
typename
ref_selector
<
MatrixType
>::
type
MatrixTypeNested
;
typedef
std
::
remove_reference_t
<
MatrixTypeNested
>
MatrixTypeNested_
;
enum
{
RowsAtCompileTime
=
RowFactor
==
Dynamic
||
int
(
MatrixType
::
RowsAtCompileTime
)
==
Dynamic
?
Dynamic
:
RowFactor
*
MatrixType
::
RowsAtCompileTime
,
ColsAtCompileTime
=
ColFactor
==
Dynamic
||
int
(
MatrixType
::
ColsAtCompileTime
)
==
Dynamic
?
Dynamic
:
ColFactor
*
MatrixType
::
ColsAtCompileTime
,
// FIXME we don't propagate the max sizes !!!
MaxRowsAtCompileTime
=
RowsAtCompileTime
,
MaxColsAtCompileTime
=
ColsAtCompileTime
,
IsRowMajor
=
MaxRowsAtCompileTime
==
1
&&
MaxColsAtCompileTime
!=
1
?
1
:
MaxColsAtCompileTime
==
1
&&
MaxRowsAtCompileTime
!=
1
?
0
:
(
MatrixType
::
Flags
&
RowMajorBit
)
?
1
:
0
,
// FIXME enable DirectAccess with negative strides?
Flags
=
IsRowMajor
?
RowMajorBit
:
0
};
};
}
// namespace internal
/**
* \class Replicate
* \ingroup Core_Module
*
* \brief Expression of the multiple replication of a matrix or vector
*
* \tparam MatrixType the type of the object we are replicating
* \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic.
* \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic.
*
* This class represents an expression of the multiple replication of a matrix or vector.
* It is the return type of DenseBase::replicate() and most of the time
* this is the only way it is used.
*
* \sa DenseBase::replicate()
*/
template
<
typename
MatrixType
,
int
RowFactor
,
int
ColFactor
>
class
Replicate
:
public
internal
::
dense_xpr_base
<
Replicate
<
MatrixType
,
RowFactor
,
ColFactor
>
>::
type
{
typedef
typename
internal
::
traits
<
Replicate
>::
MatrixTypeNested
MatrixTypeNested
;
typedef
typename
internal
::
traits
<
Replicate
>::
MatrixTypeNested_
MatrixTypeNested_
;
public:
typedef
typename
internal
::
dense_xpr_base
<
Replicate
>::
type
Base
;
EIGEN_DENSE_PUBLIC_INTERFACE
(
Replicate
)
typedef
internal
::
remove_all_t
<
MatrixType
>
NestedExpression
;
template
<
typename
OriginalMatrixType
>
EIGEN_DEVICE_FUNC
inline
explicit
Replicate
(
const
OriginalMatrixType
&
matrix
)
:
m_matrix
(
matrix
),
m_rowFactor
(
RowFactor
),
m_colFactor
(
ColFactor
)
{
EIGEN_STATIC_ASSERT
((
internal
::
is_same
<
std
::
remove_const_t
<
MatrixType
>
,
OriginalMatrixType
>::
value
),
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE
)
eigen_assert
(
RowFactor
!=
Dynamic
&&
ColFactor
!=
Dynamic
);
}
template
<
typename
OriginalMatrixType
>
EIGEN_DEVICE_FUNC
inline
Replicate
(
const
OriginalMatrixType
&
matrix
,
Index
rowFactor
,
Index
colFactor
)
:
m_matrix
(
matrix
),
m_rowFactor
(
rowFactor
),
m_colFactor
(
colFactor
)
{
EIGEN_STATIC_ASSERT
((
internal
::
is_same
<
std
::
remove_const_t
<
MatrixType
>
,
OriginalMatrixType
>::
value
),
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE
)
}
EIGEN_DEVICE_FUNC
constexpr
Index
rows
()
const
{
return
m_matrix
.
rows
()
*
m_rowFactor
.
value
();
}
EIGEN_DEVICE_FUNC
constexpr
Index
cols
()
const
{
return
m_matrix
.
cols
()
*
m_colFactor
.
value
();
}
EIGEN_DEVICE_FUNC
const
MatrixTypeNested_
&
nestedExpression
()
const
{
return
m_matrix
;
}
protected:
MatrixTypeNested
m_matrix
;
const
internal
::
variable_if_dynamic
<
Index
,
RowFactor
>
m_rowFactor
;
const
internal
::
variable_if_dynamic
<
Index
,
ColFactor
>
m_colFactor
;
};
/**
* \return an expression of the replication of \c *this
*
* Example: \include MatrixBase_replicate.cpp
* Output: \verbinclude MatrixBase_replicate.out
*
* \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
*/
template
<
typename
Derived
>
template
<
int
RowFactor
,
int
ColFactor
>
EIGEN_DEVICE_FUNC
const
Replicate
<
Derived
,
RowFactor
,
ColFactor
>
DenseBase
<
Derived
>::
replicate
()
const
{
return
Replicate
<
Derived
,
RowFactor
,
ColFactor
>
(
derived
());
}
/**
* \return an expression of the replication of each column (or row) of \c *this
*
* Example: \include DirectionWise_replicate_int.cpp
* Output: \verbinclude DirectionWise_replicate_int.out
*
* \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
*/
template
<
typename
ExpressionType
,
int
Direction
>
EIGEN_DEVICE_FUNC
const
typename
VectorwiseOp
<
ExpressionType
,
Direction
>::
ReplicateReturnType
VectorwiseOp
<
ExpressionType
,
Direction
>::
replicate
(
Index
factor
)
const
{
return
typename
VectorwiseOp
<
ExpressionType
,
Direction
>::
ReplicateReturnType
(
_expression
(),
Direction
==
Vertical
?
factor
:
1
,
Direction
==
Horizontal
?
factor
:
1
);
}
}
// end namespace Eigen
#endif // EIGEN_REPLICATE_H
eigen-master/Eigen/src/Core/Reshaped.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2014 yoco <peter.xiau@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_RESHAPED_H
#define EIGEN_RESHAPED_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class Reshaped
* \ingroup Core_Module
*
* \brief Expression of a fixed-size or dynamic-size reshape
*
* \tparam XprType the type of the expression in which we are taking a reshape
* \tparam Rows the number of rows of the reshape we are taking at compile time (optional)
* \tparam Cols the number of columns of the reshape we are taking at compile time (optional)
* \tparam Order can be ColMajor or RowMajor, default is ColMajor.
*
* This class represents an expression of either a fixed-size or dynamic-size reshape.
* It is the return type of DenseBase::reshaped(NRowsType,NColsType) and
* most of the time this is the only way it is used.
*
* If you want to directly manipulate reshaped expressions,
* for instance if you want to write a function returning such an expression,
* it is advised to use the \em auto keyword for such use cases.
*
* Here is an example illustrating the dynamic case:
* \include class_Reshaped.cpp
* Output: \verbinclude class_Reshaped.out
*
* Here is an example illustrating the fixed-size case:
* \include class_FixedReshaped.cpp
* Output: \verbinclude class_FixedReshaped.out
*
* \sa DenseBase::reshaped(NRowsType,NColsType)
*/
namespace
internal
{
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
>
struct
traits
<
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
>
:
traits
<
XprType
>
{
typedef
typename
traits
<
XprType
>::
Scalar
Scalar
;
typedef
typename
traits
<
XprType
>::
StorageKind
StorageKind
;
typedef
typename
traits
<
XprType
>::
XprKind
XprKind
;
enum
{
MatrixRows
=
traits
<
XprType
>::
RowsAtCompileTime
,
MatrixCols
=
traits
<
XprType
>::
ColsAtCompileTime
,
RowsAtCompileTime
=
Rows
,
ColsAtCompileTime
=
Cols
,
MaxRowsAtCompileTime
=
Rows
,
MaxColsAtCompileTime
=
Cols
,
XpxStorageOrder
=
((
int
(
traits
<
XprType
>::
Flags
)
&
RowMajorBit
)
==
RowMajorBit
)
?
RowMajor
:
ColMajor
,
ReshapedStorageOrder
=
(
RowsAtCompileTime
==
1
&&
ColsAtCompileTime
!=
1
)
?
RowMajor
:
(
ColsAtCompileTime
==
1
&&
RowsAtCompileTime
!=
1
)
?
ColMajor
:
XpxStorageOrder
,
HasSameStorageOrderAsXprType
=
(
ReshapedStorageOrder
==
XpxStorageOrder
),
InnerSize
=
(
ReshapedStorageOrder
==
int
(
RowMajor
))
?
int
(
ColsAtCompileTime
)
:
int
(
RowsAtCompileTime
),
InnerStrideAtCompileTime
=
HasSameStorageOrderAsXprType
?
int
(
inner_stride_at_compile_time
<
XprType
>::
ret
)
:
Dynamic
,
OuterStrideAtCompileTime
=
Dynamic
,
HasDirectAccess
=
internal
::
has_direct_access
<
XprType
>::
ret
&&
(
Order
==
int
(
XpxStorageOrder
))
&&
((
evaluator
<
XprType
>::
Flags
&
LinearAccessBit
)
==
LinearAccessBit
),
MaskPacketAccessBit
=
(
InnerSize
==
Dynamic
||
(
InnerSize
%
packet_traits
<
Scalar
>::
size
)
==
0
)
&&
(
InnerStrideAtCompileTime
==
1
)
?
PacketAccessBit
:
0
,
// MaskAlignedBit = ((OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16)
// == 0)) ? AlignedBit : 0,
FlagsLinearAccessBit
=
(
RowsAtCompileTime
==
1
||
ColsAtCompileTime
==
1
)
?
LinearAccessBit
:
0
,
FlagsLvalueBit
=
is_lvalue
<
XprType
>::
value
?
LvalueBit
:
0
,
FlagsRowMajorBit
=
(
ReshapedStorageOrder
==
int
(
RowMajor
))
?
RowMajorBit
:
0
,
FlagsDirectAccessBit
=
HasDirectAccess
?
DirectAccessBit
:
0
,
Flags0
=
traits
<
XprType
>::
Flags
&
((
HereditaryBits
&
~
RowMajorBit
)
|
MaskPacketAccessBit
),
Flags
=
(
Flags0
|
FlagsLinearAccessBit
|
FlagsLvalueBit
|
FlagsRowMajorBit
|
FlagsDirectAccessBit
)
};
};
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
,
bool
HasDirectAccess
>
class
ReshapedImpl_dense
;
}
// end namespace internal
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
,
typename
StorageKind
>
class
ReshapedImpl
;
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
>
class
Reshaped
:
public
ReshapedImpl
<
XprType
,
Rows
,
Cols
,
Order
,
typename
internal
::
traits
<
XprType
>::
StorageKind
>
{
typedef
ReshapedImpl
<
XprType
,
Rows
,
Cols
,
Order
,
typename
internal
::
traits
<
XprType
>::
StorageKind
>
Impl
;
public:
// typedef typename Impl::Base Base;
typedef
Impl
Base
;
EIGEN_GENERIC_PUBLIC_INTERFACE
(
Reshaped
)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS
(
Reshaped
)
/** Fixed-size constructor
*/
EIGEN_DEVICE_FUNC
inline
Reshaped
(
XprType
&
xpr
)
:
Impl
(
xpr
)
{
EIGEN_STATIC_ASSERT
(
RowsAtCompileTime
!=
Dynamic
&&
ColsAtCompileTime
!=
Dynamic
,
THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE
)
eigen_assert
(
Rows
*
Cols
==
xpr
.
rows
()
*
xpr
.
cols
());
}
/** Dynamic-size constructor
*/
EIGEN_DEVICE_FUNC
inline
Reshaped
(
XprType
&
xpr
,
Index
reshapeRows
,
Index
reshapeCols
)
:
Impl
(
xpr
,
reshapeRows
,
reshapeCols
)
{
eigen_assert
((
RowsAtCompileTime
==
Dynamic
||
RowsAtCompileTime
==
reshapeRows
)
&&
(
ColsAtCompileTime
==
Dynamic
||
ColsAtCompileTime
==
reshapeCols
));
eigen_assert
(
reshapeRows
*
reshapeCols
==
xpr
.
rows
()
*
xpr
.
cols
());
}
};
// The generic default implementation for dense reshape simply forward to the internal::ReshapedImpl_dense
// that must be specialized for direct and non-direct access...
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
>
class
ReshapedImpl
<
XprType
,
Rows
,
Cols
,
Order
,
Dense
>
:
public
internal
::
ReshapedImpl_dense
<
XprType
,
Rows
,
Cols
,
Order
,
internal
::
traits
<
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
>::
HasDirectAccess
>
{
typedef
internal
::
ReshapedImpl_dense
<
XprType
,
Rows
,
Cols
,
Order
,
internal
::
traits
<
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
>::
HasDirectAccess
>
Impl
;
public:
typedef
Impl
Base
;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS
(
ReshapedImpl
)
EIGEN_DEVICE_FUNC
inline
ReshapedImpl
(
XprType
&
xpr
)
:
Impl
(
xpr
)
{}
EIGEN_DEVICE_FUNC
inline
ReshapedImpl
(
XprType
&
xpr
,
Index
reshapeRows
,
Index
reshapeCols
)
:
Impl
(
xpr
,
reshapeRows
,
reshapeCols
)
{}
};
namespace
internal
{
/** \internal Internal implementation of dense Reshaped in the general case. */
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
>
class
ReshapedImpl_dense
<
XprType
,
Rows
,
Cols
,
Order
,
false
>
:
public
internal
::
dense_xpr_base
<
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
>::
type
{
typedef
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
ReshapedType
;
public:
typedef
typename
internal
::
dense_xpr_base
<
ReshapedType
>::
type
Base
;
EIGEN_DENSE_PUBLIC_INTERFACE
(
ReshapedType
)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS
(
ReshapedImpl_dense
)
typedef
typename
internal
::
ref_selector
<
XprType
>::
non_const_type
MatrixTypeNested
;
typedef
internal
::
remove_all_t
<
XprType
>
NestedExpression
;
class
InnerIterator
;
/** Fixed-size constructor
*/
EIGEN_DEVICE_FUNC
inline
ReshapedImpl_dense
(
XprType
&
xpr
)
:
m_xpr
(
xpr
),
m_rows
(
Rows
),
m_cols
(
Cols
)
{}
/** Dynamic-size constructor
*/
EIGEN_DEVICE_FUNC
inline
ReshapedImpl_dense
(
XprType
&
xpr
,
Index
nRows
,
Index
nCols
)
:
m_xpr
(
xpr
),
m_rows
(
nRows
),
m_cols
(
nCols
)
{}
EIGEN_DEVICE_FUNC
Index
rows
()
const
{
return
m_rows
;
}
EIGEN_DEVICE_FUNC
Index
cols
()
const
{
return
m_cols
;
}
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** \sa MapBase::data() */
EIGEN_DEVICE_FUNC
constexpr
const
Scalar
*
data
()
const
;
EIGEN_DEVICE_FUNC
inline
Index
innerStride
()
const
;
EIGEN_DEVICE_FUNC
inline
Index
outerStride
()
const
;
#endif
/** \returns the nested expression */
EIGEN_DEVICE_FUNC
const
internal
::
remove_all_t
<
XprType
>&
nestedExpression
()
const
{
return
m_xpr
;
}
/** \returns the nested expression */
EIGEN_DEVICE_FUNC
std
::
remove_reference_t
<
XprType
>&
nestedExpression
()
{
return
m_xpr
;
}
protected:
MatrixTypeNested
m_xpr
;
const
internal
::
variable_if_dynamic
<
Index
,
Rows
>
m_rows
;
const
internal
::
variable_if_dynamic
<
Index
,
Cols
>
m_cols
;
};
/** \internal Internal implementation of dense Reshaped in the direct access case. */
template
<
typename
XprType
,
int
Rows
,
int
Cols
,
int
Order
>
class
ReshapedImpl_dense
<
XprType
,
Rows
,
Cols
,
Order
,
true
>
:
public
MapBase
<
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
>
{
typedef
Reshaped
<
XprType
,
Rows
,
Cols
,
Order
>
ReshapedType
;
typedef
typename
internal
::
ref_selector
<
XprType
>::
non_const_type
XprTypeNested
;
public:
typedef
MapBase
<
ReshapedType
>
Base
;
EIGEN_DENSE_PUBLIC_INTERFACE
(
ReshapedType
)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS
(
ReshapedImpl_dense
)
/** Fixed-size constructor
*/
EIGEN_DEVICE_FUNC
inline
ReshapedImpl_dense
(
XprType
&
xpr
)
:
Base
(
xpr
.
data
()),
m_xpr
(
xpr
)
{}
/** Dynamic-size constructor
*/
EIGEN_DEVICE_FUNC
inline
ReshapedImpl_dense
(
XprType
&
xpr
,
Index
nRows
,
Index
nCols
)
:
Base
(
xpr
.
data
(),
nRows
,
nCols
),
m_xpr
(
xpr
)
{}
EIGEN_DEVICE_FUNC
const
internal
::
remove_all_t
<
XprTypeNested
>&
nestedExpression
()
const
{
return
m_xpr
;
}
EIGEN_DEVICE_FUNC
XprType
&
nestedExpression
()
{
return
m_xpr
;
}
/** \sa MapBase::innerStride() */
EIGEN_DEVICE_FUNC
constexpr
Index
innerStride
()
const
{
return
m_xpr
.
innerStride
();
}
/** \sa MapBase::outerStride() */
EIGEN_DEVICE_FUNC
constexpr
Index
outerStride
()
const
{
return
(((
Flags
&
RowMajorBit
)
==
RowMajorBit
)
?
this
->
cols
()
:
this
->
rows
())
*
m_xpr
.
innerStride
();
}
protected:
XprTypeNested
m_xpr
;
};
// Evaluators
template
<
typename
ArgType
,
int
Rows
,
int
Cols
,
int
Order
,
bool
HasDirectAccess
>
struct
reshaped_evaluator
;
template
<
typename
ArgType
,
int
Rows
,
int
Cols
,
int
Order
>
struct
evaluator
<
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
>
:
reshaped_evaluator
<
ArgType
,
Rows
,
Cols
,
Order
,
traits
<
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
>::
HasDirectAccess
>
{
typedef
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
XprType
;
typedef
typename
XprType
::
Scalar
Scalar
;
// TODO: should check for smaller packet types
typedef
typename
packet_traits
<
Scalar
>::
type
PacketScalar
;
enum
{
CoeffReadCost
=
evaluator
<
ArgType
>::
CoeffReadCost
,
HasDirectAccess
=
traits
<
XprType
>::
HasDirectAccess
,
// RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
// ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
// MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
// MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
//
// InnerStrideAtCompileTime = traits<XprType>::HasSameStorageOrderAsXprType
// ? int(inner_stride_at_compile_time<ArgType>::ret)
// : Dynamic,
// OuterStrideAtCompileTime = Dynamic,
FlagsLinearAccessBit
=
(
traits
<
XprType
>::
RowsAtCompileTime
==
1
||
traits
<
XprType
>::
ColsAtCompileTime
==
1
||
HasDirectAccess
)
?
LinearAccessBit
:
0
,
FlagsRowMajorBit
=
(
traits
<
XprType
>::
ReshapedStorageOrder
==
int
(
RowMajor
))
?
RowMajorBit
:
0
,
FlagsDirectAccessBit
=
HasDirectAccess
?
DirectAccessBit
:
0
,
Flags0
=
evaluator
<
ArgType
>::
Flags
&
(
HereditaryBits
&
~
RowMajorBit
),
Flags
=
Flags0
|
FlagsLinearAccessBit
|
FlagsRowMajorBit
|
FlagsDirectAccessBit
,
PacketAlignment
=
unpacket_traits
<
PacketScalar
>::
alignment
,
Alignment
=
evaluator
<
ArgType
>::
Alignment
};
typedef
reshaped_evaluator
<
ArgType
,
Rows
,
Cols
,
Order
,
HasDirectAccess
>
reshaped_evaluator_type
;
EIGEN_DEVICE_FUNC
explicit
evaluator
(
const
XprType
&
xpr
)
:
reshaped_evaluator_type
(
xpr
)
{
EIGEN_INTERNAL_CHECK_COST_VALUE
(
CoeffReadCost
);
}
};
template
<
typename
ArgType
,
int
Rows
,
int
Cols
,
int
Order
>
struct
reshaped_evaluator
<
ArgType
,
Rows
,
Cols
,
Order
,
/* HasDirectAccess */
false
>
:
evaluator_base
<
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
>
{
typedef
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
XprType
;
enum
{
CoeffReadCost
=
evaluator
<
ArgType
>::
CoeffReadCost
/* TODO + cost of index computations */
,
Flags
=
(
evaluator
<
ArgType
>::
Flags
&
(
HereditaryBits
/*| LinearAccessBit | DirectAccessBit*/
)),
Alignment
=
0
};
EIGEN_DEVICE_FUNC
explicit
reshaped_evaluator
(
const
XprType
&
xpr
)
:
m_argImpl
(
xpr
.
nestedExpression
()),
m_xpr
(
xpr
)
{
EIGEN_INTERNAL_CHECK_COST_VALUE
(
CoeffReadCost
);
}
typedef
typename
XprType
::
Scalar
Scalar
;
typedef
typename
XprType
::
CoeffReturnType
CoeffReturnType
;
typedef
std
::
pair
<
Index
,
Index
>
RowCol
;
EIGEN_DEVICE_FUNC
inline
RowCol
index_remap
(
Index
rowId
,
Index
colId
)
const
{
if
(
Order
==
ColMajor
)
{
const
Index
nth_elem_idx
=
colId
*
m_xpr
.
rows
()
+
rowId
;
return
RowCol
(
nth_elem_idx
%
m_xpr
.
nestedExpression
().
rows
(),
nth_elem_idx
/
m_xpr
.
nestedExpression
().
rows
());
}
else
{
const
Index
nth_elem_idx
=
colId
+
rowId
*
m_xpr
.
cols
();
return
RowCol
(
nth_elem_idx
/
m_xpr
.
nestedExpression
().
cols
(),
nth_elem_idx
%
m_xpr
.
nestedExpression
().
cols
());
}
}
EIGEN_DEVICE_FUNC
inline
Scalar
&
coeffRef
(
Index
rowId
,
Index
colId
)
{
EIGEN_STATIC_ASSERT_LVALUE
(
XprType
)
const
RowCol
row_col
=
index_remap
(
rowId
,
colId
);
return
m_argImpl
.
coeffRef
(
row_col
.
first
,
row_col
.
second
);
}
EIGEN_DEVICE_FUNC
inline
const
Scalar
&
coeffRef
(
Index
rowId
,
Index
colId
)
const
{
const
RowCol
row_col
=
index_remap
(
rowId
,
colId
);
return
m_argImpl
.
coeffRef
(
row_col
.
first
,
row_col
.
second
);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
const
CoeffReturnType
coeff
(
Index
rowId
,
Index
colId
)
const
{
const
RowCol
row_col
=
index_remap
(
rowId
,
colId
);
return
m_argImpl
.
coeff
(
row_col
.
first
,
row_col
.
second
);
}
EIGEN_DEVICE_FUNC
inline
Scalar
&
coeffRef
(
Index
index
)
{
EIGEN_STATIC_ASSERT_LVALUE
(
XprType
)
const
RowCol
row_col
=
index_remap
(
Rows
==
1
?
0
:
index
,
Rows
==
1
?
index
:
0
);
return
m_argImpl
.
coeffRef
(
row_col
.
first
,
row_col
.
second
);
}
EIGEN_DEVICE_FUNC
inline
const
Scalar
&
coeffRef
(
Index
index
)
const
{
const
RowCol
row_col
=
index_remap
(
Rows
==
1
?
0
:
index
,
Rows
==
1
?
index
:
0
);
return
m_argImpl
.
coeffRef
(
row_col
.
first
,
row_col
.
second
);
}
EIGEN_DEVICE_FUNC
inline
const
CoeffReturnType
coeff
(
Index
index
)
const
{
const
RowCol
row_col
=
index_remap
(
Rows
==
1
?
0
:
index
,
Rows
==
1
?
index
:
0
);
return
m_argImpl
.
coeff
(
row_col
.
first
,
row_col
.
second
);
}
#if 0
EIGEN_DEVICE_FUNC
template<int LoadMode>
inline PacketScalar packet(Index rowId, Index colId) const
{
const RowCol row_col = index_remap(rowId, colId);
return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC
inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
const RowCol row_col = index_remap(rowId, colId);
m_argImpl.const_cast_derived().template writePacket<Unaligned>
(row_col.first, row_col.second, val);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC
inline PacketScalar packet(Index index) const
{
const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index,
RowsAtCompileTime == 1 ? index : 0);
return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC
inline void writePacket(Index index, const PacketScalar& val)
{
const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index,
RowsAtCompileTime == 1 ? index : 0);
return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second, val);
}
#endif
protected:
evaluator
<
ArgType
>
m_argImpl
;
const
XprType
&
m_xpr
;
};
template
<
typename
ArgType
,
int
Rows
,
int
Cols
,
int
Order
>
struct
reshaped_evaluator
<
ArgType
,
Rows
,
Cols
,
Order
,
/* HasDirectAccess */
true
>
:
mapbase_evaluator
<
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
,
typename
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>::
PlainObject
>
{
typedef
Reshaped
<
ArgType
,
Rows
,
Cols
,
Order
>
XprType
;
typedef
typename
XprType
::
Scalar
Scalar
;
EIGEN_DEVICE_FUNC
explicit
reshaped_evaluator
(
const
XprType
&
xpr
)
:
mapbase_evaluator
<
XprType
,
typename
XprType
::
PlainObject
>
(
xpr
)
{
// TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta
// lifetime
eigen_assert
(((
std
::
uintptr_t
(
xpr
.
data
())
%
plain_enum_max
(
1
,
evaluator
<
XprType
>::
Alignment
))
==
0
)
&&
"data is not aligned"
);
}
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_RESHAPED_H
eigen-master/Eigen/src/Core/ReturnByValue.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_RETURNBYVALUE_H
#define EIGEN_RETURNBYVALUE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
template
<
typename
Derived
>
struct
traits
<
ReturnByValue
<
Derived
>
>
:
public
traits
<
typename
traits
<
Derived
>::
ReturnType
>
{
enum
{
// We're disabling the DirectAccess because e.g. the constructor of
// the Block-with-DirectAccess expression requires to have a coeffRef method.
// Also, we don't want to have to implement the stride stuff.
Flags
=
(
traits
<
typename
traits
<
Derived
>::
ReturnType
>::
Flags
|
EvalBeforeNestingBit
)
&
~
DirectAccessBit
};
};
/* The ReturnByValue object doesn't even have a coeff() method.
* So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix.
* So internal::nested always gives the plain return matrix type.
*
* FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ??
* Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators
*/
template
<
typename
Derived
,
int
n
,
typename
PlainObject
>
struct
nested_eval
<
ReturnByValue
<
Derived
>
,
n
,
PlainObject
>
{
typedef
typename
traits
<
Derived
>::
ReturnType
type
;
};
}
// end namespace internal
/** \class ReturnByValue
* \ingroup Core_Module
*
*/
template
<
typename
Derived
>
class
ReturnByValue
:
public
internal
::
dense_xpr_base
<
ReturnByValue
<
Derived
>
>::
type
,
internal
::
no_assignment_operator
{
public:
typedef
typename
internal
::
traits
<
Derived
>::
ReturnType
ReturnType
;
typedef
typename
internal
::
dense_xpr_base
<
ReturnByValue
>::
type
Base
;
EIGEN_DENSE_PUBLIC_INTERFACE
(
ReturnByValue
)
template
<
typename
Dest
>
EIGEN_DEVICE_FUNC
inline
void
evalTo
(
Dest
&
dst
)
const
{
static_cast
<
const
Derived
*>
(
this
)
->
evalTo
(
dst
);
}
EIGEN_DEVICE_FUNC
constexpr
Index
rows
()
const
noexcept
{
return
static_cast
<
const
Derived
*>
(
this
)
->
rows
();
}
EIGEN_DEVICE_FUNC
constexpr
Index
cols
()
const
noexcept
{
return
static_cast
<
const
Derived
*>
(
this
)
->
cols
();
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
#define Unusable \
YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
class
Unusable
{
Unusable
(
const
Unusable
&
)
{}
Unusable
&
operator
=
(
const
Unusable
&
)
{
return
*
this
;
}
};
const
Unusable
&
coeff
(
Index
)
const
{
return
*
reinterpret_cast
<
const
Unusable
*>
(
this
);
}
const
Unusable
&
coeff
(
Index
,
Index
)
const
{
return
*
reinterpret_cast
<
const
Unusable
*>
(
this
);
}
Unusable
&
coeffRef
(
Index
)
{
return
*
reinterpret_cast
<
Unusable
*>
(
this
);
}
Unusable
&
coeffRef
(
Index
,
Index
)
{
return
*
reinterpret_cast
<
Unusable
*>
(
this
);
}
#undef Unusable
#endif
};
template
<
typename
Derived
>
template
<
typename
OtherDerived
>
EIGEN_DEVICE_FUNC
Derived
&
DenseBase
<
Derived
>::
operator
=
(
const
ReturnByValue
<
OtherDerived
>&
other
)
{
other
.
evalTo
(
derived
());
return
derived
();
}
namespace
internal
{
// Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that
// when a ReturnByValue expression is assigned, the evaluator is not constructed.
// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world
template
<
typename
Derived
>
struct
evaluator
<
ReturnByValue
<
Derived
>
>
:
public
evaluator
<
typename
internal
::
traits
<
Derived
>::
ReturnType
>
{
typedef
ReturnByValue
<
Derived
>
XprType
;
typedef
typename
internal
::
traits
<
Derived
>::
ReturnType
PlainObject
;
typedef
evaluator
<
PlainObject
>
Base
;
EIGEN_DEVICE_FUNC
explicit
evaluator
(
const
XprType
&
xpr
)
:
m_result
(
xpr
.
rows
(),
xpr
.
cols
())
{
internal
::
construct_at
<
Base
>
(
this
,
m_result
);
xpr
.
evalTo
(
m_result
);
}
protected:
PlainObject
m_result
;
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_RETURNBYVALUE_H
eigen-master/Eigen/src/Core/Reverse.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REVERSE_H
#define EIGEN_REVERSE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace internal {

// Traits of a Reverse expression: identical to the nested matrix type except
// that only the RowMajorBit and LvalueBit flags are propagated.
template <typename MatrixType, int Direction>
struct traits<Reverse<MatrixType, Direction> > : traits<MatrixType> {
  typedef typename MatrixType::Scalar Scalar;
  typedef typename traits<MatrixType>::StorageKind StorageKind;
  typedef typename traits<MatrixType>::XprKind XprKind;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
  enum {
    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
    // Keep only storage-order and lvalue-ness from the nested expression.
    Flags = MatrixTypeNested_::Flags & (RowMajorBit | LvalueBit)
  };
};

// Helper that conditionally reverses the lanes of a SIMD packet.
// Primary template (ReversePacket == true): reverse the packet lanes.
template <typename PacketType, bool ReversePacket>
struct reverse_packet_cond {
  static inline PacketType run(const PacketType& x) { return preverse(x); }
};

// Specialization (ReversePacket == false): pass the packet through unchanged.
template <typename PacketType>
struct reverse_packet_cond<PacketType, false> {
  static inline PacketType run(const PacketType& x) { return x; }
};

}  // end namespace internal
/** \class Reverse
 * \ingroup Core_Module
 *
 * \brief Expression of the reverse of a vector or matrix
 *
 * \tparam MatrixType the type of the object of which we are taking the reverse
 * \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections
 *
 * This class represents an expression of the reverse of a vector.
 * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
 * and most of the time this is the only way it is used.
 *
 * \sa MatrixBase::reverse(), VectorwiseOp::reverse()
 */
template <typename MatrixType, int Direction>
class Reverse : public internal::dense_xpr_base<Reverse<MatrixType, Direction> >::type {
 public:
  typedef typename internal::dense_xpr_base<Reverse>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
  typedef internal::remove_all_t<MatrixType> NestedExpression;
  using Base::IsRowMajor;

 protected:
  enum {
    PacketSize = internal::packet_traits<Scalar>::size,
    IsColMajor = !IsRowMajor,
    // Which dimensions are reversed, derived from the Direction parameter.
    ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
    // Packet index offsets: a full packet when the reversed dimension is the
    // inner (storage-contiguous) one, otherwise a single coefficient.
    OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
    OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1,
    // Whether the lanes inside a packet must themselves be reversed, i.e.
    // whenever the inner dimension is one of the reversed dimensions.
    ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) ||
                    ((Direction == Horizontal) && IsRowMajor)
  };
  typedef internal::reverse_packet_cond<PacketScalar, ReversePacket> reverse_packet;

 public:
  EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)

  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_matrix.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_matrix.cols(); }

  // The inner stride is negated: walking forward through the reversed
  // expression walks backward through the nested one.
  EIGEN_DEVICE_FUNC inline Index innerStride() const { return -m_matrix.innerStride(); }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename MatrixType::Nested>& nestedExpression() const {
    return m_matrix;
  }

 protected:
  typename MatrixType::Nested m_matrix;
};
/** \returns an expression of the reverse of *this.
 *
 * This is a lazy expression: no coefficient is copied, the result simply
 * wraps the derived expression.
 *
 * Example: \include MatrixBase_reverse.cpp
 * Output: \verbinclude MatrixBase_reverse.out
 *
 */
template <typename Derived>
EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ReverseReturnType DenseBase<Derived>::reverse() {
  ReverseReturnType reversed(derived());
  return reversed;
}
// reverse const overload moved DenseBase.h due to a CUDA compiler bug
/** This is the "in place" version of reverse: it reverses \c *this.
 *
 * In most cases it is probably better to simply use the reversed expression
 * of a matrix. However, when reversing the matrix data itself is really needed,
 * then this "in-place" version is probably the right choice because it provides
 * the following additional benefits:
 * - less error prone: doing the same operation with .reverse() requires special care:
 * \code m = m.reverse().eval(); \endcode
 * - this API enables reverse operations without the need for a temporary
 * - it allows future optimizations (cache friendliness, etc.)
 *
 * \sa VectorwiseOp::reverseInPlace(), reverse() */
template <typename Derived>
EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::reverseInPlace() {
  // Compile-time sizes of the half blocks swapped below (Dynamic stays Dynamic).
  constexpr int HalfRowsAtCompileTime = RowsAtCompileTime == Dynamic ? Dynamic : RowsAtCompileTime / 2;
  constexpr int HalfColsAtCompileTime = ColsAtCompileTime == Dynamic ? Dynamic : ColsAtCompileTime / 2;
  // Strategy: swap the two outer halves along the larger dimension (each half
  // reversed), then, if that dimension is odd, reverse the middle row/column
  // in place by swapping its two halves.
  if (cols() > rows()) {
    Index half = cols() / 2;
    this->template leftCols<HalfColsAtCompileTime>(half).swap(
        this->template rightCols<HalfColsAtCompileTime>(half).reverse());
    if ((cols() % 2) == 1) {
      // Odd number of columns: the middle column is not touched by the block
      // swap above; reverse it by swapping its top and bottom halves.
      Index half2 = rows() / 2;
      col(half).template head<HalfRowsAtCompileTime>(half2).swap(
          col(half).template tail<HalfRowsAtCompileTime>(half2).reverse());
    }
  } else {
    Index half = rows() / 2;
    this->template topRows<HalfRowsAtCompileTime>(half).swap(
        this->template bottomRows<HalfRowsAtCompileTime>(half).reverse());
    if ((rows() % 2) == 1) {
      // Odd number of rows: reverse the middle row the same way.
      Index half2 = cols() / 2;
      row(half).template head<HalfColsAtCompileTime>(half2).swap(
          row(half).template tail<HalfColsAtCompileTime>(half2).reverse());
    }
  }
}
namespace internal {

// Dispatch helper for VectorwiseOp::reverseInPlace(), specialized per direction.
template <int Direction>
struct vectorwise_reverse_inplace_impl;

// Vertical: reverse each column in place by swapping the top half with the
// (column-wise reversed) bottom half. A middle row, if any, is left unchanged.
template <>
struct vectorwise_reverse_inplace_impl<Vertical> {
  template <typename ExpressionType>
  static void run(ExpressionType& xpr) {
    constexpr Index HalfAtCompileTime =
        ExpressionType::RowsAtCompileTime == Dynamic ? Dynamic : ExpressionType::RowsAtCompileTime / 2;
    Index half = xpr.rows() / 2;
    xpr.template topRows<HalfAtCompileTime>(half).swap(
        xpr.template bottomRows<HalfAtCompileTime>(half).colwise().reverse());
  }
};

// Horizontal: reverse each row in place by swapping the left half with the
// (row-wise reversed) right half. A middle column, if any, is left unchanged.
template <>
struct vectorwise_reverse_inplace_impl<Horizontal> {
  template <typename ExpressionType>
  static void run(ExpressionType& xpr) {
    constexpr Index HalfAtCompileTime =
        ExpressionType::ColsAtCompileTime == Dynamic ? Dynamic : ExpressionType::ColsAtCompileTime / 2;
    Index half = xpr.cols() / 2;
    xpr.template leftCols<HalfAtCompileTime>(half).swap(
        xpr.template rightCols<HalfAtCompileTime>(half).rowwise().reverse());
  }
};

}  // end namespace internal
/** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this.
 *
 * In most cases it is probably better to simply use the reversed expression
 * of a matrix. However, when reversing the matrix data itself is really needed,
 * then this "in-place" version is probably the right choice because it provides
 * the following additional benefits:
 * - less error prone: doing the same operation with .reverse() requires special care:
 * \code m = m.reverse().eval(); \endcode
 * - this API enables reverse operations without the need for a temporary
 *
 * \sa DenseBase::reverseInPlace(), reverse() */
template <typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType, Direction>::reverseInPlace() {
  // Forward to the direction-specific implementation (Vertical/Horizontal).
  internal::vectorwise_reverse_inplace_impl<Direction>::run(m_matrix);
}
}
// end namespace Eigen
#endif // EIGEN_REVERSE_H
eigen-master/Eigen/src/Core/Select.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELECT_H
#define EIGEN_SELECT_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class Select
* \ingroup Core_Module
*
* \brief Expression of a coefficient wise version of the C++ ternary operator ?:
*
* \tparam ConditionMatrixType the type of the \em condition expression which must be a boolean matrix
* \tparam ThenMatrixType the type of the \em then expression
* \tparam ElseMatrixType the type of the \em else expression
*
* This class represents an expression of a coefficient wise version of the C++ ternary operator ?:.
* It is the return type of DenseBase::select() and most of the time this is the only way it is used.
*
* \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
*/
namespace internal {
// Traits of a Select expression: scalar type and expression kind come from the
// "then" branch; sizes come from the condition matrix; the result is row-major
// only when both branches are.
template <typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > : traits<ThenMatrixType> {
  typedef typename traits<ThenMatrixType>::Scalar Scalar;
  typedef Dense StorageKind;
  typedef typename traits<ThenMatrixType>::XprKind XprKind;
  typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
  typedef typename ThenMatrixType::Nested ThenMatrixNested;
  typedef typename ElseMatrixType::Nested ElseMatrixNested;
  enum {
    RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,
    ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
    MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
    // RowMajorBit is set only if both branches agree on storage order.
    Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit
  };
};
}  // namespace internal
// Coefficient-wise ternary selection expression: coeff(i,j) yields
// then(i,j) if condition(i,j) is true, else(i,j) otherwise.
// All three operands must have matching dimensions (checked at construction).
template <typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
class Select : public internal::dense_xpr_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type,
               internal::no_assignment_operator {
 public:
  typedef typename internal::dense_xpr_base<Select>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Select)

  inline EIGEN_DEVICE_FUNC Select(const ConditionMatrixType& a_conditionMatrix, const ThenMatrixType& a_thenMatrix,
                                  const ElseMatrixType& a_elseMatrix)
      : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix) {
    // All three operands must have the same dimensions.
    eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
    eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
  }

  // Dimensions are those of the condition matrix.
  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_condition.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_condition.cols(); }

  // 2D coefficient access: evaluates only the selected branch's coefficient.
  inline EIGEN_DEVICE_FUNC const Scalar coeff(Index i, Index j) const {
    if (m_condition.coeff(i, j))
      return m_then.coeff(i, j);
    else
      return m_else.coeff(i, j);
  }

  // Linear coefficient access variant.
  inline EIGEN_DEVICE_FUNC const Scalar coeff(Index i) const {
    if (m_condition.coeff(i))
      return m_then.coeff(i);
    else
      return m_else.coeff(i);
  }

  // Accessors to the nested operands.
  inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const { return m_condition; }

  inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const { return m_then; }

  inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const { return m_else; }

 protected:
  typename ConditionMatrixType::Nested m_condition;
  typename ThenMatrixType::Nested m_then;
  typename ElseMatrixType::Nested m_else;
};
/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
 * if \c *this(i,j) != Scalar(0), and \a elseMatrix(i,j) otherwise.
 *
 * Example: \include MatrixBase_select.cpp
 * Output: \verbinclude MatrixBase_select.out
 *
 * \sa DenseBase::bitwiseSelect(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&)
 */
// Implemented as a lazy CwiseTernaryOp over (then, else, condition) using the
// scalar_boolean_select_op functor; *this plays the role of the condition.
template <typename Derived>
template <typename ThenDerived, typename ElseDerived>
inline EIGEN_DEVICE_FUNC
    CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                      typename DenseBase<ElseDerived>::Scalar,
                                                      typename DenseBase<Derived>::Scalar>,
                   ThenDerived, ElseDerived, Derived>
    DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
                               const DenseBase<ElseDerived>& elseMatrix) const {
  using Op = internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                typename DenseBase<ElseDerived>::Scalar, Scalar>;
  return CwiseTernaryOp<Op, ThenDerived, ElseDerived, Derived>(thenMatrix.derived(), elseMatrix.derived(), derived(),
                                                               Op());
}
/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
 * the \em else expression being a scalar value.
 *
 * \sa DenseBase::booleanSelect(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
 */
// The scalar "else" value is broadcast to a constant expression of the same
// dimensions as *this before building the ternary expression.
template <typename Derived>
template <typename ThenDerived>
inline EIGEN_DEVICE_FUNC
    CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                      typename DenseBase<ThenDerived>::Scalar,
                                                      typename DenseBase<Derived>::Scalar>,
                   ThenDerived, typename DenseBase<ThenDerived>::ConstantReturnType, Derived>
    DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
                               const typename DenseBase<ThenDerived>::Scalar& elseScalar) const {
  using ElseConstantType = typename DenseBase<ThenDerived>::ConstantReturnType;
  using Op = internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                typename DenseBase<ThenDerived>::Scalar, Scalar>;
  return CwiseTernaryOp<Op, ThenDerived, ElseConstantType, Derived>(
      thenMatrix.derived(), ElseConstantType(rows(), cols(), elseScalar), derived(), Op());
}
/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
 * the \em then expression being a scalar value.
 *
 * \sa DenseBase::booleanSelect(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
 */
// Mirror of the (matrix, scalar) overload: the scalar "then" value is broadcast
// to a constant expression of the same dimensions as *this.
template <typename Derived>
template <typename ElseDerived>
inline EIGEN_DEVICE_FUNC
    CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar,
                                                      typename DenseBase<ElseDerived>::Scalar,
                                                      typename DenseBase<Derived>::Scalar>,
                   typename DenseBase<ElseDerived>::ConstantReturnType, ElseDerived, Derived>
    DenseBase<Derived>::select(const typename DenseBase<ElseDerived>::Scalar& thenScalar,
                               const DenseBase<ElseDerived>& elseMatrix) const {
  using ThenConstantType = typename DenseBase<ElseDerived>::ConstantReturnType;
  using Op = internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar,
                                                typename DenseBase<ElseDerived>::Scalar, Scalar>;
  return CwiseTernaryOp<Op, ThenConstantType, ElseDerived, Derived>(
      ThenConstantType(rows(), cols(), thenScalar), elseMatrix.derived(), derived(), Op());
}
}
// end namespace Eigen
#endif // EIGEN_SELECT_H
eigen-master/Eigen/src/Core/SelfAdjointView.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINTMATRIX_H
#define EIGEN_SELFADJOINTMATRIX_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class SelfAdjointView
* \ingroup Core_Module
*
*
* \brief Expression of a selfadjoint matrix from a triangular part of a dense matrix
*
* \tparam MatrixType the type of the dense matrix storing the coefficients
* \tparam TriangularPart can be either \c #Lower or \c #Upper
*
* This class is an expression of a sefladjoint matrix from a triangular part of a matrix
* with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()
* and most of the time this is the only way that it is used.
*
* \sa class TriangularBase, MatrixBase::selfadjointView()
*/
namespace internal {
// Traits of a SelfAdjointView: inherits from the nested matrix traits, adds
// the SelfAdjoint mode bit, and strips flags that a selfadjoint view cannot
// honor (packet, direct and linear access).
template <typename MatrixType, unsigned int UpLo>
struct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType> {
  typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested;
  typedef remove_all_t<MatrixTypeNested> MatrixTypeNestedCleaned;
  typedef MatrixType ExpressionType;
  typedef typename MatrixType::PlainObject FullMatrixType;
  enum {
    Mode = UpLo | SelfAdjoint,
    // Writable only if the nested expression is an lvalue.
    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
    Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits | FlagsLvalueBit) &
            (~(PacketAccessBit | DirectAccessBit |
               LinearAccessBit))  // FIXME these flags should be preserved
  };
};
}  // namespace internal
// Expression of a selfadjoint matrix taken from the Upper or Lower triangular
// part of a dense matrix; see the \class documentation above. Only the
// referenced triangular part is ever read; the other part is implied by
// conjugate symmetry.
template <typename MatrixType_, unsigned int UpLo>
class SelfAdjointView : public TriangularBase<SelfAdjointView<MatrixType_, UpLo> > {
 public:
  // Only plain Upper or Lower modes are meaningful here.
  EIGEN_STATIC_ASSERT(UpLo == Lower || UpLo == Upper, SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY)
  typedef MatrixType_ MatrixType;
  typedef TriangularBase<SelfAdjointView> Base;
  typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested;
  typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
  typedef MatrixTypeNestedCleaned NestedExpression;

  /** \brief The type of coefficients in this matrix */
  typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef internal::remove_all_t<typename MatrixType::ConjugateReturnType> MatrixConjugateReturnType;
  typedef SelfAdjointView<std::add_const_t<MatrixType>, UpLo> ConstSelfAdjointView;

  enum {
    Mode = internal::traits<SelfAdjointView>::Mode,
    Flags = internal::traits<SelfAdjointView>::Flags,
    // Transposing swaps the referenced triangular part (Upper <-> Lower).
    TransposeMode = ((int(Mode) & int(Upper)) ? Lower : 0) | ((int(Mode) & int(Lower)) ? Upper : 0)
  };
  typedef typename MatrixType::PlainObject PlainObject;

  EIGEN_DEVICE_FUNC explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix) {}

  // Size and stride queries simply forward to the nested expression.
  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_matrix.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_matrix.cols(); }
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_matrix.outerStride(); }
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_matrix.innerStride(); }

  /** \sa MatrixBase::coeff()
   * \warning the coordinates must fit into the referenced triangular part
   */
  EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const {
    Base::check_coordinates_internal(row, col);
    return m_matrix.coeff(row, col);
  }

  /** \sa MatrixBase::coeffRef()
   * \warning the coordinates must fit into the referenced triangular part
   */
  EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) {
    EIGEN_STATIC_ASSERT_LVALUE(SelfAdjointView);
    Base::check_coordinates_internal(row, col);
    return m_matrix.coeffRef(row, col);
  }

  /** \internal */
  EIGEN_DEVICE_FUNC const MatrixTypeNestedCleaned& _expression() const { return m_matrix; }

  EIGEN_DEVICE_FUNC const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
  EIGEN_DEVICE_FUNC MatrixTypeNestedCleaned& nestedExpression() { return m_matrix; }

  /** Efficient triangular matrix times vector/matrix product */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC const Product<SelfAdjointView, OtherDerived> operator*(const MatrixBase<OtherDerived>& rhs) const {
    return Product<SelfAdjointView, OtherDerived>(*this, rhs.derived());
  }

  /** Efficient vector/matrix times triangular matrix product */
  template <typename OtherDerived>
  friend EIGEN_DEVICE_FUNC const Product<OtherDerived, SelfAdjointView> operator*(const MatrixBase<OtherDerived>& lhs,
                                                                                  const SelfAdjointView& rhs) {
    return Product<OtherDerived, SelfAdjointView>(lhs.derived(), rhs);
  }

  // Scalar * view: scale the nested expression, then re-wrap it as a
  // selfadjoint view of the scaled expression.
  friend EIGEN_DEVICE_FUNC const SelfAdjointView<
      const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar, MatrixType, product), UpLo>
  operator*(const Scalar& s, const SelfAdjointView& mat) {
    return (s * mat.nestedExpression()).template selfadjointView<UpLo>();
  }

  /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this:
   * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$
   * \returns a reference to \c *this
   *
   * The vectors \a u and \c v \b must be column vectors, however they can be
   * a adjoint expression without any overhead. Only the meaningful triangular
   * part of the matrix is updated, the rest is left unchanged.
   *
   * \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
   */
  template <typename DerivedU, typename DerivedV>
  EIGEN_DEVICE_FUNC SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v,
                                                const Scalar& alpha = Scalar(1));

  /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
   * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
   *
   * \returns a reference to \c *this
   *
   * Note that to perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
   * call this function with u.adjoint().
   *
   * \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
   */
  template <typename DerivedU>
  EIGEN_DEVICE_FUNC SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

  /** \returns an expression of a triangular view extracted from the current selfadjoint view of a given triangular part
   *
   * The parameter \a TriMode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
   * \c #Lower, \c #StrictlyLower, \c #UnitLower.
   *
   * If \c TriMode references the same triangular part than \c *this, then this method simply return a \c TriangularView
   * of the nested expression, otherwise, the nested expression is first transposed, thus returning a \c
   * TriangularView<Transpose<MatrixType>> object.
   *
   * \sa MatrixBase::triangularView(), class TriangularView
   */
  template <unsigned int TriMode>
  EIGEN_DEVICE_FUNC
      std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)), TriangularView<MatrixType, TriMode>,
                         TriangularView<typename MatrixType::AdjointReturnType, TriMode> >
      triangularView() const {
    // tmp1/tmp2 select between the plain nested matrix (same triangular part)
    // and its transpose/adjoint (opposite part) at compile time.
    std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)), MatrixType&,
                       typename MatrixType::ConstTransposeReturnType>
        tmp1(m_matrix);
    std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)), MatrixType&,
                       typename MatrixType::AdjointReturnType>
        tmp2(tmp1);
    return std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)),
                              TriangularView<MatrixType, TriMode>,
                              TriangularView<typename MatrixType::AdjointReturnType, TriMode> >(tmp2);
  }

  typedef SelfAdjointView<const MatrixConjugateReturnType, UpLo> ConjugateReturnType;
  /** \sa MatrixBase::conjugate() const */
  EIGEN_DEVICE_FUNC inline const ConjugateReturnType conjugate() const {
    return ConjugateReturnType(m_matrix.conjugate());
  }

  /** \returns an expression of the complex conjugate of \c *this if Cond==true,
   * returns \c *this otherwise.
   */
  template <bool Cond>
  EIGEN_DEVICE_FUNC inline std::conditional_t<Cond, ConjugateReturnType, ConstSelfAdjointView> conjugateIf() const {
    typedef std::conditional_t<Cond, ConjugateReturnType, ConstSelfAdjointView> ReturnType;
    return ReturnType(m_matrix.template conjugateIf<Cond>());
  }

  typedef SelfAdjointView<const typename MatrixType::AdjointReturnType, TransposeMode> AdjointReturnType;
  /** \sa MatrixBase::adjoint() const */
  EIGEN_DEVICE_FUNC inline const AdjointReturnType adjoint() const { return AdjointReturnType(m_matrix.adjoint()); }

  typedef SelfAdjointView<typename MatrixType::TransposeReturnType, TransposeMode> TransposeReturnType;
  /** \sa MatrixBase::transpose() */
  // Non-const overload, enabled only when the nested expression is an lvalue.
  template <class Dummy = int>
  EIGEN_DEVICE_FUNC inline TransposeReturnType transpose(
      std::enable_if_t<Eigen::internal::is_lvalue<MatrixType>::value, Dummy*> = nullptr) {
    typename MatrixType::TransposeReturnType tmp(m_matrix);
    return TransposeReturnType(tmp);
  }

  typedef SelfAdjointView<const typename MatrixType::ConstTransposeReturnType, TransposeMode> ConstTransposeReturnType;
  /** \sa MatrixBase::transpose() const */
  EIGEN_DEVICE_FUNC inline const ConstTransposeReturnType transpose() const {
    return ConstTransposeReturnType(m_matrix.transpose());
  }

  /** \returns a const expression of the main diagonal of the matrix \c *this
   *
   * This method simply returns the diagonal of the nested expression, thus by-passing the SelfAdjointView decorator.
   *
   * \sa MatrixBase::diagonal(), class Diagonal */
  EIGEN_DEVICE_FUNC typename MatrixType::ConstDiagonalReturnType diagonal() const {
    return typename MatrixType::ConstDiagonalReturnType(m_matrix);
  }

  /////////// Cholesky module ///////////

  const LLT<PlainObject, UpLo> llt() const;
  const LDLT<PlainObject, UpLo> ldlt() const;

  /////////// Eigenvalue module ///////////

  /** Real part of #Scalar */
  typedef typename NumTraits<Scalar>::Real RealScalar;
  /** Return type of eigenvalues() */
  typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType;

  EIGEN_DEVICE_FUNC EigenvaluesReturnType eigenvalues() const;
  EIGEN_DEVICE_FUNC RealScalar operatorNorm() const;

 protected:
  MatrixTypeNested m_matrix;
};
// template<typename OtherDerived, typename MatrixType, unsigned int UpLo>
// internal::selfadjoint_matrix_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >
// operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView<MatrixType,UpLo>& rhs)
// {
// return internal::matrix_selfadjoint_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo>
// >(lhs.derived(),rhs);
// }

// selfadjoint to dense matrix

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to
// make it work)
template <typename MatrixType, unsigned int Mode>
struct evaluator_traits<SelfAdjointView<MatrixType, Mode> > {
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SelfAdjointShape Shape;
};

// Assignment kernel used when copying a selfadjoint view into a dense matrix:
// each off-diagonal coefficient of the referenced triangular part is written
// to both its own position and the conjugated mirror position.
template <int UpLo, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor,
          int Version>
class triangular_dense_assignment_kernel<UpLo, SelfAdjoint, SetOpposite, DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor,
                                         Version>
    : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> {
 protected:
  typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base;
  typedef typename Base::DstXprType DstXprType;
  typedef typename Base::SrcXprType SrcXprType;
  using Base::m_dst;
  using Base::m_functor;
  using Base::m_src;

 public:
  typedef typename Base::DstEvaluatorType DstEvaluatorType;
  typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
  typedef typename Base::Scalar Scalar;
  typedef typename Base::AssignmentTraits AssignmentTraits;

  EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType& dst, const SrcEvaluatorType& src,
                                                       const Functor& func, DstXprType& dstExpr)
      : Base(dst, src, func, dstExpr) {}

  // Off-diagonal coefficient: write (row,col), and mirror the conjugate to
  // (col,row) to materialize the implied selfadjoint half.
  EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col) {
    eigen_internal_assert(row != col);
    Scalar tmp = m_src.coeff(row, col);
    m_functor.assignCoeff(m_dst.coeffRef(row, col), tmp);
    m_functor.assignCoeff(m_dst.coeffRef(col, row), numext::conj(tmp));
  }

  // Diagonal coefficients have no mirror; use the plain base assignment.
  EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id) { Base::assignCoeff(id, id); }

  // The opposite triangular part is always filled by assignCoeff's mirroring,
  // so this hook must never be reached.
  EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index) {
    eigen_internal_assert(false && "should never be called");
  }
};

}  // end namespace internal
/***************************************************************************
 * Implementation of MatrixBase methods
 ***************************************************************************/

/** This is the const version of MatrixBase::selfadjointView() */
template <typename Derived>
template <unsigned int UpLo>
EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
MatrixBase<Derived>::selfadjointView() const {
  return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived());
}
/** \returns an expression of a symmetric/self-adjoint view extracted from the upper or lower triangular part of the
 * current matrix
 *
 * The parameter \a UpLo can be either \c #Upper or \c #Lower
 *
 * Example: \include MatrixBase_selfadjointView.cpp
 * Output: \verbinclude MatrixBase_selfadjointView.out
 *
 * \sa class SelfAdjointView
 */
template <typename Derived>
template <unsigned int UpLo>
EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
MatrixBase<Derived>::selfadjointView() {
  return typename SelfAdjointViewReturnType<UpLo>::Type(derived());
}
}
// end namespace Eigen
#endif // EIGEN_SELFADJOINTMATRIX_H
eigen-master/Eigen/src/Core/SelfCwiseBinaryOp.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFCWISEBINARYOP_H
#define EIGEN_SELFCWISEBINARYOP_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
// TODO generalize the scalar type of 'other'
// Scale every coefficient of *this by 'other': assign a same-sized constant
// expression through a multiply-assign functor.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other) {
  typedef internal::mul_assign_op<Scalar, Scalar> MulAssignOp;
  internal::call_assignment(this->derived(), PlainObject::Constant(rows(), cols(), other), MulAssignOp());
  return this->derived();
}
// Divide every coefficient of *this by 'other': assign a same-sized constant
// expression through a divide-assign functor.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other) {
  typedef internal::div_assign_op<Scalar, Scalar> DivAssignOp;
  internal::call_assignment(this->derived(), PlainObject::Constant(rows(), cols(), other), DivAssignOp());
  return this->derived();
}
}
// end namespace Eigen
#endif // EIGEN_SELFCWISEBINARYOP_H
Prev
1
2
3
4
5
6
7
8
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment