Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
lietorch
Commits
266d4fd9
Commit
266d4fd9
authored
Jun 03, 2025
by
zhanggzh
Browse files
add lietorch src code and eigen src code, update readme
parent
e7df8655
Changes
148
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
8511 additions
and
0 deletions
+8511
-0
eigen-master/Eigen/src/Core/ArrayWrapper.h
eigen-master/Eigen/src/Core/ArrayWrapper.h
+165
-0
eigen-master/Eigen/src/Core/Assign.h
eigen-master/Eigen/src/Core/Assign.h
+80
-0
eigen-master/Eigen/src/Core/AssignEvaluator.h
eigen-master/Eigen/src/Core/AssignEvaluator.h
+1057
-0
eigen-master/Eigen/src/Core/Assign_MKL.h
eigen-master/Eigen/src/Core/Assign_MKL.h
+183
-0
eigen-master/Eigen/src/Core/BandMatrix.h
eigen-master/Eigen/src/Core/BandMatrix.h
+338
-0
eigen-master/Eigen/src/Core/Block.h
eigen-master/Eigen/src/Core/Block.h
+429
-0
eigen-master/Eigen/src/Core/CommaInitializer.h
eigen-master/Eigen/src/Core/CommaInitializer.h
+149
-0
eigen-master/Eigen/src/Core/ConditionEstimator.h
eigen-master/Eigen/src/Core/ConditionEstimator.h
+173
-0
eigen-master/Eigen/src/Core/CoreEvaluators.h
eigen-master/Eigen/src/Core/CoreEvaluators.h
+2051
-0
eigen-master/Eigen/src/Core/CoreIterators.h
eigen-master/Eigen/src/Core/CoreIterators.h
+141
-0
eigen-master/Eigen/src/Core/CwiseBinaryOp.h
eigen-master/Eigen/src/Core/CwiseBinaryOp.h
+166
-0
eigen-master/Eigen/src/Core/CwiseNullaryOp.h
eigen-master/Eigen/src/Core/CwiseNullaryOp.h
+977
-0
eigen-master/Eigen/src/Core/CwiseTernaryOp.h
eigen-master/Eigen/src/Core/CwiseTernaryOp.h
+171
-0
eigen-master/Eigen/src/Core/CwiseUnaryOp.h
eigen-master/Eigen/src/Core/CwiseUnaryOp.h
+91
-0
eigen-master/Eigen/src/Core/CwiseUnaryView.h
eigen-master/Eigen/src/Core/CwiseUnaryView.h
+167
-0
eigen-master/Eigen/src/Core/DenseBase.h
eigen-master/Eigen/src/Core/DenseBase.h
+661
-0
eigen-master/Eigen/src/Core/DenseCoeffsBase.h
eigen-master/Eigen/src/Core/DenseCoeffsBase.h
+562
-0
eigen-master/Eigen/src/Core/DenseStorage.h
eigen-master/Eigen/src/Core/DenseStorage.h
+578
-0
eigen-master/Eigen/src/Core/DeviceWrapper.h
eigen-master/Eigen/src/Core/DeviceWrapper.h
+153
-0
eigen-master/Eigen/src/Core/Diagonal.h
eigen-master/Eigen/src/Core/Diagonal.h
+219
-0
No files found.
Too many changes to show.
To preserve performance only
148 of 148+
files are displayed.
Plain diff
Email patch
eigen-master/Eigen/src/Core/ArrayWrapper.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ARRAYWRAPPER_H
#define EIGEN_ARRAYWRAPPER_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class ArrayWrapper
* \ingroup Core_Module
*
* \brief Expression of a mathematical vector or matrix as an array object
*
* This class is the return type of MatrixBase::array(), and most of the time
* this is the only way it is used.
*
* \sa MatrixBase::array(), class MatrixWrapper
*/
namespace internal {
// Traits for ArrayWrapper: reuse the traits of the wrapped (nested) expression,
// but re-tag it as an array expression (ArrayXpr) and recompute the flags so the
// wrapper is an lvalue exactly when the wrapped expression is.
template <typename ExpressionType>
struct traits<ArrayWrapper<ExpressionType> > : public traits<remove_all_t<typename ExpressionType::Nested> > {
  typedef ArrayXpr XprKind;
  // Let's remove NestByRefBit
  enum {
    // Flags inherited from the nested expression.
    Flags0 = traits<remove_all_t<typename ExpressionType::Nested> >::Flags,
    // LvalueBit is kept only if the wrapped expression is writable.
    LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
    // Strip NestByRefBit and LvalueBit, then re-add LvalueBit when appropriate.
    Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
  };
};
}  // namespace internal
// Expression wrapper that presents a matrix expression with array (coefficient-wise)
// semantics. All coefficient access, sizes, strides and resizing forward to the
// wrapped expression; only the XprKind (and thus the operator semantics) changes.
template <typename ExpressionType>
class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> > {
 public:
  typedef ArrayBase<ArrayWrapper> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
  typedef internal::remove_all_t<ExpressionType> NestedExpression;

  // Scalar is const-qualified unless the wrapped expression is writable,
  // so data() cannot be used to mutate a read-only expression.
  typedef std::conditional_t<internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar>
      ScalarWithConstIfNotLvalue;

  // Stored by reference or by value depending on ref_selector's nesting policy.
  typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;

  using Base::coeffRef;

  EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}

  // Sizes and strides simply forward to the wrapped expression.
  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_expression.outerStride(); }
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_expression.innerStride(); }

  // Raw data access; writable only when the wrapped expression is an lvalue.
  EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
  EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_expression.data(); }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
    return m_expression.coeffRef(rowId, colId);
  }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); }

  // Evaluates the wrapped expression into dst.
  template <typename Dest>
  EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const {
    dst = m_expression;
  }

  // Read-only access to the wrapped expression.
  EIGEN_DEVICE_FUNC const internal::remove_all_t<NestedExpressionType>& nestedExpression() const {
    return m_expression;
  }

  /** Forwards the resizing request to the nested expression
   * \sa DenseBase::resize(Index) */
  EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); }
  /** Forwards the resizing request to the nested expression
   * \sa DenseBase::resize(Index,Index)*/
  EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); }

 protected:
  NestedExpressionType m_expression;
};
/** \class MatrixWrapper
* \ingroup Core_Module
*
* \brief Expression of an array as a mathematical vector or matrix
*
* This class is the return type of ArrayBase::matrix(), and most of the time
* this is the only way it is used.
*
* \sa MatrixBase::matrix(), class ArrayWrapper
*/
namespace internal {
// Traits for MatrixWrapper: mirror of traits<ArrayWrapper> — inherit the nested
// expression's traits, re-tag as a matrix expression (MatrixXpr), and fix up flags.
template <typename ExpressionType>
struct traits<MatrixWrapper<ExpressionType> > : public traits<remove_all_t<typename ExpressionType::Nested> > {
  typedef MatrixXpr XprKind;
  // Let's remove NestByRefBit
  enum {
    // Flags inherited from the nested expression.
    Flags0 = traits<remove_all_t<typename ExpressionType::Nested> >::Flags,
    // LvalueBit is kept only if the wrapped expression is writable.
    LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
    // Strip NestByRefBit and LvalueBit, then re-add LvalueBit when appropriate.
    Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
  };
};
}  // namespace internal
// Expression wrapper that presents an array expression with matrix (linear-algebra)
// semantics. Mirror of ArrayWrapper: all access forwards to the wrapped expression.
template <typename ExpressionType>
class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> > {
 public:
  typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
  typedef internal::remove_all_t<ExpressionType> NestedExpression;

  // Scalar is const-qualified unless the wrapped expression is writable.
  typedef std::conditional_t<internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar>
      ScalarWithConstIfNotLvalue;

  // Stored by reference or by value depending on ref_selector's nesting policy.
  typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;

  using Base::coeffRef;

  EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}

  // Sizes and strides simply forward to the wrapped expression.
  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_expression.outerStride(); }
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_expression.innerStride(); }

  // Raw data access; writable only when the wrapped expression is an lvalue.
  EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
  EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_expression.data(); }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
    return m_expression.derived().coeffRef(rowId, colId);
  }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); }

  // Read-only access to the wrapped expression.
  EIGEN_DEVICE_FUNC const internal::remove_all_t<NestedExpressionType>& nestedExpression() const {
    return m_expression;
  }

  /** Forwards the resizing request to the nested expression
   * \sa DenseBase::resize(Index) */
  EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); }
  /** Forwards the resizing request to the nested expression
   * \sa DenseBase::resize(Index,Index)*/
  EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); }

 protected:
  NestedExpressionType m_expression;
};
}
// end namespace Eigen
#endif // EIGEN_ARRAYWRAPPER_H
eigen-master/Eigen/src/Core/Assign.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ASSIGN_H
#define EIGEN_ASSIGN_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
// Assigns `other` to *this coefficient-by-coefficient WITHOUT an aliasing check,
// bypassing call_assignment's temporary. Compile-time checks enforce that *this
// is writable, the static sizes match, and the scalar types are identical; the
// runtime sizes are checked with eigen_assert.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::lazyAssign(const DenseBase<OtherDerived>& other) {
  enum { SameType = internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value };

  EIGEN_STATIC_ASSERT_LVALUE(Derived)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived, OtherDerived)
  EIGEN_STATIC_ASSERT(SameType, YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  eigen_assert(rows() == other.rows() && cols() == other.cols());
  // No-alias path: the caller guarantees `other` does not alias *this.
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}
// Generic dense assignment from any DenseBase expression. Delegates to the
// assignment machinery, which handles aliasing and resizing as needed.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other) {
  Derived& dst = derived();
  internal::call_assignment(dst, other.derived());
  return dst;
}
// Copy assignment for expressions of the same DenseBase type. Routed through
// the generic assignment machinery so aliasing is handled uniformly.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other) {
  Derived& dst = derived();
  internal::call_assignment(dst, other.derived());
  return dst;
}
// Copy assignment for expressions of the same MatrixBase type. Routed through
// the generic assignment machinery so aliasing is handled uniformly.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other) {
  Derived& dst = derived();
  internal::call_assignment(dst, other.derived());
  return dst;
}
// Assignment to a matrix expression from any dense (matrix or array) expression.
// Delegates to the generic assignment machinery.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other) {
  Derived& dst = derived();
  internal::call_assignment(dst, other.derived());
  return dst;
}
// Assignment from any EigenBase expression (e.g. special matrices that are not
// DenseBase). Delegates to the generic assignment machinery.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other) {
  Derived& dst = derived();
  internal::call_assignment(dst, other.derived());
  return dst;
}
// Assignment from a ReturnByValue expression: such expressions know how to
// evaluate themselves directly into a destination, so ask them to do so.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other) {
  Derived& dst = derived();
  other.derived().evalTo(dst);
  return dst;
}
}
// end namespace Eigen
#endif // EIGEN_ASSIGN_H
eigen-master/Eigen/src/Core/AssignEvaluator.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ASSIGN_EVALUATOR_H
#define EIGEN_ASSIGN_EVALUATOR_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
// This implementation is based on Assign.h
namespace
internal
{
/***************************************************************************
* Part 1 : the logic deciding a strategy for traversal and unrolling *
***************************************************************************/
// copy_using_evaluator_traits is based on assign_traits
// copy_using_evaluator_traits is based on assign_traits
//
// Compile-time decision logic for dense assignment: given the destination and
// source evaluators and the assignment functor, it selects a Traversal strategy
// (default / linear / inner-vectorized / linear-vectorized / slice-vectorized /
// all-at-once) and an Unrolling policy, plus the packet type used for
// vectorized paths. Everything here is computed at compile time.
template <typename DstEvaluator, typename SrcEvaluator, typename AssignFunc, int MaxPacketSize = Dynamic>
struct copy_using_evaluator_traits {
  using Src = typename SrcEvaluator::XprType;
  using Dst = typename DstEvaluator::XprType;
  using DstScalar = typename Dst::Scalar;

  static constexpr int DstFlags = DstEvaluator::Flags;
  static constexpr int SrcFlags = SrcEvaluator::Flags;

 public:
  static constexpr int DstAlignment = DstEvaluator::Alignment;
  static constexpr int SrcAlignment = SrcEvaluator::Alignment;
  // The alignment both sides can guarantee simultaneously.
  static constexpr int JointAlignment = plain_enum_min(DstAlignment, SrcAlignment);
  static constexpr bool DstHasDirectAccess = bool(DstFlags & DirectAccessBit);
  static constexpr bool SrcIsRowMajor = bool(SrcFlags & RowMajorBit);
  static constexpr bool DstIsRowMajor = bool(DstFlags & RowMajorBit);
  static constexpr bool IsVectorAtCompileTime = Dst::IsVectorAtCompileTime;
  // Combine src/dst compile-time sizes, preferring fixed over Dynamic.
  static constexpr int RowsAtCompileTime = size_prefer_fixed(Src::RowsAtCompileTime, Dst::RowsAtCompileTime);
  static constexpr int ColsAtCompileTime = size_prefer_fixed(Src::ColsAtCompileTime, Dst::ColsAtCompileTime);
  static constexpr int SizeAtCompileTime = size_at_compile_time(RowsAtCompileTime, ColsAtCompileTime);
  static constexpr int MaxRowsAtCompileTime =
      min_size_prefer_fixed(Src::MaxRowsAtCompileTime, Dst::MaxRowsAtCompileTime);
  static constexpr int MaxColsAtCompileTime =
      min_size_prefer_fixed(Src::MaxColsAtCompileTime, Dst::MaxColsAtCompileTime);
  static constexpr int MaxSizeAtCompileTime =
      min_size_prefer_fixed(Src::MaxSizeAtCompileTime, Dst::MaxSizeAtCompileTime);
  // Inner size follows the destination's storage order (whole size for vectors).
  static constexpr int InnerSizeAtCompileTime =
      IsVectorAtCompileTime ? SizeAtCompileTime : DstIsRowMajor ? ColsAtCompileTime : RowsAtCompileTime;
  static constexpr int MaxInnerSizeAtCompileTime =
      IsVectorAtCompileTime ? MaxSizeAtCompileTime : DstIsRowMajor ? MaxColsAtCompileTime : MaxRowsAtCompileTime;
  // Sizes clamped by the caller-imposed MaxPacketSize cap.
  static constexpr int RestrictedInnerSize = min_size_prefer_fixed(MaxInnerSizeAtCompileTime, MaxPacketSize);
  static constexpr int RestrictedLinearSize = min_size_prefer_fixed(MaxSizeAtCompileTime, MaxPacketSize);
  static constexpr int OuterStride = outer_stride_at_compile_time<Dst>::ret;

  // TODO distinguish between linear traversal and inner-traversals
  using LinearPacketType = typename find_best_packet<DstScalar, RestrictedLinearSize>::type;
  using InnerPacketType = typename find_best_packet<DstScalar, RestrictedInnerSize>::type;

  static constexpr int LinearPacketSize = unpacket_traits<LinearPacketType>::size;
  static constexpr int InnerPacketSize = unpacket_traits<InnerPacketType>::size;

 public:
  static constexpr int LinearRequiredAlignment = unpacket_traits<LinearPacketType>::alignment;
  static constexpr int InnerRequiredAlignment = unpacket_traits<InnerPacketType>::alignment;

 private:
  static constexpr bool StorageOrdersAgree = DstIsRowMajor == SrcIsRowMajor;
  // Vectorization requires matching storage order, packet access on both sides,
  // and a functor that supports packet operations.
  static constexpr bool MightVectorize = StorageOrdersAgree &&
                                         bool(DstFlags & SrcFlags & ActualPacketAccessBit) &&
                                         bool(functor_traits<AssignFunc>::PacketAccess);
  // Inner vectorization needs the inner size and outer stride to be fixed
  // multiples of the packet size, plus sufficient (or unaligned-tolerant) alignment.
  static constexpr bool MayInnerVectorize = MightVectorize && (InnerSizeAtCompileTime != Dynamic) &&
                                            (InnerSizeAtCompileTime % InnerPacketSize == 0) &&
                                            (OuterStride != Dynamic) && (OuterStride % InnerPacketSize == 0) &&
                                            (EIGEN_UNALIGNED_VECTORIZE || JointAlignment >= InnerRequiredAlignment);
  static constexpr bool MayLinearize = StorageOrdersAgree && (DstFlags & SrcFlags & LinearAccessBit);
  static constexpr bool MayLinearVectorize =
      MightVectorize && MayLinearize && DstHasDirectAccess &&
      (EIGEN_UNALIGNED_VECTORIZE || (DstAlignment >= LinearRequiredAlignment) || MaxSizeAtCompileTime == Dynamic) &&
      (MaxSizeAtCompileTime == Dynamic || MaxSizeAtCompileTime >= LinearPacketSize);
  /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
     so it's only good for large enough sizes. */
  static constexpr int InnerSizeThreshold = (EIGEN_UNALIGNED_VECTORIZE ? 1 : 3) * InnerPacketSize;
  static constexpr bool MaySliceVectorize =
      MightVectorize && DstHasDirectAccess &&
      (MaxInnerSizeAtCompileTime == Dynamic || MaxInnerSizeAtCompileTime >= InnerSizeThreshold);
  /* slice vectorization can be slow, so we only want it if the slices are big, which is
     indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
     in a fixed-size matrix
     However, with EIGEN_UNALIGNED_VECTORIZE and unrolling, slice vectorization is still worth it */

 public:
  // Traversal strategy, in decreasing order of preference.
  static constexpr int Traversal = SizeAtCompileTime == 0                                  ? AllAtOnceTraversal
                                   : (MayLinearVectorize && (LinearPacketSize > InnerPacketSize))
                                       ? LinearVectorizedTraversal
                                   : MayInnerVectorize  ? InnerVectorizedTraversal
                                   : MayLinearVectorize ? LinearVectorizedTraversal
                                   : MaySliceVectorize  ? SliceVectorizedTraversal
                                   : MayLinearize       ? LinearTraversal
                                                        : DefaultTraversal;
  static constexpr bool Vectorized = Traversal == InnerVectorizedTraversal ||
                                     Traversal == LinearVectorizedTraversal ||
                                     Traversal == SliceVectorizedTraversal;

  using PacketType = std::conditional_t<Traversal == LinearVectorizedTraversal, LinearPacketType, InnerPacketType>;

 private:
  static constexpr int ActualPacketSize = Vectorized ? unpacket_traits<PacketType>::size : 1;
  static constexpr int UnrollingLimit = EIGEN_UNROLLING_LIMIT * ActualPacketSize;
  static constexpr int CoeffReadCost = int(DstEvaluator::CoeffReadCost) + int(SrcEvaluator::CoeffReadCost);
  // Unroll only when the total (or per-inner-dimension) cost stays under the limit.
  static constexpr bool MayUnrollCompletely =
      (SizeAtCompileTime != Dynamic) && (SizeAtCompileTime * CoeffReadCost <= UnrollingLimit);
  static constexpr bool MayUnrollInner =
      (InnerSizeAtCompileTime != Dynamic) && (InnerSizeAtCompileTime * CoeffReadCost <= UnrollingLimit);

 public:
  // Unrolling policy, dependent on the chosen traversal.
  static constexpr int Unrolling =
      (Traversal == InnerVectorizedTraversal || Traversal == DefaultTraversal)
          ? (MayUnrollCompletely ? CompleteUnrolling : MayUnrollInner ? InnerUnrolling : NoUnrolling)
      : Traversal == LinearVectorizedTraversal
          ? (MayUnrollCompletely && (EIGEN_UNALIGNED_VECTORIZE || (DstAlignment >= LinearRequiredAlignment))
                 ? CompleteUnrolling
                 : NoUnrolling)
      : Traversal == LinearTraversal ? (MayUnrollCompletely ? CompleteUnrolling : NoUnrolling)
#if EIGEN_UNALIGNED_VECTORIZE
      : Traversal == SliceVectorizedTraversal ? (MayUnrollInner ? InnerUnrolling : NoUnrolling)
#endif
                                     : NoUnrolling;

  // Whether partial-packet (segment) loads/stores are available for PacketType.
  static constexpr bool UsePacketSegment = has_packet_segment<PacketType>::value;

#ifdef EIGEN_DEBUG_ASSIGN
  // Dumps the full decision state to stderr; debug builds only.
  static void debug() {
    std::cerr << "DstXpr: " << typeid(typename DstEvaluator::XprType).name() << std::endl;
    std::cerr << "SrcXpr: " << typeid(typename SrcEvaluator::XprType).name() << std::endl;
    std::cerr.setf(std::ios::hex, std::ios::basefield);
    std::cerr << "DstFlags" << " = " << DstFlags << " (" << demangle_flags(DstFlags) << " )" << std::endl;
    std::cerr << "SrcFlags" << " = " << SrcFlags << " (" << demangle_flags(SrcFlags) << " )" << std::endl;
    std::cerr.unsetf(std::ios::hex);
    EIGEN_DEBUG_VAR(DstAlignment)
    EIGEN_DEBUG_VAR(SrcAlignment)
    EIGEN_DEBUG_VAR(LinearRequiredAlignment)
    EIGEN_DEBUG_VAR(InnerRequiredAlignment)
    EIGEN_DEBUG_VAR(JointAlignment)
    EIGEN_DEBUG_VAR(InnerSizeAtCompileTime)
    EIGEN_DEBUG_VAR(MaxInnerSizeAtCompileTime)
    EIGEN_DEBUG_VAR(LinearPacketSize)
    EIGEN_DEBUG_VAR(InnerPacketSize)
    EIGEN_DEBUG_VAR(ActualPacketSize)
    EIGEN_DEBUG_VAR(StorageOrdersAgree)
    EIGEN_DEBUG_VAR(MightVectorize)
    EIGEN_DEBUG_VAR(MayLinearize)
    EIGEN_DEBUG_VAR(MayInnerVectorize)
    EIGEN_DEBUG_VAR(MayLinearVectorize)
    EIGEN_DEBUG_VAR(MaySliceVectorize)
    std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
    EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost)
    EIGEN_DEBUG_VAR(DstEvaluator::CoeffReadCost)
    EIGEN_DEBUG_VAR(Dst::SizeAtCompileTime)
    EIGEN_DEBUG_VAR(UnrollingLimit)
    EIGEN_DEBUG_VAR(MayUnrollCompletely)
    EIGEN_DEBUG_VAR(MayUnrollInner)
    std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl;
    std::cerr << std::endl;
  }
#endif
};
/***************************************************************************
* Part 2 : meta-unrollers
***************************************************************************/
/************************
*** Default traversal ***
************************/
// Meta-unroller for the default traversal with complete unrolling: assigns
// coefficient Index_ (converted to outer/inner coordinates at compile time),
// then recurses on Index_ + 1 until Stop. The recursion is resolved entirely
// at compile time, producing straight-line code.
template <typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling {
  // Map the linear index onto (outer, inner) using the compile-time inner size.
  static constexpr int Outer = Index_ / Kernel::AssignmentTraits::InnerSizeAtCompileTime;
  static constexpr int Inner = Index_ % Kernel::AssignmentTraits::InnerSizeAtCompileTime;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    kernel.assignCoeffByOuterInner(Outer, Inner);
    copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Index_ + 1, Stop>::run(kernel);
  }
};

// Recursion terminator: Index_ == Stop, nothing left to assign.
template <typename Kernel, int Stop>
struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Stop, Stop> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&) {}
};
// Meta-unroller for the default traversal with inner unrolling: the outer index
// is a runtime value, the inner index Index_ is unrolled at compile time.
template <typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_DefaultTraversal_InnerUnrolling {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Index outer) {
    kernel.assignCoeffByOuterInner(outer, Index_);
    copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index_ + 1, Stop>::run(kernel, outer);
  }
};

// Recursion terminator: Index_ == Stop, nothing left to assign.
template <typename Kernel, int Stop>
struct copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Stop, Stop> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&, Index) {}
};
/***********************
*** Linear traversal ***
***********************/
// Meta-unroller for the linear traversal with complete unrolling: assigns the
// coefficient at linear index Index_, then recurses until Stop.
template <typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_LinearTraversal_CompleteUnrolling {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    kernel.assignCoeff(Index_);
    copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Index_ + 1, Stop>::run(kernel);
  }
};

// Recursion terminator: Index_ == Stop, nothing left to assign.
template <typename Kernel, int Stop>
struct copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Stop, Stop> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&) {}
};
/**************************
*** Inner vectorization ***
**************************/
// Meta-unroller for inner-vectorized traversal with complete unrolling: assigns
// one packet per step at linear index Index_ (converted to outer/inner at compile
// time) and advances by the packet size until Stop.
template <typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_innervec_CompleteUnrolling {
  using PacketType = typename Kernel::PacketType;
  // Compile-time conversion of the linear index to (outer, inner) coordinates.
  static constexpr int Outer = Index_ / Kernel::AssignmentTraits::InnerSizeAtCompileTime;
  static constexpr int Inner = Index_ % Kernel::AssignmentTraits::InnerSizeAtCompileTime;
  // Advance by one whole packet per recursion step.
  static constexpr int NextIndex = Index_ + unpacket_traits<PacketType>::size;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::SrcAlignment;
  static constexpr int DstAlignment = Kernel::AssignmentTraits::DstAlignment;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) {
    kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(Outer, Inner);
    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, NextIndex, Stop>::run(kernel);
  }
};

// Recursion terminator: Index_ == Stop, nothing left to assign.
template <typename Kernel, int Stop>
struct copy_using_evaluator_innervec_CompleteUnrolling<Kernel, Stop, Stop> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&) {}
};
// Meta-unroller for inner-vectorized traversal with inner unrolling: the outer
// index is runtime, the inner index Index_ advances by one packet per step.
// Alignment of both sides is threaded through as template parameters.
template <typename Kernel, int Index_, int Stop, int SrcAlignment, int DstAlignment>
struct copy_using_evaluator_innervec_InnerUnrolling {
  using PacketType = typename Kernel::PacketType;
  // Advance by one whole packet per recursion step.
  static constexpr int NextIndex = Index_ + unpacket_traits<PacketType>::size;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel, Index outer) {
    kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, Index_);
    copy_using_evaluator_innervec_InnerUnrolling<Kernel, NextIndex, Stop, SrcAlignment, DstAlignment>::run(kernel,
                                                                                                           outer);
  }
};

// Recursion terminator: Index_ == Stop, nothing left to assign.
template <typename Kernel, int Stop, int SrcAlignment, int DstAlignment>
struct copy_using_evaluator_innervec_InnerUnrolling<Kernel, Stop, Stop, SrcAlignment, DstAlignment> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&, Index) {}
};
// Handles the tail [Start, Stop) of an inner dimension that does not fill a whole
// packet. When UsePacketSegment is true, a single partial-packet (segment) store
// covers the tail; otherwise it falls back to scalar inner unrolling.
template <typename Kernel, int Start, int Stop, int SrcAlignment, int DstAlignment, bool UsePacketSegment>
struct copy_using_evaluator_innervec_segment {
  using PacketType = typename Kernel::PacketType;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel, Index outer) {
    // One masked/segment packet op covering Stop - Start coefficients.
    kernel.template assignPacketSegmentByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, Start, 0,
                                                                                            Stop - Start);
  }
};

// No segment support: fall back to coefficient-wise inner unrolling.
template <typename Kernel, int Start, int Stop, int SrcAlignment, int DstAlignment>
struct copy_using_evaluator_innervec_segment<Kernel, Start, Stop, SrcAlignment, DstAlignment,
                                             /*UsePacketSegment*/ false>
    : copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Start, Stop> {};

// Empty tail (Start == Stop), segment path: nothing to do.
template <typename Kernel, int Stop, int SrcAlignment, int DstAlignment>
struct copy_using_evaluator_innervec_segment<Kernel, Stop, Stop, SrcAlignment, DstAlignment,
                                             /*UsePacketSegment*/ true> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&, Index) {}
};

// Empty tail (Start == Stop), scalar path: nothing to do.
template <typename Kernel, int Stop, int SrcAlignment, int DstAlignment>
struct copy_using_evaluator_innervec_segment<Kernel, Stop, Stop, SrcAlignment, DstAlignment,
                                             /*UsePacketSegment*/ false> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&, Index) {}
};
/***************************************************************************
* Part 3 : implementation of all cases
***************************************************************************/
// dense_assignment_loop is based on assign_impl
// dense_assignment_loop is based on assign_impl

// Implementation selector, specialized below for each (Traversal, Unrolling) pair.
template <typename Kernel, int Traversal = Kernel::AssignmentTraits::Traversal,
          int Unrolling = Kernel::AssignmentTraits::Unrolling>
struct dense_assignment_loop_impl;

// Entry point of the dense assignment loop: dispatches to the implementation
// chosen by the assignment traits. In constant-evaluated contexts (C++20
// constexpr evaluation), vectorized paths are unusable, so it falls back to the
// plain default traversal (keeping AllAtOnceTraversal for zero-sized cases).
template <typename Kernel, int Traversal = Kernel::AssignmentTraits::Traversal,
          int Unrolling = Kernel::AssignmentTraits::Unrolling>
struct dense_assignment_loop {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
#ifdef __cpp_lib_is_constant_evaluated
    if (internal::is_constant_evaluated())
      // Compile-time evaluation: no packets, use the scalar default loop.
      dense_assignment_loop_impl<Kernel, Traversal == AllAtOnceTraversal ? AllAtOnceTraversal : DefaultTraversal,
                                 NoUnrolling>::run(kernel);
    else
#endif
      dense_assignment_loop_impl<Kernel, Traversal, Unrolling>::run(kernel);
  }
};
/************************
***** Special Cases *****
************************/
// Zero-sized assignment is a no-op.
// Zero-sized assignment is a no-op.
template <typename Kernel, int Unrolling>
struct dense_assignment_loop_impl<Kernel, AllAtOnceTraversal, Unrolling> {
  static constexpr int SizeAtCompileTime = Kernel::AssignmentTraits::SizeAtCompileTime;

  EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE constexpr run(Kernel& /*kernel*/) {
    // AllAtOnceTraversal is only selected for statically zero-sized expressions.
    EIGEN_STATIC_ASSERT(SizeAtCompileTime == 0, EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT)
  }
};
/************************
*** Default traversal ***
************************/
// Default traversal, no unrolling: plain runtime double loop over the outer and
// inner dimensions, assigning one coefficient at a time.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, DefaultTraversal, NoUnrolling> {
  EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE constexpr run(Kernel& kernel) {
    for (Index outer = 0; outer < kernel.outerSize(); ++outer) {
      for (Index inner = 0; inner < kernel.innerSize(); ++inner) {
        kernel.assignCoeffByOuterInner(outer, inner);
      }
    }
  }
};
// Default traversal, complete unrolling: the whole (fixed-size) assignment is
// expanded at compile time by the CompleteUnrolling meta-unroller.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, DefaultTraversal, CompleteUnrolling> {
  static constexpr int SizeAtCompileTime = Kernel::AssignmentTraits::SizeAtCompileTime;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, 0, SizeAtCompileTime>::run(kernel);
  }
};
// Default traversal, inner unrolling: runtime loop over the outer dimension,
// with each (fixed-size) inner dimension fully unrolled at compile time.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, DefaultTraversal, InnerUnrolling> {
  static constexpr int InnerSizeAtCompileTime = Kernel::AssignmentTraits::InnerSizeAtCompileTime;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    const Index outerSize = kernel.outerSize();
    for (Index outer = 0; outer < outerSize; ++outer)
      copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, 0, InnerSizeAtCompileTime>::run(kernel, outer);
  }
};
/***************************
*** Linear vectorization ***
***************************/
// The goal of unaligned_dense_assignment_loop is simply to factorize the handling
// of the non vectorizable beginning and ending parts
// The goal of unaligned_dense_assignment_loop is simply to factorize the handling
// of the non vectorizable beginning and ending parts
//
// Three cases, selected by the (UsePacketSegment, Skip) template flags:
//  - Skip == true: the ranges are known empty, both overloads are no-ops.
//  - segments available: the sub-packet range is handled with one partial-packet op.
//  - no segments: the range is handled coefficient by coefficient.
// Each case offers a linear-index overload (start, end) and an outer/inner
// overload (outer, innerStart, innerEnd).
template <typename PacketType, int DstAlignment, int SrcAlignment, bool UsePacketSegment, bool Skip>
struct unaligned_dense_assignment_loop {
  // if Skip == true, then do nothing
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& /*kernel*/, Index /*start*/,
                                                                  Index /*end*/) {}
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& /*kernel*/, Index /*outer*/,
                                                                  Index /*innerStart*/, Index /*innerEnd*/) {}
};

// Partial-packet (segment) path: the whole [start, end) range — at most one
// packet wide — is written with a single segment load/store.
template <typename PacketType, int DstAlignment, int SrcAlignment>
struct unaligned_dense_assignment_loop<PacketType, DstAlignment, SrcAlignment, /*UsePacketSegment*/ true,
                                       /*Skip*/ false> {
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Index start, Index end) {
    Index count = end - start;
    eigen_assert(count <= unpacket_traits<PacketType>::size);
    if (count > 0) kernel.template assignPacketSegment<DstAlignment, SrcAlignment, PacketType>(start, 0, count);
  }
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Index outer, Index start,
                                                                  Index end) {
    Index count = end - start;
    eigen_assert(count <= unpacket_traits<PacketType>::size);
    if (count > 0)
      kernel.template assignPacketSegmentByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, start, 0, count);
  }
};

// Scalar fallback path: no segment support, assign one coefficient at a time.
template <typename PacketType, int DstAlignment, int SrcAlignment>
struct unaligned_dense_assignment_loop<PacketType, DstAlignment, SrcAlignment, /*UsePacketSegment*/ false,
                                       /*Skip*/ false> {
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Index start, Index end) {
    for (Index index = start; index < end; ++index) kernel.assignCoeff(index);
  }
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Index outer, Index innerStart,
                                                                  Index innerEnd) {
    for (Index inner = innerStart; inner < innerEnd; ++inner) kernel.assignCoeffByOuterInner(outer, inner);
  }
};
// Compile-time-unrolled linear vectorized copy: emits one aligned-per-traits
// packet assignment at Index_, then recurses at Index_ + PacketSize until Stop.
template <typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_linearvec_CompleteUnrolling {
  using PacketType = typename Kernel::PacketType;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::SrcAlignment;
  static constexpr int DstAlignment = Kernel::AssignmentTraits::DstAlignment;
  // index of the next packet in the unrolled sequence
  static constexpr int NextIndex = Index_ + unpacket_traits<PacketType>::size;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) {
    kernel.template assignPacket<DstAlignment, SrcAlignment, PacketType>(Index_);
    copy_using_evaluator_linearvec_CompleteUnrolling<Kernel, NextIndex, Stop>::run(kernel);
  }
};

// Recursion terminator: Index_ == Stop, nothing left to copy.
template <typename Kernel, int Stop>
struct copy_using_evaluator_linearvec_CompleteUnrolling<Kernel, Stop, Stop> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&) {}
};
// Handles the compile-time leftover [Index_, Stop) after complete unrolling,
// i.e. fewer coefficients than one packet.
//
// Primary template (UsePacketSegment == true): one masked packet-segment store.
template <typename Kernel, int Index_, int Stop, bool UsePacketSegment>
struct copy_using_evaluator_linearvec_segment {
  using PacketType = typename Kernel::PacketType;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::SrcAlignment;
  static constexpr int DstAlignment = Kernel::AssignmentTraits::DstAlignment;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) {
    kernel.template assignPacketSegment<DstAlignment, SrcAlignment, PacketType>(Index_, 0, Stop - Index_);
  }
};

// No packet segments available: fall back to an unrolled scalar copy.
template <typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_linearvec_segment<Kernel, Index_, Stop, /*UsePacketSegment*/ false>
    : copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Index_, Stop> {};

// Empty-range terminators (Index_ == Stop): nothing to do, for both modes.
template <typename Kernel, int Stop>
struct copy_using_evaluator_linearvec_segment<Kernel, Stop, Stop, /*UsePacketSegment*/ true> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&) {}
};
template <typename Kernel, int Stop>
struct copy_using_evaluator_linearvec_segment<Kernel, Stop, Stop, /*UsePacketSegment*/ false> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel&) {}
};
// Runtime linear vectorized traversal: scalar/segment head up to the first
// aligned destination address, aligned packet loop over the bulk, then a
// scalar/segment tail for the remainder.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, LinearVectorizedTraversal, NoUnrolling> {
  using Scalar = typename Kernel::Scalar;
  using PacketType = typename Kernel::PacketType;
  static constexpr int PacketSize = unpacket_traits<PacketType>::size;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::JointAlignment;
  // never assume less than the scalar's natural alignment for the destination
  static constexpr int DstAlignment = plain_enum_max(Kernel::AssignmentTraits::DstAlignment, alignof(Scalar));
  static constexpr int RequestedAlignment = unpacket_traits<PacketType>::alignment;
  // alignment can be reached by advancing a whole number of scalars
  static constexpr bool Alignable =
      (DstAlignment >= RequestedAlignment) || ((RequestedAlignment - DstAlignment) % sizeof(Scalar) == 0);
  static constexpr int Alignment = Alignable ? RequestedAlignment : DstAlignment;
  static constexpr bool DstIsAligned = DstAlignment >= Alignment;
  static constexpr bool UsePacketSegment = Kernel::AssignmentTraits::UsePacketSegment;
  // head is skipped entirely when the destination is already aligned
  using head_loop =
      unaligned_dense_assignment_loop<PacketType, DstAlignment, SrcAlignment, UsePacketSegment, DstIsAligned>;
  using tail_loop = unaligned_dense_assignment_loop<PacketType, Alignment, SrcAlignment, UsePacketSegment, false>;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    const Index size = kernel.size();
    const Index alignedStart = DstIsAligned ? 0 : first_aligned<Alignment>(kernel.dstDataPtr(), size);
    const Index alignedEnd = alignedStart + numext::round_down(size - alignedStart, PacketSize);

    head_loop::run(kernel, 0, alignedStart);

    // aligned bulk of the copy, one packet at a time
    for (Index index = alignedStart; index < alignedEnd; index += PacketSize)
      kernel.template assignPacket<Alignment, SrcAlignment, PacketType>(index);

    tail_loop::run(kernel, alignedEnd, size);
  }
};
// Fully unrolled linear vectorized traversal (fixed-size expressions): the
// packet-multiple prefix is unrolled at compile time, the leftover handled by
// the segment helper.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, LinearVectorizedTraversal, CompleteUnrolling> {
  using PacketType = typename Kernel::PacketType;
  static constexpr int PacketSize = unpacket_traits<PacketType>::size;
  static constexpr int Size = Kernel::AssignmentTraits::SizeAtCompileTime;
  // largest multiple of PacketSize not exceeding Size
  static constexpr int AlignedSize = numext::round_down(Size, PacketSize);
  static constexpr bool UsePacketSegment = Kernel::AssignmentTraits::UsePacketSegment;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    copy_using_evaluator_linearvec_CompleteUnrolling<Kernel, 0, AlignedSize>::run(kernel);
    copy_using_evaluator_linearvec_segment<Kernel, AlignedSize, Size, UsePacketSegment>::run(kernel);
  }
};
/**************************
*** Inner vectorization ***
**************************/
// Inner vectorized traversal, no unrolling: the inner dimension is known to be
// a packet multiple, so each outer slice is covered by whole packets only.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, InnerVectorizedTraversal, NoUnrolling> {
  using PacketType = typename Kernel::PacketType;
  static constexpr int PacketSize = unpacket_traits<PacketType>::size;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::JointAlignment;
  static constexpr int DstAlignment = Kernel::AssignmentTraits::DstAlignment;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    const Index innerExtent = kernel.innerSize();
    const Index outerExtent = kernel.outerSize();
    for (Index o = 0; o < outerExtent; ++o) {
      for (Index i = 0; i < innerExtent; i += PacketSize) {
        kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(o, i);
      }
    }
  }
};
// Inner vectorized traversal, fully unrolled over the whole (fixed) size.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, InnerVectorizedTraversal, CompleteUnrolling> {
  static constexpr int SizeAtCompileTime = Kernel::AssignmentTraits::SizeAtCompileTime;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) {
    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, 0, SizeAtCompileTime>::run(kernel);
  }
};
// Inner vectorized traversal with per-slice unrolling: a runtime loop over the
// outer dimension, each inner slice unrolled at compile time.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, InnerVectorizedTraversal, InnerUnrolling> {
  static constexpr int InnerSize = Kernel::AssignmentTraits::InnerSizeAtCompileTime;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::SrcAlignment;
  static constexpr int DstAlignment = Kernel::AssignmentTraits::DstAlignment;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) {
    const Index outerSize = kernel.outerSize();
    for (Index outer = 0; outer < outerSize; ++outer)
      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, InnerSize, SrcAlignment, DstAlignment>::run(kernel,
                                                                                                          outer);
  }
};
/***********************
*** Linear traversal ***
***********************/
// Plain linear traversal: scalar-assign every coefficient through the
// kernel's linear index.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, LinearTraversal, NoUnrolling> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    const Index total = kernel.size();
    Index idx = 0;
    while (idx < total) {
      kernel.assignCoeff(idx);
      ++idx;
    }
  }
};
// Plain linear traversal, fully unrolled over the fixed compile-time size.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, LinearTraversal, CompleteUnrolling> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, 0,
                                                           Kernel::AssignmentTraits::SizeAtCompileTime>::run(kernel);
  }
};
/**************************
*** Slice vectorization ***
***************************/
// Slice vectorized traversal: each inner slice (row/column) is vectorized
// independently because the inner size need not be a packet multiple. The
// first aligned position is tracked incrementally across outer slices using
// the outer stride, so expensive per-slice alignment probing is avoided.
template <typename Kernel>
struct dense_assignment_loop_impl<Kernel, SliceVectorizedTraversal, NoUnrolling> {
  using Scalar = typename Kernel::Scalar;
  using PacketType = typename Kernel::PacketType;
  static constexpr int PacketSize = unpacket_traits<PacketType>::size;
  static constexpr int SrcAlignment = Kernel::AssignmentTraits::JointAlignment;
  // never assume less than the scalar's natural alignment for the destination
  static constexpr int DstAlignment = plain_enum_max(Kernel::AssignmentTraits::DstAlignment, alignof(Scalar));
  static constexpr int RequestedAlignment = unpacket_traits<PacketType>::alignment;
  static constexpr bool Alignable =
      (DstAlignment >= RequestedAlignment) || ((RequestedAlignment - DstAlignment) % sizeof(Scalar) == 0);
  static constexpr int Alignment = Alignable ? RequestedAlignment : DstAlignment;
  static constexpr bool DstIsAligned = DstAlignment >= Alignment;
  static constexpr bool UsePacketSegment = Kernel::AssignmentTraits::UsePacketSegment;
  // the source is accessed unaligned here; head is skipped if alignment is impossible
  using head_loop = unaligned_dense_assignment_loop<PacketType, DstAlignment, Unaligned, UsePacketSegment, !Alignable>;
  using tail_loop = unaligned_dense_assignment_loop<PacketType, Alignment, Unaligned, UsePacketSegment, false>;
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel) {
    const Scalar* dst_ptr = kernel.dstDataPtr();
    const Index innerSize = kernel.innerSize();
    const Index outerSize = kernel.outerSize();
    // how the aligned start shifts from one outer slice to the next
    const Index alignedStep = Alignable ? (PacketSize - kernel.outerStride() % PacketSize) % PacketSize : 0;
    Index alignedStart = ((!Alignable) || DstIsAligned) ? 0 : internal::first_aligned<Alignment>(dst_ptr, innerSize);

    for (Index outer = 0; outer < outerSize; ++outer) {
      const Index alignedEnd = alignedStart + numext::round_down(innerSize - alignedStart, PacketSize);

      head_loop::run(kernel, outer, 0, alignedStart);

      // do the vectorizable part of the assignment
      for (Index inner = alignedStart; inner < alignedEnd; inner += PacketSize)
        kernel.template assignPacketByOuterInner<Alignment, Unaligned, PacketType>(outer, inner);

      tail_loop::run(kernel, outer, alignedEnd, innerSize);

      // advance the aligned start for the next slice (clamped to innerSize)
      alignedStart = numext::mini((alignedStart + alignedStep) % PacketSize, innerSize);
    }
  }
};
#if EIGEN_UNALIGNED_VECTORIZE
template
<
typename
Kernel
>
struct
dense_assignment_loop_impl
<
Kernel
,
SliceVectorizedTraversal
,
InnerUnrolling
>
{
using
PacketType
=
typename
Kernel
::
PacketType
;
static
constexpr
int
PacketSize
=
unpacket_traits
<
PacketType
>::
size
;
static
constexpr
int
InnerSize
=
Kernel
::
AssignmentTraits
::
InnerSizeAtCompileTime
;
static
constexpr
int
VectorizableSize
=
numext
::
round_down
(
InnerSize
,
PacketSize
);
static
constexpr
bool
UsePacketSegment
=
Kernel
::
AssignmentTraits
::
UsePacketSegment
;
using
packet_loop
=
copy_using_evaluator_innervec_InnerUnrolling
<
Kernel
,
0
,
VectorizableSize
,
Unaligned
,
Unaligned
>
;
using
packet_segment_loop
=
copy_using_evaluator_innervec_segment
<
Kernel
,
VectorizableSize
,
InnerSize
,
Unaligned
,
Unaligned
,
UsePacketSegment
>
;
EIGEN_DEVICE_FUNC
static
EIGEN_STRONG_INLINE
constexpr
void
run
(
Kernel
&
kernel
)
{
for
(
Index
outer
=
0
;
outer
<
kernel
.
outerSize
();
++
outer
)
{
packet_loop
::
run
(
kernel
,
outer
);
packet_segment_loop
::
run
(
kernel
,
outer
);
}
}
};
#endif
/***************************************************************************
* Part 4 : Generic dense assignment kernel
***************************************************************************/
// This class generalize the assignment of a coefficient (or packet) from one dense evaluator
// to another dense writable evaluator.
// It is parametrized by the two evaluators, and the actual assignment functor.
// This abstraction level permits to keep the evaluation loops as simple and as generic as possible.
// One can customize the assignment using this generic dense_assignment_kernel with different
// functors, or by completely overloading it, by-passing a functor.
// Generic dense assignment kernel: bundles a writable destination evaluator, a
// source evaluator and an assignment functor, and exposes the coefficient-,
// packet- and packet-segment-wise assignment primitives used by the traversal
// loops above. Sizes and strides are taken from the destination expression.
template <typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized>
class generic_dense_assignment_kernel {
 protected:
  typedef typename DstEvaluatorTypeT::XprType DstXprType;
  typedef typename SrcEvaluatorTypeT::XprType SrcXprType;

 public:
  typedef DstEvaluatorTypeT DstEvaluatorType;
  typedef SrcEvaluatorTypeT SrcEvaluatorType;
  typedef typename DstEvaluatorType::Scalar Scalar;
  typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;
  typedef typename AssignmentTraits::PacketType PacketType;

  // Stores references only; all four arguments must outlive the kernel.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr generic_dense_assignment_kernel(DstEvaluatorType& dst,
                                                                                  const SrcEvaluatorType& src,
                                                                                  const Functor& func,
                                                                                  DstXprType& dstExpr)
      : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr) {
#ifdef EIGEN_DEBUG_ASSIGN
    AssignmentTraits::debug();
#endif
  }

  // Geometry of the assignment, forwarded from the destination expression.
  EIGEN_DEVICE_FUNC constexpr Index size() const noexcept { return m_dstExpr.size(); }
  EIGEN_DEVICE_FUNC constexpr Index innerSize() const noexcept { return m_dstExpr.innerSize(); }
  EIGEN_DEVICE_FUNC constexpr Index outerSize() const noexcept { return m_dstExpr.outerSize(); }
  EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_dstExpr.rows(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_dstExpr.cols(); }
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_dstExpr.outerStride(); }

  EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() noexcept { return m_dst; }
  EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const noexcept { return m_src; }

  /// Assign src(row,col) to dst(row,col) through the assignment functor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void assignCoeff(Index row, Index col) {
    m_functor.assignCoeff(m_dst.coeffRef(row, col), m_src.coeff(row, col));
  }

  /// \sa assignCoeff(Index,Index)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index) {
    m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index));
  }

  /// \sa assignCoeff(Index,Index)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void assignCoeffByOuterInner(Index outer, Index inner) {
    Index row = rowIndexByOuterInner(outer, inner);
    Index col = colIndexByOuterInner(outer, inner);
    assignCoeff(row, col);
  }

  // Full-packet assignment: load a packet from the source, store through the
  // functor at the destination address.
  template <int StoreMode, int LoadMode, typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col) {
    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(row, col),
                                               m_src.template packet<LoadMode, Packet>(row, col));
  }

  template <int StoreMode, int LoadMode, typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index) {
    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(index), m_src.template packet<LoadMode, Packet>(index));
  }

  template <int StoreMode, int LoadMode, typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner) {
    Index row = rowIndexByOuterInner(outer, inner);
    Index col = colIndexByOuterInner(outer, inner);
    assignPacket<StoreMode, LoadMode, Packet>(row, col);
  }

  // Partial-packet (segment) assignment: only `count` lanes starting at
  // `begin` within the packet are loaded and stored.
  template <int StoreMode, int LoadMode, typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketSegment(Index row, Index col, Index begin, Index count) {
    m_functor.template assignPacketSegment<StoreMode>(
        &m_dst.coeffRef(row, col), m_src.template packetSegment<LoadMode, Packet>(row, col, begin, count), begin,
        count);
  }

  template <int StoreMode, int LoadMode, typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketSegment(Index index, Index begin, Index count) {
    m_functor.template assignPacketSegment<StoreMode>(
        &m_dst.coeffRef(index), m_src.template packetSegment<LoadMode, Packet>(index, begin, count), begin, count);
  }

  template <int StoreMode, int LoadMode, typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketSegmentByOuterInner(Index outer, Index inner, Index begin,
                                                                             Index count) {
    Index row = rowIndexByOuterInner(outer, inner);
    Index col = colIndexByOuterInner(outer, inner);
    assignPacketSegment<StoreMode, LoadMode, Packet>(row, col, begin, count);
  }

  // Map an (outer, inner) pair to a row index, depending on vector-ness and
  // storage order of the destination.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr Index rowIndexByOuterInner(Index outer, Index inner) {
    typedef typename DstEvaluatorType::ExpressionTraits Traits;
    return int(Traits::RowsAtCompileTime) == 1   ? 0
           : int(Traits::ColsAtCompileTime) == 1 ? inner
           : int(DstEvaluatorType::Flags) & RowMajorBit ? outer
                                                        : inner;
  }

  // Same mapping for the column index.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr Index colIndexByOuterInner(Index outer, Index inner) {
    typedef typename DstEvaluatorType::ExpressionTraits Traits;
    return int(Traits::ColsAtCompileTime) == 1   ? 0
           : int(Traits::RowsAtCompileTime) == 1 ? inner
           : int(DstEvaluatorType::Flags) & RowMajorBit ? inner
                                                        : outer;
  }

  EIGEN_DEVICE_FUNC const Scalar* dstDataPtr() const { return m_dstExpr.data(); }

 protected:
  DstEvaluatorType& m_dst;
  const SrcEvaluatorType& m_src;
  const Functor& m_functor;
  // TODO find a way to avoid the needs of the original expression
  DstXprType& m_dstExpr;
};
// Special kernel used when computing small products whose operands have dynamic dimensions. It ensures that the
// PacketSize used is no larger than 4, thereby increasing the chance that vectorized instructions will be used
// when computing the product.
// Only AssignmentTraits/PacketType are overridden (via the extra `4` traits
// argument); all assignment primitives come from the generic base kernel.
template <typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor>
class restricted_packet_dense_assignment_kernel
    : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, BuiltIn> {
 protected:
  typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, BuiltIn> Base;

 public:
  typedef typename Base::Scalar Scalar;
  typedef typename Base::DstXprType DstXprType;
  // `4` caps the packet size chosen by the traits
  typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, 4> AssignmentTraits;
  typedef typename AssignmentTraits::PacketType PacketType;

  EIGEN_DEVICE_FUNC restricted_packet_dense_assignment_kernel(DstEvaluatorTypeT& dst, const SrcEvaluatorTypeT& src,
                                                              const Functor& func, DstXprType& dstExpr)
      : Base(dst, src, func, dstExpr) {}
};
/***************************************************************************
* Part 5 : Entry point for dense rectangular assignment
***************************************************************************/
// Generic functor case: resizing is NOT allowed (e.g. compound assignment such
// as +=), so only check that the destination already has the source's shape.
template <typename DstXprType, typename SrcXprType, typename Functor>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize_if_allowed(DstXprType& dst, const SrcXprType& src,
                                                                       const Functor& /*func*/) {
  EIGEN_ONLY_USED_FOR_DEBUG(dst);
  EIGEN_ONLY_USED_FOR_DEBUG(src);
  eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
}
// Plain-assignment case (assign_op): the destination may be resized to match
// the source before the copy.
template <typename DstXprType, typename SrcXprType, typename T1, typename T2>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize_if_allowed(DstXprType& dst, const SrcXprType& src,
                                                                       const internal::assign_op<T1, T2>& /*func*/) {
  Index dstRows = src.rows();
  Index dstCols = src.cols();
  // only touch the destination when the shape actually differs
  if (((dst.rows() != dstRows) || (dst.cols() != dstCols))) dst.resize(dstRows, dstCols);
  // resize() may be a no-op for fixed-size destinations; verify it worked
  eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols);
}
// Entry point for dense rectangular assignment: builds both evaluators, resizes
// the destination if the functor permits, then runs the traversal loop selected
// at compile time by dense_assignment_loop.
template <typename DstXprType, typename SrcXprType, typename Functor>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_dense_assignment_loop(DstXprType& dst,
                                                                                const SrcXprType& src,
                                                                                const Functor& func) {
  typedef evaluator<DstXprType> DstEvaluatorType;
  typedef evaluator<SrcXprType> SrcEvaluatorType;

  SrcEvaluatorType srcEvaluator(src);

  // NOTE To properly handle A = (A*A.transpose())/s with A rectangular,
  // we need to resize the destination after the source evaluator has been created.
  resize_if_allowed(dst, src, func);

  DstEvaluatorType dstEvaluator(dst);

  typedef generic_dense_assignment_kernel<DstEvaluatorType, SrcEvaluatorType, Functor> Kernel;
  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());

  dense_assignment_loop<Kernel>::run(kernel);
}
// Convenience overload: dense assignment with plain operator= semantics.
template <typename DstXprType, typename SrcXprType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src) {
  typedef internal::assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar> DefaultAssignOp;
  call_dense_assignment_loop(dst, src, DefaultAssignOp());
}
/***************************************************************************
* Part 6 : Generic assignment
***************************************************************************/
// Based on the respective shapes of the destination and source,
// the class AssignmentKind determine the kind of assignment mechanism.
// AssignmentKind must define a Kind typedef.
// Maps a (destination shape, source shape) pair to an assignment mechanism tag.
template <typename DstShape, typename SrcShape>
struct AssignmentKind;

// Assignment kind defined in this file:
struct Dense2Dense {};
struct EigenBase2EigenBase {};

// Default: fall back to the evalTo-based EigenBase mechanism.
template <typename, typename>
struct AssignmentKind {
  typedef EigenBase2EigenBase Kind;
};

// Dense destination + dense source: use the dense assignment loops.
template <>
struct AssignmentKind<DenseShape, DenseShape> {
  typedef Dense2Dense Kind;
};
// This is the main assignment class
// Specialized on the Kind deduced from the two expressions' shapes; the
// trailing EnableIf parameter allows SFINAE-constrained partial specializations.
template <typename DstXprType, typename SrcXprType, typename Functor,
          typename Kind = typename AssignmentKind<typename evaluator_traits<DstXprType>::Shape,
                                                  typename evaluator_traits<SrcXprType>::Shape>::Kind,
          typename EnableIf = void>
struct Assignment;
// The only purpose of this call_assignment() function is to deal with noalias() / "assume-aliasing" and automatic
// transposition. Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes thing quite
// complicated. So this intermediate function removes everything related to "assume-aliasing" such that Assignment does
// not has to bother about these annoying details.
// Default-functor entry point: assignment with operator= semantics.
template <typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment(Dst& dst, const Src& src) {
  typedef internal::assign_op<typename Dst::Scalar, typename Src::Scalar> DefaultAssignOp;
  call_assignment(dst, src, DefaultAssignOp());
}
// Overload accepting a const destination (e.g. writable sub-expressions that
// are passed around as const); same operator= semantics.
template <typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(const Dst& dst, const Src& src) {
  typedef internal::assign_op<typename Dst::Scalar, typename Src::Scalar> DefaultAssignOp;
  call_assignment(dst, src, DefaultAssignOp());
}
// Deal with "assume-aliasing"
// Selected when the source is assumed to alias the destination (e.g. matrix
// products): the source is first evaluated into a temporary.
template <typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment(
    Dst& dst, const Src& src, const Func& func,
    std::enable_if_t<evaluator_assume_aliasing<Src>::value, void*> = 0) {
  // evaluate into a plain temporary to break the potential aliasing
  typename plain_matrix_type<Src>::type tmp(src);
  call_assignment_no_alias(dst, tmp, func);
}
// Selected when no aliasing is assumed: forward directly, no temporary.
template <typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment(
    Dst& dst, const Src& src, const Func& func,
    std::enable_if_t<!evaluator_assume_aliasing<Src>::value, void*> = 0) {
  call_assignment_no_alias(dst, src, func);
}
// by-pass "assume-aliasing"
// When there is no aliasing, we require that 'dst' has been properly resized
// NoAlias-wrapped destination: the wrapper is only a tag requesting that the
// aliasing analysis (and any temporary) be skipped.
template <typename Dst, template <typename> class StorageBase, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment(NoAlias<Dst, StorageBase>& dst, const Src& src,
                                                                     const Func& func) {
  Dst& target = dst.expression();
  call_assignment_no_alias(target, src, func);
}
// Core no-aliasing assignment: handles automatic transposition when a row
// vector is assigned to a column vector (or vice versa), then dispatches to
// the Assignment specialization.
template <typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment_no_alias(Dst& dst, const Src& src,
                                                                              const Func& func) {
  enum {
    // one side is a compile-time row vector and the other a column vector
    // (1x1 is excluded: no transposition needed there)
    NeedToTranspose = ((int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) ||
                       (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)) &&
                      int(Dst::SizeAtCompileTime) != 1
  };

  typedef std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst> ActualDstTypeCleaned;
  typedef std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst&> ActualDstType;
  ActualDstType actualDst(dst);

  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned, Src)
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename ActualDstTypeCleaned::Scalar, typename Src::Scalar);

  Assignment<ActualDstTypeCleaned, Src, Func>::run(actualDst, src, func);
}
// Same as call_dense_assignment_loop but using the restricted (packet size <= 4)
// kernel; used for small products with dynamic dimensions. No transposition
// handling is performed here.
template <typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_restricted_packet_assignment_no_alias(Dst& dst, const Src& src,
                                                                                      const Func& func) {
  typedef evaluator<Dst> DstEvaluatorType;
  typedef evaluator<Src> SrcEvaluatorType;
  typedef restricted_packet_dense_assignment_kernel<DstEvaluatorType, SrcEvaluatorType, Func> Kernel;

  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename Dst::Scalar, typename Src::Scalar);

  SrcEvaluatorType srcEvaluator(src);
  // resize after the source evaluator is created (see call_dense_assignment_loop)
  resize_if_allowed(dst, src, func);

  DstEvaluatorType dstEvaluator(dst);
  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());

  dense_assignment_loop<Kernel>::run(kernel);
}
// Default-functor variant: no-aliasing assignment with operator= semantics.
template <typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment_no_alias(Dst& dst, const Src& src) {
  typedef internal::assign_op<typename Dst::Scalar, typename Src::Scalar> DefaultAssignOp;
  call_assignment_no_alias(dst, src, DefaultAssignOp());
}
// Like call_assignment_no_alias, but skips the automatic transposition logic:
// the destination is used as-is.
template <typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src,
                                                                                           const Func& func) {
  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst, Src)
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename Dst::Scalar, typename Src::Scalar);

  Assignment<Dst, Src, Func>::run(dst, src, func);
}
// Default-functor variant with operator= semantics.
template <typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src) {
  typedef internal::assign_op<typename Dst::Scalar, typename Src::Scalar> DefaultAssignOp;
  call_assignment_no_alias_no_transpose(dst, src, DefaultAssignOp());
}
// forward declaration
// Debug-time check that dst and src do not unexpectedly alias; defined elsewhere.
template <typename Dst, typename Src>
EIGEN_DEVICE_FUNC void check_for_aliasing(const Dst& dst, const Src& src);
// Generic Dense to Dense assignment
// Note that the last template argument "Weak" is needed to make it possible to perform
// both partial specialization+SFINAE without ambiguous specialization
template <typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Weak> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE constexpr void run(DstXprType& dst, const SrcXprType& src,
                                                                  const Functor& func) {
#ifndef EIGEN_NO_DEBUG
    // the aliasing check is a runtime facility, skip it in constant evaluation
    if (!internal::is_constant_evaluated()) {
      internal::check_for_aliasing(dst, src);
    }
#endif

    call_dense_assignment_loop(dst, src, func);
  }
};
// Optimized path for dst = Constant(...): bypass the generic loops and use the
// dedicated fill implementation.
template <typename DstXprType, typename SrcPlainObject, typename Weak>
struct Assignment<DstXprType, CwiseNullaryOp<scalar_constant_op<typename DstXprType::Scalar>, SrcPlainObject>,
                  assign_op<typename DstXprType::Scalar, typename DstXprType::Scalar>, Dense2Dense, Weak> {
  using Scalar = typename DstXprType::Scalar;
  using NullaryOp = scalar_constant_op<Scalar>;
  using SrcXprType = CwiseNullaryOp<NullaryOp, SrcPlainObject>;
  using Functor = assign_op<Scalar, Scalar>;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const Functor& /*func*/) {
    eigen_fill_impl<DstXprType>::run(dst, src);
  }
};
// Optimized path for dst = Zero(...): bypass the generic loops and use the
// dedicated zeroing implementation.
template <typename DstXprType, typename SrcPlainObject, typename Weak>
struct Assignment<DstXprType, CwiseNullaryOp<scalar_zero_op<typename DstXprType::Scalar>, SrcPlainObject>,
                  assign_op<typename DstXprType::Scalar, typename DstXprType::Scalar>, Dense2Dense, Weak> {
  using Scalar = typename DstXprType::Scalar;
  using NullaryOp = scalar_zero_op<Scalar>;
  using SrcXprType = CwiseNullaryOp<NullaryOp, SrcPlainObject>;
  using Functor = assign_op<Scalar, Scalar>;

  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src,
                                                        const Functor& /*func*/) {
    eigen_zero_impl<DstXprType>::run(dst, src);
  }
};
// Generic assignment through evalTo.
// TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism.
// Note that the last template argument "Weak" is needed to make it possible to perform
// both partial specialization+SFINAE without ambiguous specialization
// Fallback assignment through the source's evalTo/addTo/subTo members, used
// when no shape-specific mechanism applies. The destination is resized to
// the source's dimensions before each operation.
template <typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak> {
  // dst = src  (plain assignment)
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(
      DstXprType& dst, const SrcXprType& src,
      const internal::assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);

    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    src.evalTo(dst);
  }

  // NOTE The following two functions are templated to avoid their instantiation if not needed
  // This is needed because some expressions supports evalTo only and/or have 'void' as scalar type.

  // dst += src
  template <typename SrcScalarType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(
      DstXprType& dst, const SrcXprType& src,
      const internal::add_assign_op<typename DstXprType::Scalar, SrcScalarType>& /*func*/) {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);

    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    src.addTo(dst);
  }

  // dst -= src
  template <typename SrcScalarType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(
      DstXprType& dst, const SrcXprType& src,
      const internal::sub_assign_op<typename DstXprType::Scalar, SrcScalarType>& /*func*/) {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);

    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    src.subTo(dst);
  }
};
}
// namespace internal
}
// end namespace Eigen
#endif // EIGEN_ASSIGN_EVALUATOR_H
eigen-master/Eigen/src/Core/Assign_MKL.h
0 → 100644
View file @
266d4fd9
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to Intel(R) MKL
* MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
********************************************************************************
*/
#ifndef EIGEN_ASSIGN_VML_H
#define EIGEN_ASSIGN_VML_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Compile-time decision helper: determines whether the dense assignment
// dst = unary_op(src) may be offloaded to an Intel MKL VML batch routine,
// and if so whether the whole expression can be processed in one linear
// call or must be processed one inner vector (row/column) at a time.
template <typename Dst, typename Src>
class vml_assign_traits {
 private:
  enum {
    // VML works on raw pointers, so both sides need direct storage access.
    DstHasDirectAccess = Dst::Flags & DirectAccessBit,
    SrcHasDirectAccess = Src::Flags & DirectAccessBit,
    // Mixed row-major/column-major sides would require a transpose first.
    StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
    // Compile-time length of one contiguous inner run (whole size for vectors).
    InnerSize = int(Dst::IsVectorAtCompileTime)   ? int(Dst::SizeAtCompileTime)
                : int(Dst::Flags) & RowMajorBit   ? int(Dst::ColsAtCompileTime)
                                                  : int(Dst::RowsAtCompileTime),
    InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
                   : int(Dst::Flags) & RowMajorBit ? int(Dst::MaxColsAtCompileTime)
                                                   : int(Dst::MaxRowsAtCompileTime),
    MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
    // VML also requires densely packed coefficients (unit inner stride).
    MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess &&
                     Src::InnerStrideAtCompileTime == 1 && Dst::InnerStrideAtCompileTime == 1,
    // If both sides support linear addressing, a single VML call can cover
    // the full expression instead of one call per inner vector.
    MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
    VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
    // Below EIGEN_MKL_VML_THRESHOLD the call overhead outweighs the benefit;
    // dynamic sizes are accepted (assumed large enough).
    LargeEnough = VmlSize == Dynamic || VmlSize >= EIGEN_MKL_VML_THRESHOLD
  };

 public:
  enum {
    EnableVml = MightEnableVml && LargeEnough,
    Traversal = MightLinearize ? LinearTraversal : DefaultTraversal
  };
};
// Identity macro forcing an extra expansion pass so that the pasted token
// EIGEN_VMLMODE_EXPAND_x##VMLMODE below is itself macro-expanded.
#define EIGEN_PP_EXPAND(ARG) ARG
// In "LA" mode the VML accuracy argument depends on EIGEN_FAST_MATH:
// high accuracy (VML_HA) by default, low accuracy (VML_LA) with fast-math.
#if !defined(EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
#define EIGEN_VMLMODE_EXPAND_xLA , VML_HA
#else
#define EIGEN_VMLMODE_EXPAND_xLA , VML_LA
#endif
// "_" mode: plain VML routine, no extra accuracy argument is appended.
#define EIGEN_VMLMODE_EXPAND_x_
// Routine-name prefix: "vm" for mode-aware routines (e.g. vmsSin),
// "v" for the plain ones (e.g. vsSqrt).
#define EIGEN_VMLMODE_PREFIX_xLA vm
#define EIGEN_VMLMODE_PREFIX_x_ v
#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x, VMLMODE)
// Specializes internal::Assignment so that dst = src.EIGENOP() is routed to
// the VML routine VMLOP whenever vml_assign_traits enables it: one call for
// linearizable expressions, otherwise one call per inner row/column.
#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \
  template <typename DstXprType, typename SrcXprNested> \
  struct Assignment<DstXprType, CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested>, \
                    assign_op<EIGENTYPE, EIGENTYPE>, Dense2Dense, \
                    std::enable_if_t<vml_assign_traits<DstXprType, SrcXprNested>::EnableVml>> { \
    typedef CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested> SrcXprType; \
    static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE, EIGENTYPE> &func) { \
      resize_if_allowed(dst, src, func); \
      eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
      if (vml_assign_traits<DstXprType, SrcXprNested>::Traversal == (int)LinearTraversal) { \
        VMLOP(dst.size(), (const VMLTYPE *)src.nestedExpression().data(), \
              (VMLTYPE *)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
      } else { \
        const Index outerSize = dst.outerSize(); \
        for (Index outer = 0; outer < outerSize; ++outer) { \
          const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer, 0)) \
                                                    : &(src.nestedExpression().coeffRef(0, outer)); \
          EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer, 0)) : &(dst.coeffRef(0, outer)); \
          VMLOP(dst.innerSize(), (const VMLTYPE *)src_ptr, \
                (VMLTYPE *)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
        } \
      } \
    } \
  };
// Instantiates the unary binding for float ("s" routines) and double ("d").
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), s##VMLOP), float, float, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), d##VMLOP), double, double, VMLMODE)
// Instantiates the unary binding for scomplex ("c") and dcomplex ("z").
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), c##VMLOP), scomplex, \
                                   MKL_Complex8, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), z##VMLOP), dcomplex, \
                                   MKL_Complex16, VMLMODE)
// Instantiates the binding for all four supported scalar types.
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE)
// Bind each supported coefficient-wise unary functor to its VML routine.
// "LA" entries honor EIGEN_FAST_MATH (VML_HA vs VML_LA accuracy);
// "_" entries use the plain routine without an accuracy-mode argument.
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin, Sin, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin, Asin, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh, Sinh, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos, Cos, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos, Acos, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh, Cosh, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan, Tan, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan, Atan, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh, Tanh, LA)
// EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs, _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp, Exp, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log, Ln, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt, Sqrt, _)
// Real-only and complex-only operations.
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr, _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg, _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round, _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor, _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(cbrt, Cbrt, _)
// Specializes internal::Assignment so that dst = src.pow(constant) — i.e. a
// CwiseBinaryOp whose right-hand side is a scalar-constant nullary — is
// routed to the VML "Powx" family (vector raised to one scalar exponent).
// Unlike the unary macro above, the fully-prefixed routine name (e.g.
// vmsPowx) is passed in directly via VMLOP.
#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \
  template <typename DstXprType, typename SrcXprNested, typename Plain> \
  struct Assignment<DstXprType, \
                    CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE, EIGENTYPE>, SrcXprNested, \
                                  const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>, Plain>>, \
                    assign_op<EIGENTYPE, EIGENTYPE>, Dense2Dense, \
                    std::enable_if_t<vml_assign_traits<DstXprType, SrcXprNested>::EnableVml>> { \
    typedef CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE, EIGENTYPE>, SrcXprNested, \
                          const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>, Plain>> \
        SrcXprType; \
    static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE, EIGENTYPE> &func) { \
      resize_if_allowed(dst, src, func); \
      eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
      VMLTYPE exponent = reinterpret_cast<const VMLTYPE &>(src.rhs().functor().m_other); \
      if (vml_assign_traits<DstXprType, SrcXprNested>::Traversal == LinearTraversal) { \
        VMLOP(dst.size(), (const VMLTYPE *)src.lhs().data(), exponent, \
              (VMLTYPE *)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
      } else { \
        const Index outerSize = dst.outerSize(); \
        for (Index outer = 0; outer < outerSize; ++outer) { \
          const EIGENTYPE *src_ptr = \
              src.IsRowMajor ? &(src.lhs().coeffRef(outer, 0)) : &(src.lhs().coeffRef(0, outer)); \
          EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer, 0)) : &(dst.coeffRef(0, outer)); \
          VMLOP(dst.innerSize(), (const VMLTYPE *)src_ptr, exponent, \
                (VMLTYPE *)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
        } \
      } \
    } \
  };
// pow(x, scalar) bindings for the four supported scalar types.
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float, float, LA)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double, double, LA)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8, LA)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA)

}  // end namespace internal
}  // end namespace Eigen

#endif  // EIGEN_ASSIGN_VML_H
eigen-master/Eigen/src/Core/BandMatrix.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BANDMATRIX_H
#define EIGEN_BANDMATRIX_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// CRTP base for banded-storage matrices (BandMatrix, BandMatrixWrapper).
// The band is stored compactly in a (1 + supers + subs) x cols coefficient
// matrix: data row 0 holds the top super-diagonal, row supers() holds the
// main diagonal, and the bottom rows hold the sub-diagonals.
template <typename Derived>
class BandMatrixBase : public EigenBase<Derived> {
 public:
  enum {
    Flags = internal::traits<Derived>::Flags,
    CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
    RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
    ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
    MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
    Supers = internal::traits<Derived>::Supers,
    Subs = internal::traits<Derived>::Subs,
    Options = internal::traits<Derived>::Options
  };
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime> DenseMatrixType;
  typedef typename DenseMatrixType::StorageIndex StorageIndex;
  typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
  typedef EigenBase<Derived> Base;

 protected:
  enum {
    // Number of rows of the compact coefficient storage (Dynamic if either
    // band width is unknown at compile time).
    DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic,
    SizeAtCompileTime = min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)
  };

 public:
  using Base::cols;
  using Base::derived;
  using Base::rows;

  /** \returns the number of super diagonals */
  inline Index supers() const { return derived().supers(); }

  /** \returns the number of sub diagonals */
  inline Index subs() const { return derived().subs(); }

  /** \returns an expression of the underlying coefficient matrix */
  inline const CoefficientsType& coeffs() const { return derived().coeffs(); }

  /** \returns an expression of the underlying coefficient matrix */
  inline CoefficientsType& coeffs() { return derived().coeffs(); }

  /** \returns a vector expression of the \a i -th column,
   * only the meaningful part is returned.
   * \warning the internal storage must be column major. */
  inline Block<CoefficientsType, Dynamic, 1> col(Index i) {
    EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
    Index start = 0;
    Index len = coeffs().rows();
    if (i <= supers()) {
      // Near the left edge the top of the stored column is padding.
      start = supers() - i;
      len = (std::min)(rows(), std::max<Index>(0, coeffs().rows() - (supers() - i)));
    } else if (i >= rows() - subs())
      // Near the right edge the bottom of the stored column is padding.
      len = std::max<Index>(0, coeffs().rows() - (i + 1 - rows() + subs()));
    return Block<CoefficientsType, Dynamic, 1>(coeffs(), start, i, len, 1);
  }

  /** \returns a vector expression of the main diagonal */
  inline Block<CoefficientsType, 1, SizeAtCompileTime> diagonal() {
    return Block<CoefficientsType, 1, SizeAtCompileTime>(coeffs(), supers(), 0, 1, (std::min)(rows(), cols()));
  }

  /** \returns a vector expression of the main diagonal (const version) */
  inline const Block<const CoefficientsType, 1, SizeAtCompileTime> diagonal() const {
    return Block<const CoefficientsType, 1, SizeAtCompileTime>(coeffs(), supers(), 0, 1, (std::min)(rows(), cols()));
  }

  // Computes the return type of the compile-time diagonal<Index>() accessor.
  // For self-adjoint bands storing only one triangle, a request for the
  // missing side is redirected to the stored opposite diagonal, conjugated
  // when the scalar type is complex.
  template <int Index>
  struct DiagonalIntReturnType {
    enum {
      ReturnOpposite =
          (int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)),
      Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,
      ActualIndex = ReturnOpposite ? -Index : Index,
      DiagonalSize = (RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic)
                         ? Dynamic
                         : (ActualIndex < 0
                                ? min_size_prefer_dynamic(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
                                : min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
    };
    typedef Block<CoefficientsType, 1, DiagonalSize> BuildType;
    typedef std::conditional_t<Conjugate, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, BuildType>, BuildType>
        Type;
  };

  /** \returns a vector expression of the \a N -th sub or super diagonal */
  template <int N>
  inline typename DiagonalIntReturnType<N>::Type diagonal() {
    return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1,
                                                        diagonalLength(N));
  }

  /** \returns a vector expression of the \a N -th sub or super diagonal */
  template <int N>
  inline const typename DiagonalIntReturnType<N>::Type diagonal() const {
    return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1,
                                                        diagonalLength(N));
  }

  /** \returns a vector expression of the \a i -th sub or super diagonal */
  inline Block<CoefficientsType, 1, Dynamic> diagonal(Index i) {
    eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers()));
    return Block<CoefficientsType, 1, Dynamic>(coeffs(), supers() - i, std::max<Index>(0, i), 1, diagonalLength(i));
  }

  /** \returns a vector expression of the \a i -th sub or super diagonal */
  inline const Block<const CoefficientsType, 1, Dynamic> diagonal(Index i) const {
    eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers()));
    return Block<const CoefficientsType, 1, Dynamic>(coeffs(), supers() - i, std::max<Index>(0, i), 1,
                                                     diagonalLength(i));
  }

  // Expands the band into a full dense matrix \a dst, diagonal by diagonal;
  // entries outside the band are zeroed.
  template <typename Dest>
  inline void evalTo(Dest& dst) const {
    dst.resize(rows(), cols());
    dst.setZero();
    dst.diagonal() = diagonal();
    for (Index i = 1; i <= supers(); ++i) dst.diagonal(i) = diagonal(i);
    for (Index i = 1; i <= subs(); ++i) dst.diagonal(-i) = diagonal(-i);
  }

  // Convenience: returns the band expanded into a plain dense matrix.
  DenseMatrixType toDenseMatrix() const {
    DenseMatrixType res(rows(), cols());
    evalTo(res);
    return res;
  }

 protected:
  // Length of the \a i -th diagonal, clipped to the matrix extent.
  inline Index diagonalLength(Index i) const {
    return i < 0 ? (std::min)(cols(), rows() + i) : (std::min)(rows(), cols() - i);
  }
};
/**
* \class BandMatrix
* \ingroup Core_Module
*
* \brief Represents a rectangular matrix with a banded storage
*
* \tparam Scalar_ Numeric type, i.e. float, double, int
* \tparam Rows_ Number of rows, or \b Dynamic
* \tparam Cols_ Number of columns, or \b Dynamic
* \tparam Supers_ Number of super diagonal
* \tparam Subs_ Number of sub diagonal
* \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint
* The former controls \ref TopicStorageOrders "storage order", and defaults to
* column-major. The latter controls whether the matrix represents a selfadjoint
* matrix in which case either Supers or Subs has to be null.
*
* \sa class TridiagonalMatrix
*/
// Expression traits for BandMatrix: plain dense storage, lvalue semantics,
// and the compact coefficient matrix type used for the band storage.
template <typename Scalar_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct traits<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_>> {
  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef Eigen::Index StorageIndex;
  enum {
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    RowsAtCompileTime = Rows_,
    ColsAtCompileTime = Cols_,
    MaxRowsAtCompileTime = Rows_,
    MaxColsAtCompileTime = Cols_,
    Flags = LvalueBit,
    Supers = Supers_,
    Subs = Subs_,
    Options = Options_,
    // One storage row per diagonal; Dynamic if either band width is dynamic.
    DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic
  };
  // Compact storage: (1 + supers + subs) x cols, honoring the requested
  // storage order.
  typedef Matrix<Scalar, DataRowsAtCompileTime, ColsAtCompileTime, int(Options) & int(RowMajor) ? RowMajor : ColMajor>
      CoefficientsType;
};
// Owning banded matrix: stores its band in a compact
// (1 + supers + subs) x cols coefficient matrix (see BandMatrixBase).
template <typename Scalar_, int Rows, int Cols, int Supers, int Subs, int Options>
class BandMatrix : public BandMatrixBase<BandMatrix<Scalar_, Rows, Cols, Supers, Subs, Options>> {
 public:
  typedef typename internal::traits<BandMatrix>::Scalar Scalar;
  typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex;
  typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;

  // Allocates compact storage of (1 + supers + subs) rows by cols columns.
  explicit inline BandMatrix(Index rows = Rows, Index cols = Cols, Index supers = Supers, Index subs = Subs)
      : m_coeffs(1 + supers + subs, cols), m_rows(rows), m_supers(supers), m_subs(subs) {}

  /** \returns the number of rows */
  constexpr Index rows() const { return m_rows.value(); }

  /** \returns the number of columns */
  constexpr Index cols() const { return m_coeffs.cols(); }

  /** \returns the number of super diagonals */
  constexpr Index supers() const { return m_supers.value(); }

  /** \returns the number of sub diagonals */
  constexpr Index subs() const { return m_subs.value(); }

  /** \returns the underlying compact coefficient storage (const) */
  inline const CoefficientsType& coeffs() const { return m_coeffs; }

  /** \returns the underlying compact coefficient storage */
  inline CoefficientsType& coeffs() { return m_coeffs; }

 protected:
  CoefficientsType m_coeffs;  // compact band storage, one row per diagonal
  internal::variable_if_dynamic<Index, Rows> m_rows;
  internal::variable_if_dynamic<Index, Supers> m_supers;
  internal::variable_if_dynamic<Index, Subs> m_subs;
};
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
class BandMatrixWrapper;

// Expression traits for BandMatrixWrapper: scalar/storage types are taken
// from the wrapped coefficient expression rather than declared directly.
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct traits<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_>> {
  typedef typename CoefficientsType_::Scalar Scalar;
  typedef typename CoefficientsType_::StorageKind StorageKind;
  typedef typename CoefficientsType_::StorageIndex StorageIndex;
  enum {
    CoeffReadCost = internal::traits<CoefficientsType_>::CoeffReadCost,
    RowsAtCompileTime = Rows_,
    ColsAtCompileTime = Cols_,
    MaxRowsAtCompileTime = Rows_,
    MaxColsAtCompileTime = Cols_,
    Flags = LvalueBit,
    Supers = Supers_,
    Subs = Subs_,
    Options = Options_,
    // One storage row per diagonal; Dynamic if either band width is dynamic.
    DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic
  };
  typedef CoefficientsType_ CoefficientsType;
};
// Non-owning banded-matrix view over an existing compact coefficient
// expression. Holds only a reference; the wrapped coefficients must
// outlive the wrapper.
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
class BandMatrixWrapper
    : public BandMatrixBase<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_>> {
 public:
  typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
  typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
  typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex;

  explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows = Rows_, Index cols = Cols_,
                                    Index supers = Supers_, Index subs = Subs_)
      : m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) {
    // cols is implied by the wrapped storage; the parameter is kept for
    // interface symmetry with BandMatrix.
    EIGEN_UNUSED_VARIABLE(cols);
    // eigen_assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
  }

  /** \returns the number of rows */
  constexpr Index rows() const { return m_rows.value(); }

  /** \returns the number of columns */
  constexpr Index cols() const { return m_coeffs.cols(); }

  /** \returns the number of super diagonals */
  constexpr Index supers() const { return m_supers.value(); }

  /** \returns the number of sub diagonals */
  constexpr Index subs() const { return m_subs.value(); }

  /** \returns the wrapped compact coefficient expression */
  inline const CoefficientsType& coeffs() const { return m_coeffs; }

 protected:
  const CoefficientsType& m_coeffs;  // non-owning reference to the band data
  internal::variable_if_dynamic<Index, Rows_> m_rows;
  internal::variable_if_dynamic<Index, Supers_> m_supers;
  internal::variable_if_dynamic<Index, Subs_> m_subs;
};
/**
* \class TridiagonalMatrix
* \ingroup Core_Module
*
* \brief Represents a tridiagonal matrix with a compact banded storage
*
* \tparam Scalar Numeric type, i.e. float, double, int
* \tparam Size Number of rows and cols, or \b Dynamic
* \tparam Options Can be 0 or \b SelfAdjoint
*
* \sa class BandMatrix
*/
// Square band matrix with one sub- and one super-diagonal, stored row-major.
// In the SelfAdjoint case only the sub-diagonal is stored (Supers == 0) and
// the super-diagonal is recovered from it via BandMatrixBase's
// DiagonalIntReturnType redirection.
template <typename Scalar, int Size, int Options>
class TridiagonalMatrix
    : public BandMatrix<Scalar, Size, Size, Options & SelfAdjoint ? 0 : 1, 1, Options | RowMajor> {
  typedef BandMatrix<Scalar, Size, Size, Options & SelfAdjoint ? 0 : 1, 1, Options | RowMajor> Base;
  typedef typename Base::StorageIndex StorageIndex;

 public:
  explicit TridiagonalMatrix(Index size = Size) : Base(size, size, Options & SelfAdjoint ? 0 : 1, 1) {}

  /** \returns an expression of the super-diagonal */
  inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); }

  /** \returns an expression of the super-diagonal (const version) */
  inline const typename Base::template DiagonalIntReturnType<1>::Type super() const {
    return Base::template diagonal<1>();
  }

  /** \returns an expression of the sub-diagonal */
  inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); }

  /** \returns an expression of the sub-diagonal (const version) */
  inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const {
    return Base::template diagonal<-1>();
  }

 protected:
};
// Shape tag used by the assignment/evaluator machinery to identify banded
// expressions.
struct BandShape {};

// Give BandMatrix the BandShape tag so assignments pick the band-aware path.
template <typename Scalar_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct evaluator_traits<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_>>
    : public evaluator_traits_base<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_>> {
  typedef BandShape Shape;
};

// Same tagging for the non-owning wrapper.
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct evaluator_traits<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_>>
    : public evaluator_traits_base<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_>> {
  typedef BandShape Shape;
};

// dense = banded assignments go through the generic EigenBase-to-EigenBase
// path (i.e. BandMatrixBase::evalTo).
template <>
struct AssignmentKind<DenseShape, BandShape> {
  typedef EigenBase2EigenBase Kind;
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_BANDMATRIX_H
eigen-master/Eigen/src/Core/Block.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BLOCK_H
#define EIGEN_BLOCK_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Expression traits for Block: derives sizes, storage order, strides and
// flags of a block expression from those of the nested expression XprType_.
template <typename XprType_, int BlockRows, int BlockCols, bool InnerPanel_>
struct traits<Block<XprType_, BlockRows, BlockCols, InnerPanel_>> : traits<XprType_> {
  typedef typename traits<XprType_>::Scalar Scalar;
  typedef typename traits<XprType_>::StorageKind StorageKind;
  typedef typename traits<XprType_>::XprKind XprKind;
  typedef typename ref_selector<XprType_>::type XprTypeNested;
  typedef std::remove_reference_t<XprTypeNested> XprTypeNested_;
  enum {
    MatrixRows = traits<XprType_>::RowsAtCompileTime,
    MatrixCols = traits<XprType_>::ColsAtCompileTime,
    // An empty nested expression forces an empty block.
    RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
    ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
    MaxRowsAtCompileTime = BlockRows == 0 ? 0
                           : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
                                                          : int(traits<XprType_>::MaxRowsAtCompileTime),
    MaxColsAtCompileTime = BlockCols == 0 ? 0
                           : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
                                                          : int(traits<XprType_>::MaxColsAtCompileTime),
    XprTypeIsRowMajor = (int(traits<XprType_>::Flags) & RowMajorBit) != 0,
    // A compile-time row (column) vector block is reported as row-major
    // (column-major) regardless of the nested expression's order.
    IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1)   ? 1
                 : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
                                                                            : XprTypeIsRowMajor,
    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
    // If the block's effective order differs from the nested expression's,
    // its inner/outer strides are swapped relative to the nested ones.
    InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time<XprType_>::ret)
                                                            : int(outer_stride_at_compile_time<XprType_>::ret),
    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time<XprType_>::ret)
                                                            : int(inner_stride_at_compile_time<XprType_>::ret),
    // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further
    FlagsLvalueBit = is_lvalue<XprType_>::value ? LvalueBit : 0,
    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
    Flags = (traits<XprType_>::Flags & (DirectAccessBit | (InnerPanel_ ? CompressedAccessBit : 0))) | FlagsLvalueBit |
            FlagsRowMajorBit,
    // FIXME DirectAccessBit should not be handled by expressions
    //
    // Alignment is needed by MapBase's assertions
    // We can safely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in
    // the respective evaluator
    Alignment = 0,
    InnerPanel = InnerPanel_ ? 1 : 0
  };
};
// Forward declaration of the dense block implementation; specialized on
// whether the nested expression provides direct (pointer-based) access.
template <typename XprType, int BlockRows = Dynamic, int BlockCols = Dynamic, bool InnerPanel = false,
          bool HasDirectAccess = internal::has_direct_access<XprType>::ret>
class BlockImpl_dense;

}  // end namespace internal

// Generic block implementation, specialized per storage kind (Dense, ...).
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind>
class BlockImpl;
/** \class Block
* \ingroup Core_Module
*
* \brief Expression of a fixed-size or dynamic-size block
*
* \tparam XprType the type of the expression in which we are taking a block
* \tparam BlockRows the number of rows of the block we are taking at compile time (optional)
* \tparam BlockCols the number of columns of the block we are taking at compile time (optional)
* \tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or
* to set of columns of a column major matrix (optional). The parameter allows to determine
* at compile time whether aligned access is possible on the block expression.
*
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return
* type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
* most of the time this is the only way it is used.
*
* However, if you want to directly manipulate block expressions,
* for instance if you want to write a function returning such an expression, you
* will need to use this class.
*
* Here is an example illustrating the dynamic case:
* \include class_Block.cpp
* Output: \verbinclude class_Block.out
*
* \note Even though this expression has dynamic size, in the case where \a XprType
* has fixed size, this expression inherits a fixed maximal size which means that evaluating
* it does not cause a dynamic memory allocation.
*
* Here is an example illustrating the fixed-size case:
* \include class_FixedBlock.cpp
* Output: \verbinclude class_FixedBlock.out
*
* \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
*/
// Public Block expression class; all real work is delegated to the
// storage-kind-specific BlockImpl base. See the Doxygen comment above for
// the user-facing documentation.
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class Block : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel,
                               typename internal::traits<XprType>::StorageKind> {
  typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;
  using BlockHelper = internal::block_xpr_helper<Block>;

 public:
  // typedef typename Impl::Base Base;
  typedef Impl Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
  typedef internal::remove_all_t<XprType> NestedExpression;

  /** Column or Row constructor
   * \a i is a row index if this block type is a compile-time row, a column
   * index if it is a compile-time column; any other shape is invalid here.
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index i) : Impl(xpr, i) {
    eigen_assert((i >= 0) && (((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) && i < xpr.rows()) ||
                              ((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) && i < xpr.cols())));
  }

  /** Fixed-size constructor
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol)
      : Impl(xpr, startRow, startCol) {
    EIGEN_STATIC_ASSERT(RowsAtCompileTime != Dynamic && ColsAtCompileTime != Dynamic,
                        THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
    eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() && startCol >= 0 &&
                 BlockCols >= 0 && startCol + BlockCols <= xpr.cols());
  }

  /** Dynamic-size constructor
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol, Index blockRows,
                                              Index blockCols)
      : Impl(xpr, startRow, startCol, blockRows, blockCols) {
    // Run-time sizes must agree with any compile-time sizes, and the block
    // must lie entirely inside the nested expression.
    eigen_assert((RowsAtCompileTime == Dynamic || RowsAtCompileTime == blockRows) &&
                 (ColsAtCompileTime == Dynamic || ColsAtCompileTime == blockCols));
    eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows && startCol >= 0 &&
                 blockCols >= 0 && startCol <= xpr.cols() - blockCols);
  }

  // convert nested blocks (e.g. Block<Block<MatrixType>>) to a simple block expression (Block<MatrixType>)
  using ConstUnwindReturnType = Block<const typename BlockHelper::BaseType, BlockRows, BlockCols, InnerPanel>;
  using UnwindReturnType = Block<typename BlockHelper::BaseType, BlockRows, BlockCols, InnerPanel>;

  /** \returns this block re-expressed directly on the innermost nested
   * expression (const version), collapsing chains of nested blocks. */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ConstUnwindReturnType unwind() const {
    return ConstUnwindReturnType(BlockHelper::base(*this), BlockHelper::row(*this, 0), BlockHelper::col(*this, 0),
                                 this->rows(), this->cols());
  }

  /** \returns this block re-expressed directly on the innermost nested
   * expression; only available when the block type is non-const. */
  template <typename T = Block, typename EnableIf = std::enable_if_t<!std::is_const<T>::value>>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UnwindReturnType unwind() {
    return UnwindReturnType(BlockHelper::base(*this), BlockHelper::row(*this, 0), BlockHelper::col(*this, 0),
                            this->rows(), this->cols());
  }
};
// The generic default implementation for dense block simply forward to the internal::BlockImpl_dense
// that must be specialized for direct and non-direct access...
// Dense-storage specialization of BlockImpl: simply forwards every
// constructor to internal::BlockImpl_dense, which is itself specialized on
// whether the nested expression has direct access.
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
    : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> {
  typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
  typedef typename XprType::StorageIndex StorageIndex;

 public:
  typedef Impl Base;
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
  // Column/row constructor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr, i) {}
  // Fixed-size constructor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol)
      : Impl(xpr, startRow, startCol) {}
  // Dynamic-size constructor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows,
                                                  Index blockCols)
      : Impl(xpr, startRow, startCol, blockRows, blockCols) {}
};
namespace
internal
{
/** \internal Internal implementation of dense Blocks in the general case.
 *
 * All coefficient accessors simply shift the requested indices by the stored
 * (startRow, startCol) offsets and forward to the nested expression.
 */
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
class BlockImpl_dense : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel>>::type {
  typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
  typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;

 public:
  typedef typename internal::dense_xpr_base<BlockType>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)

  // class InnerIterator; // FIXME apparently never used

  /** Column or Row constructor
   */
  EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i)
      : m_xpr(xpr),
        // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
        // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
        // all other cases are invalid.
        // The case a 1x1 matrix seems ambiguous, but the result is the same anyway.
        m_startRow((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) ? i : 0),
        m_startCol((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) ? i : 0),
        m_blockRows(BlockRows == 1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols == 1 ? 1 : xpr.cols()) {}

  /** Fixed-size constructor
   */
  EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) {}

  /** Dynamic-size constructor
   */
  EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows,
                                           Index blockCols)
      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {}

  EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); }
  EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); }

  // 2D coefficient access: offset by the block's start position and forward.
  EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) {
    EIGEN_STATIC_ASSERT_LVALUE(XprType)
    return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
  }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
    return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const {
    return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
  }

  // Linear-index access: when RowsAtCompileTime==1 the index advances along
  // columns, otherwise it advances along rows.
  EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) {
    EIGEN_STATIC_ASSERT_LVALUE(XprType)
    return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
  }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
    return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
  }

  EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const {
    return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
  }

  // Packet accessors are forwarded with an Unaligned mode: a block start is
  // in general not aligned with respect to the nested expression's storage.
  template <int LoadMode>
  EIGEN_DEVICE_FUNC inline PacketScalar packet(Index rowId, Index colId) const {
    return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value());
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val) {
    m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val);
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const {
    return m_xpr.template packet<Unaligned>(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val) {
    m_xpr.template writePacket<Unaligned>(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                                          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
  }

#ifdef EIGEN_PARSED_BY_DOXYGEN
  /** \sa MapBase::data() */
  EIGEN_DEVICE_FUNC constexpr const Scalar* data() const;
  EIGEN_DEVICE_FUNC inline Index innerStride() const;
  EIGEN_DEVICE_FUNC inline Index outerStride() const;
#endif

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const {
    return m_xpr;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startRow() const noexcept { return m_startRow.value(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startCol() const noexcept { return m_startCol.value(); }

 protected:
  XprTypeNested m_xpr;
  // The start offsets collapse to compile-time 0 when the block is forced to
  // begin at the origin of that dimension (vector expression, 1-sized block).
  const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows == 1) ? 0 : Dynamic>
      m_startRow;
  const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols == 1) ? 0 : Dynamic>
      m_startCol;
  const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;
  const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;
};
/** \internal Internal implementation of dense Blocks in the direct access case.
 *
 * Here the block is implemented on top of MapBase: the constructor computes
 * the pointer to the first block coefficient once, and subsequent accesses go
 * through MapBase's pointer arithmetic.
 */
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel, true>
    : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel>> {
  typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
  typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
  enum { XprTypeIsRowMajor = (int(traits<XprType>::Flags) & RowMajorBit) != 0 };

  /** \internal Returns base+offset (unless base is null, in which case returns null).
   * Adding an offset to nullptr is undefined behavior, so we must avoid it.
   */
  template <typename Scalar>
  EIGEN_DEVICE_FUNC constexpr EIGEN_ALWAYS_INLINE static Scalar* add_to_nullable_pointer(Scalar* base, Index offset) {
    return base != nullptr ? base + offset : nullptr;
  }

 public:
  typedef MapBase<BlockType> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)

  /** Column or Row constructor
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index i)
      : Base((BlockRows == 0 || BlockCols == 0)
                 ? nullptr
                 // The stride multiplying i is the inner stride when the selected
                 // row/column is contiguous in memory, the outer stride otherwise.
                 : add_to_nullable_pointer(
                       xpr.data(),
                       i * (((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) ||
                                    ((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) &&
                                     (XprTypeIsRowMajor))
                                ? xpr.innerStride()
                                : xpr.outerStride())),
             BlockRows == 1 ? 1 : xpr.rows(), BlockCols == 1 ? 1 : xpr.cols()),
        m_xpr(xpr),
        m_startRow((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) ? i : 0),
        m_startCol((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) ? i : 0) {
    init();
  }

  /** Fixed-size constructor
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
      : Base((BlockRows == 0 || BlockCols == 0)
                 ? nullptr
                 : add_to_nullable_pointer(xpr.data(),
                                           xpr.innerStride() * (XprTypeIsRowMajor ? startCol : startRow) +
                                               xpr.outerStride() * (XprTypeIsRowMajor ? startRow : startCol))),
        m_xpr(xpr),
        m_startRow(startRow),
        m_startCol(startCol) {
    init();
  }

  /** Dynamic-size constructor
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows,
                                                        Index blockCols)
      : Base((blockRows == 0 || blockCols == 0)
                 ? nullptr
                 : add_to_nullable_pointer(xpr.data(),
                                           xpr.innerStride() * (XprTypeIsRowMajor ? startCol : startRow) +
                                               xpr.outerStride() * (XprTypeIsRowMajor ? startRow : startCol)),
             blockRows, blockCols),
        m_xpr(xpr),
        m_startRow(startRow),
        m_startCol(startCol) {
    init();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const noexcept {
    return m_xpr;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; }

  /** \sa MapBase::innerStride() */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index innerStride() const noexcept {
    return internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.innerStride() : m_xpr.outerStride();
  }

  /** \sa MapBase::outerStride() */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index outerStride() const noexcept {
    return internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startRow() const noexcept { return m_startRow.value(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startCol() const noexcept { return m_startCol.value(); }

#ifndef __SUNPRO_CC
  // FIXME sunstudio is not friendly with the above friend...
  // META-FIXME there is no 'friend' keyword around here. Is this obsolete?
 protected:
#endif

#ifndef EIGEN_PARSED_BY_DOXYGEN
  /** \internal used by allowAligned() */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows,
                                                        Index blockCols)
      : Base(data, blockRows, blockCols), m_xpr(xpr) {
    init();
  }
#endif

 protected:
  // Caches the block's outer stride, which depends on whether the block's
  // storage order matches the nested expression's.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void init() {
    m_outerStride =
        internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride();
  }

  XprTypeNested m_xpr;
  const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows == 1) ? 0 : Dynamic>
      m_startRow;
  const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols == 1) ? 0 : Dynamic>
      m_startCol;
  Index m_outerStride;
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_BLOCK_H
eigen-master/Eigen/src/Core/CommaInitializer.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMMAINITIALIZER_H
#define EIGEN_COMMAINITIALIZER_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class CommaInitializer
 * \ingroup Core_Module
 *
 * \brief Helper class used by the comma initializer operator
 *
 * This class is internally used to implement the comma initializer feature. It is
 * the return type of MatrixBase::operator<<, and most of the time this is the only
 * way it is used.
 *
 * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
 */
template <typename XprType>
struct CommaInitializer {
  typedef typename XprType::Scalar Scalar;

  // Starts initialization with a single scalar written at (0,0).
  EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s)
      : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) {
    eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0 && "Cannot comma-initialize a 0x0 matrix (operator<<)");
    m_xpr.coeffRef(0, 0) = s;
  }

  // Starts initialization with a matrix expression copied into the top-left corner.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
      : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) {
    eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols() &&
                 "Cannot comma-initialize a 0x0 matrix (operator<<)");
    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(0, 0, other.rows(),
                                                                                          other.cols()) = other;
  }

  /* Copy/Move constructor which transfers ownership. This is crucial in
   * absence of return value optimization to avoid assertions during destruction. */
  // FIXME in C++11 mode this could be replaced by a proper RValue constructor
  EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o)
      : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
    // Mark original object as finished. In absence of R-value references we need to const_cast:
    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
  }

  /* inserts a scalar value in the target matrix */
  EIGEN_DEVICE_FUNC CommaInitializer& operator,(const Scalar& s) {
    if (m_col == m_xpr.cols()) {
      // Current row is full: wrap to the beginning of the next row.
      m_row += m_currentBlockRows;
      m_col = 0;
      m_currentBlockRows = 1;
      eigen_assert(m_row < m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)");
    }
    eigen_assert(m_col < m_xpr.cols() && "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows == 1);
    m_xpr.coeffRef(m_row, m_col++) = s;
    return *this;
  }

  /* inserts a matrix expression in the target matrix */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC CommaInitializer& operator,(const DenseBase<OtherDerived>& other) {
    if (m_col == m_xpr.cols() && (other.cols() != 0 || other.rows() != m_currentBlockRows)) {
      m_row += m_currentBlockRows;
      m_col = 0;
      m_currentBlockRows = other.rows();
      eigen_assert(m_row + m_currentBlockRows <= m_xpr.rows() &&
                   "Too many rows passed to comma initializer (operator<<)");
    }
    eigen_assert((m_col + other.cols() <= m_xpr.cols()) &&
                 "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows == other.rows());
    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(m_row, m_col, other.rows(),
                                                                                           other.cols()) = other;
    m_col += other.cols();
    return *this;
  }

  // The destructor runs the completeness check via finished().
  EIGEN_DEVICE_FUNC inline ~CommaInitializer()
#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
      noexcept(false)  // Eigen::eigen_assert_exception
#endif
  {
    finished();
  }

  /** \returns the built matrix once all its coefficients have been set.
   * Calling finished is 100% optional. Its purpose is to write expressions
   * like this:
   * \code
   * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
   * \endcode
   */
  EIGEN_DEVICE_FUNC inline XprType& finished() {
    eigen_assert(((m_row + m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() &&
                 "Too few coefficients passed to comma initializer (operator<<)");
    return m_xpr;
  }

  XprType& m_xpr;            // target expression
  Index m_row;               // current row id
  Index m_col;               // current col id
  Index m_currentBlockRows;  // current block height
};
/** \anchor MatrixBaseCommaInitRef
 * Convenient operator to set the coefficients of a matrix.
 *
 * The coefficients must be provided in a row major order and exactly match
 * the size of the matrix. Otherwise an assertion is raised.
 *
 * Example: \include MatrixBase_set.cpp
 * Output: \verbinclude MatrixBase_set.out
 *
 * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary
 * order.
 *
 * \sa CommaInitializer::finished(), class CommaInitializer
 */
template <typename Derived>
EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(const Scalar& s) {
  // Hand the derived expression and the first scalar over to the helper,
  // which performs all subsequent bookkeeping.
  Derived& target = *static_cast<Derived*>(this);
  return CommaInitializer<Derived>(target, s);
}
/** \sa operator<<(const Scalar&) */
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(
    const DenseBase<OtherDerived>& other) {
  // Same as the scalar overload, but the first inserted element is a whole
  // matrix expression.
  Derived& target = *static_cast<Derived*>(this);
  return CommaInitializer<Derived>(target, other);
}
}
// end namespace Eigen
#endif // EIGEN_COMMAINITIALIZER_H
eigen-master/Eigen/src/Core/ConditionEstimator.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONDITIONESTIMATOR_H
#define EIGEN_CONDITIONESTIMATOR_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Elementwise "sign" of a (possibly complex) vector: v_i / |v_i|, with zero
// entries mapped to 1 so every output entry has unit modulus.
template <typename Vector, typename RealVector, bool IsComplex>
struct rcond_compute_sign {
  static inline Vector run(const Vector& v) {
    typedef typename Vector::RealScalar RealScalar;
    const RealVector magnitudes = v.cwiseAbs();
    // Where the magnitude vanishes substitute 1, elsewhere normalize.
    return (magnitudes.array() == static_cast<RealScalar>(0))
        .select(Vector::Ones(v.size()), v.cwiseQuotient(magnitudes));
  }
};
// Partial specialization to avoid elementwise division for real vectors.
template <typename Vector>
struct rcond_compute_sign<Vector, Vector, false> {
  static inline Vector run(const Vector& v) {
    typedef typename Vector::RealScalar RealScalar;
    // Real case: -1 for negative entries, +1 otherwise (zero maps to +1,
    // matching the generic implementation above).
    const Vector ones = Vector::Ones(v.size());
    return (v.array() < static_cast<RealScalar>(0)).select(-ones, ones);
  }
};
/**
 * \returns an estimate of ||inv(matrix)||_1 given a decomposition of
 * \a matrix that implements .solve() and .adjoint().solve() methods.
 *
 * This function implements Algorithms 4.1 and 5.1 from
 *   http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
 * which also forms the basis for the condition number estimators in
 * LAPACK. Since at most 10 calls to the solve method of dec are
 * performed, the total cost is O(dims^2), as opposed to O(dims^3)
 * needed to compute the inverse matrix explicitly.
 *
 * The most common usage is in estimating the condition number
 * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
 * computed directly in O(n^2) operations.
 *
 * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
 * LLT.
 *
 * \sa FullPivLU, PartialPivLU, LDLT, LLT.
 */
template <typename Decomposition>
typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) {
  typedef typename Decomposition::MatrixType MatrixType;
  typedef typename Decomposition::Scalar Scalar;
  typedef typename Decomposition::RealScalar RealScalar;
  typedef typename internal::plain_col_type<MatrixType>::type Vector;
  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
  const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);

  eigen_assert(dec.rows() == dec.cols());
  const Index n = dec.rows();
  if (n == 0) return 0;

// Disable Index to float conversion warning
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning(disable : 2259)
#endif
  // Initial probe: v = inv(matrix) * (uniform vector of mass 1).
  Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif

  // lower_bound is a lower bound on
  //   ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
  // and is the objective maximized by the ("super-") gradient ascent
  // algorithm below.
  RealScalar lower_bound = v.template lpNorm<1>();
  if (n == 1) return lower_bound;

  // Gradient ascent algorithm follows: We know that the optimum is achieved at
  // one of the simplices v = e_i, so in each iteration we follow a
  // super-gradient to move towards the optimal one.
  RealScalar old_lower_bound = lower_bound;
  Vector sign_vector(n);
  Vector old_sign_vector;
  Index v_max_abs_index = -1;
  Index old_v_max_abs_index = v_max_abs_index;
  for (int k = 0; k < 4; ++k) {
    sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
    if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
      // Break if the solution stagnated.
      break;
    }
    // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
    v = dec.adjoint().solve(sign_vector);
    v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
    if (v_max_abs_index == old_v_max_abs_index) {
      // Break if the solution stagnated.
      break;
    }
    // Move to the new simplex e_j, where j = v_max_abs_index.
    v = dec.solve(Vector::Unit(n, v_max_abs_index));  // v = inv(matrix) * e_j.
    lower_bound = v.template lpNorm<1>();
    if (lower_bound <= old_lower_bound) {
      // Break if the gradient step did not increase the lower_bound.
      break;
    }
    if (!is_complex) {
      old_sign_vector = sign_vector;
    }
    old_v_max_abs_index = v_max_abs_index;
    old_lower_bound = lower_bound;
  }
  // The following calculates an independent estimate of ||matrix||_1 by
  // multiplying matrix by a vector with entries of slowly increasing
  // magnitude and alternating sign:
  //   v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
  // This improvement to Hager's algorithm above is due to Higham. It was
  // added to make the algorithm more robust in certain corner cases where
  // large elements in the matrix might otherwise escape detection due to
  // exact cancellation (especially when op and op_adjoint correspond to a
  // sequence of backsubstitutions and permutations), which could cause
  // Hager's algorithm to vastly underestimate ||matrix||_1.
  Scalar alternating_sign(RealScalar(1));
  for (Index i = 0; i < n; ++i) {
    // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates
    v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
    alternating_sign = -alternating_sign;
  }
  v = dec.solve(v);
  const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
  // Return the better (larger) of the two lower bounds.
  return numext::maxi(lower_bound, alternate_lower_bound);
}
/** \brief Reciprocal condition number estimator.
 *
 * Computing a decomposition of a dense matrix takes O(n^3) operations, while
 * this method estimates the condition number quickly and reliably in O(n^2)
 * operations.
 *
 * \returns an estimate of the reciprocal condition number
 * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
 * its decomposition. Supports the following decompositions: FullPivLU,
 * PartialPivLU, LDLT, and LLT.
 *
 * \sa FullPivLU, PartialPivLU, LDLT, LLT.
 */
template <typename Decomposition>
typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm,
                                                         const Decomposition& dec) {
  typedef typename Decomposition::RealScalar RealScalar;
  eigen_assert(dec.rows() == dec.cols());
  // Degenerate cases handled up front: empty matrix, exactly singular norm,
  // and the trivial 1x1 case.
  if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
  if (numext::is_exactly_zero(matrix_norm)) return RealScalar(0);
  if (dec.rows() == 1) return RealScalar(1);
  const RealScalar inv_norm_estimate = rcond_invmatrix_L1_norm_estimate(dec);
  if (numext::is_exactly_zero(inv_norm_estimate)) return RealScalar(0);
  return (RealScalar(1) / inv_norm_estimate) / matrix_norm;
}
}
// namespace internal
}
// namespace Eigen
#endif
eigen-master/Eigen/src/Core/CoreEvaluators.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COREEVALUATORS_H
#define EIGEN_COREEVALUATORS_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// This class returns the evaluator kind from the expression storage kind.
// Default assumes index based accessors
template <typename StorageKind>
struct storage_kind_to_evaluator_kind {
  typedef IndexBased Kind;
};
// This class returns the evaluator shape from the expression storage kind.
// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
// The primary template is only declared; each storage kind provides an
// explicit specialization below.
template <typename StorageKind>
struct storage_kind_to_shape;

template <>
struct storage_kind_to_shape<Dense> {
  typedef DenseShape Shape;
};
template <>
struct storage_kind_to_shape<SolverStorage> {
  typedef SolverShape Shape;
};
template <>
struct storage_kind_to_shape<PermutationStorage> {
  typedef PermutationShape Shape;
};
template <>
struct storage_kind_to_shape<TranspositionsStorage> {
  typedef TranspositionsShape Shape;
};
// Evaluators have to be specialized with respect to various criteria such as:
//  - storage/structure/shape
//  - scalar type
//  - etc.
// Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators.
// We currently distinguish the following kind of evaluators:
// - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose,
//   MatrixWrapper, ArrayWrapper, Reverse, Replicate)
// - binary_evaluator for expression taking two arguments (CwiseBinaryOp)
// - ternary_evaluator for expression taking three arguments (CwiseTernaryOp)
// - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires
//   additional tags for dispatching.
// - mapbase_evaluator for Map, Block, Ref
// - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)

// Forward declaration only: the default template arguments expose each
// operand's evaluator kind and scalar type for specialization.
template <typename T, typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind,
          typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind,
          typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind,
          typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
          typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
          typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar>
struct ternary_evaluator;
// Forward declaration of the two-operand evaluator; kinds and scalars of both
// operands are exposed as template parameters for specialization.
template <typename T, typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
          typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
          typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
          typename RhsScalar = typename traits<typename T::Rhs>::Scalar>
struct binary_evaluator;
// Forward declaration of the single-operand evaluator; the nested
// expression's kind and the expression's scalar are exposed for specialization.
template <typename T, typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind,
          typename Scalar = typename T::Scalar>
struct unary_evaluator;
// evaluator_traits<T> contains traits for evaluator<T>
template <typename T>
struct evaluator_traits_base {
  // by default, get evaluator kind and shape from storage
  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
};

// Default evaluator traits
template <typename T>
struct evaluator_traits : public evaluator_traits_base<T> {};
// By default, no aliasing is assumed (value == false); specializations may
// override this per expression type/shape.
template <typename T, typename Shape = typename evaluator_traits<T>::Shape>
struct evaluator_assume_aliasing {
  static const bool value = false;
};
// By default, we assume a unary expression:
template <typename T>
struct evaluator : public unary_evaluator<T> {
  typedef unary_evaluator<T> Base;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const T& xpr) : Base(xpr) {}
};

// TODO: Think about const-correctness
// A const expression evaluates exactly like the non-const one.
template <typename T>
struct evaluator<const T> : evaluator<T> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};
// ---------- base class for all evaluators ----------

template <typename ExpressionType>
struct evaluator_base {
  // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle
  // outer,inner indices.
  typedef traits<ExpressionType> ExpressionTraits;

  enum { Alignment = 0 };
  // noncopyable:
  // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)
  // and make complex evaluator much larger than then should do.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator_base() = default;

 private:
  // Declared (not defined) private copy operations to forbid copying without
  // losing EBO.
  EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);
  EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);
};
// -------------------- Matrix and Array --------------------
//
// evaluator<PlainObjectBase> is a common base class for the
// Matrix and Array evaluators.
// Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
// so no need for more sophisticated dispatching.

// this helper permits to completely eliminate m_outerStride if it is known at compiletime.
template <typename Scalar, int OuterStride>
class plainobjectbase_evaluator_data {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride)
      : data(ptr) {
#ifndef EIGEN_INTERNAL_DEBUGGING
    EIGEN_UNUSED_VARIABLE(outerStride);
#endif
    // The runtime value must agree with the compile-time stride.
    eigen_internal_assert(outerStride == OuterStride);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index outerStride() const noexcept { return OuterStride; }
  const Scalar* data;
};

// Dynamic-stride variant: the outer stride is stored as a member.
template <typename Scalar>
class plainobjectbase_evaluator_data<Scalar, Dynamic> {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride)
      : data(ptr), m_outerStride(outerStride) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index outerStride() const { return m_outerStride; }
  const Scalar* data;

 protected:
  Index m_outerStride;
};
template
<
typename
Derived
>
struct
evaluator
<
PlainObjectBase
<
Derived
>>
:
evaluator_base
<
Derived
>
{
typedef
PlainObjectBase
<
Derived
>
PlainObjectType
;
typedef
typename
PlainObjectType
::
Scalar
Scalar
;
typedef
typename
PlainObjectType
::
CoeffReturnType
CoeffReturnType
;
enum
{
IsRowMajor
=
PlainObjectType
::
IsRowMajor
,
IsVectorAtCompileTime
=
PlainObjectType
::
IsVectorAtCompileTime
,
RowsAtCompileTime
=
PlainObjectType
::
RowsAtCompileTime
,
ColsAtCompileTime
=
PlainObjectType
::
ColsAtCompileTime
,
CoeffReadCost
=
NumTraits
<
Scalar
>::
ReadCost
,
Flags
=
traits
<
Derived
>::
EvaluatorFlags
,
Alignment
=
traits
<
Derived
>::
Alignment
};
enum
{
// We do not need to know the outer stride for vectors
OuterStrideAtCompileTime
=
IsVectorAtCompileTime
?
0
:
int
(
IsRowMajor
)
?
ColsAtCompileTime
:
RowsAtCompileTime
};
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
constexpr
evaluator
()
:
m_d
(
0
,
OuterStrideAtCompileTime
)
{
EIGEN_INTERNAL_CHECK_COST_VALUE
(
CoeffReadCost
);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
constexpr
explicit
evaluator
(
const
PlainObjectType
&
m
)
:
m_d
(
m
.
data
(),
IsVectorAtCompileTime
?
0
:
m
.
outerStride
())
{
EIGEN_INTERNAL_CHECK_COST_VALUE
(
CoeffReadCost
);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
constexpr
CoeffReturnType
coeff
(
Index
row
,
Index
col
)
const
{
return
coeff
(
getIndex
(
row
,
col
));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
constexpr
CoeffReturnType
coeff
(
Index
index
)
const
{
return
m_d
.
data
[
index
];
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
constexpr
Scalar
&
coeffRef
(
Index
row
,
Index
col
)
{
return
coeffRef
(
getIndex
(
row
,
col
));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
constexpr
Scalar
&
coeffRef
(
Index
index
)
{
return
const_cast
<
Scalar
*>
(
m_d
.
data
)[
index
];
}
template
<
int
LoadMode
,
typename
PacketType
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
PacketType
packet
(
Index
row
,
Index
col
)
const
{
return
packet
<
LoadMode
,
PacketType
>
(
getIndex
(
row
,
col
));
}
template
<
int
LoadMode
,
typename
PacketType
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
PacketType
packet
(
Index
index
)
const
{
return
ploadt
<
PacketType
,
LoadMode
>
(
m_d
.
data
+
index
);
}
template
<
int
StoreMode
,
typename
PacketType
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
void
writePacket
(
Index
row
,
Index
col
,
const
PacketType
&
x
)
{
writePacket
<
StoreMode
,
PacketType
>
(
getIndex
(
row
,
col
),
x
);
}
template
<
int
StoreMode
,
typename
PacketType
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
void
writePacket
(
Index
index
,
const
PacketType
&
x
)
{
pstoret
<
Scalar
,
PacketType
,
StoreMode
>
(
const_cast
<
Scalar
*>
(
m_d
.
data
)
+
index
,
x
);
}
// Partial packet load of `count` lanes starting at lane `begin`; forwards to
// the linear-index overload.
template <int LoadMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
  return packetSegment<LoadMode, PacketType>(getIndex(row, col), begin, count);
}
// Partial packet load at a linear index.
template <int LoadMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
  return ploadtSegment<PacketType, LoadMode>(m_d.data + index, begin, count);
}
// Partial packet store of `count` lanes starting at lane `begin`; forwards to
// the linear-index overload.
template <int StoreMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index row, Index col, const PacketType& x, Index begin,
                                                              Index count) {
  writePacketSegment<StoreMode, PacketType>(getIndex(row, col), x, begin, count);
}
// Partial packet store at a linear index.
template <int StoreMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index index, const PacketType& x, Index begin,
                                                              Index count) {
  pstoretSegment<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x, begin, count);
}
protected:
plainobjectbase_evaluator_data
<
Scalar
,
OuterStrideAtCompileTime
>
m_d
;
private:
// Translate a (row, col) pair into the linear offset within the underlying
// storage, honoring the expression's storage order.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index constexpr getIndex(Index row, Index col) const {
  if (IsRowMajor) {
    return row * m_d.outerStride() + col;
  } else {
    return row + col * m_d.outerStride();
  }
}
};
// Evaluator for dense Matrix objects. All real work is done by the
// PlainObjectBase evaluator; this specialization only forwards construction.
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols>>
    : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols>>> {
  using XprType = Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols>;
  using Base = evaluator<PlainObjectBase<XprType>>;

  // Trivially default-constructible; the base provides the unbound state.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator() = default;

  // Bind the evaluator to a concrete matrix.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr explicit evaluator(const XprType& m) : Base(m) {}
};
// Evaluator for dense Array objects — same forwarding scheme as the Matrix
// specialization above: everything is delegated to the PlainObjectBase
// evaluator.
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols>>
    : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols>>> {
  using XprType = Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols>;
  using Base = evaluator<PlainObjectBase<XprType>>;

  // Trivially default-constructible; the base provides the unbound state.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator() = default;

  // Bind the evaluator to a concrete array.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr explicit evaluator(const XprType& m) : Base(m) {}
};
// -------------------- Transpose --------------------
// Evaluator for Transpose expressions (dense, index-based access).
// Every (row, col) accessor forwards to the nested expression's evaluator
// with the two indices swapped; linear-index accessors forward unchanged.
template <typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IndexBased> : evaluator_base<Transpose<ArgType>> {
  typedef Transpose<ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    // XOR-ing RowMajorBit: transposition flips the storage order of the
    // argument.
    Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Read access: (row, col) swapped.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_argImpl.coeff(col, row);
  }

  // Linear read access: the index is forwarded unchanged (the storage order
  // was already flipped via Flags above).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); }

  // Write access: (row, col) swapped.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(col, row); }

  // Linear write access, index forwarded unchanged.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename XprType::Scalar& coeffRef(Index index) {
    return m_argImpl.coeffRef(index);
  }

  // Packet load at (row, col), indices swapped.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_argImpl.template packet<LoadMode, PacketType>(col, row);
  }

  // Packet load at a linear index.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return m_argImpl.template packet<LoadMode, PacketType>(index);
  }

  // Packet store at (row, col), indices swapped.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) {
    m_argImpl.template writePacket<StoreMode, PacketType>(col, row, x);
  }

  // Packet store at a linear index.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) {
    m_argImpl.template writePacket<StoreMode, PacketType>(index, x);
  }

  // Partial packet load (count lanes from lane begin), indices swapped.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(col, row, begin, count);
  }

  // Partial packet load at a linear index.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(index, begin, count);
  }

  // Partial packet store, indices swapped.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index row, Index col, const PacketType& x, Index begin,
                                                                Index count) {
    m_argImpl.template writePacketSegment<StoreMode, PacketType>(col, row, x, begin, count);
  }

  // Partial packet store at a linear index.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index index, const PacketType& x, Index begin,
                                                                Index count) {
    m_argImpl.template writePacketSegment<StoreMode, PacketType>(index, x, begin, count);
  }

 protected:
  // Evaluator of the transposed (nested) expression.
  evaluator<ArgType> m_argImpl;
};
// -------------------- CwiseNullaryOp --------------------
// Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
// Likewise, there is no need for more sophisticated dispatching here.
// Dispatch helper between the CwiseNullaryOp evaluator and a user functor.
// The three trailing bool parameters record which call forms the functor
// provides: nullary op(), unary op(i), binary op(i, j). They default to the
// corresponding has_*_operator traits. This primary template forwards each
// call form directly to the functor.
template <typename Scalar, typename NullaryOp, bool has_nullary = has_nullary_operator<NullaryOp>::value,
          bool has_unary = has_unary_operator<NullaryOp>::value,
          bool has_binary = has_binary_operator<NullaryOp>::value>
struct nullary_wrapper {
  // Coefficient access with (i, j).
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    return op(i, j);
  }
  // Coefficient access with a linear index.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
    return op(i);
  }
  // Packet access with (i, j).
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    return op.template packetOp<T>(i, j);
  }
  // Packet access with a linear index.
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
    return op.template packetOp<T>(i);
  }
};
// Specialization for functors exposing only the nullary form op():
// any indices passed in are ignored (both default to 0).
template <typename Scalar, typename NullaryOp>
struct nullary_wrapper<Scalar, NullaryOp, true, false, false> {
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType = 0, IndexType = 0) const {
    return op();
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType = 0, IndexType = 0) const {
    return op.template packetOp<T>();
  }
};
// Specialization for functors exposing only the binary form op(i, j):
// the linear-access path supplies j = 0 via the default argument.
template <typename Scalar, typename NullaryOp>
struct nullary_wrapper<Scalar, NullaryOp, false, false, true> {
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j = 0) const {
    return op(i, j);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j = 0) const {
    return op.template packetOp<T>(i, j);
  }
};
// We need the following specialization for vector-only functors assigned to a runtime vector,
// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
// In this case, i==0 and j is used for the actual iteration.
// Specialization for functors exposing only the unary form op(i) — e.g.
// vector-only functors such as linspace (see comment above).
template <typename Scalar, typename NullaryOp>
struct nullary_wrapper<Scalar, NullaryOp, false, true, false> {
  // 2D access: only degenerate (vector-like) index pairs are legal — one of
  // i, j must be 0 — so i + j is the effective 1D index.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    eigen_assert(i == 0 || j == 0);
    return op(i + j);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    eigen_assert(i == 0 || j == 0);
    return op.template packetOp<T>(i + j);
  }
  // 1D access forwards directly.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
    return op(i);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
    return op.template packetOp<T>(i);
  }
};
// Specialization for functors exposing none of the three call forms:
// intentionally empty, so any attempt to evaluate such a functor fails to
// compile.
template <typename Scalar, typename NullaryOp>
struct nullary_wrapper<Scalar, NullaryOp, false, false, false> {};
#if 0 && EIGEN_COMP_MSVC > 0
// Disable this ugly workaround. This is now handled in traits<Ref>::match,
// but this piece of code might still become handly if some other weird compilation
// errors pop up again.
// MSVC exhibits a weird compilation error when
// compiling:
// Eigen::MatrixXf A = MatrixXf::Random(3,3);
// Ref<const MatrixXf> R = 2.f*A;
// and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
// The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
// and at that time has_*ary_operator<T> returns true regardless of T.
// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
// and packet() are really instantiated as implemented below:
// This is a simple wrapper around Index to enforce the re-instantiation of
// has_*ary_operator when needed.
template<typename T> struct nullary_wrapper_workaround_msvc {
nullary_wrapper_workaround_msvc(const T&);
operator T()const;
};
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
{
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
}
template <typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
}
template <typename T, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
}
template <typename T, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
return nullary_wrapper<Scalar,NullaryOp,
has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
}
};
#endif // MSVC workaround
// Evaluator for CwiseNullaryOp expressions: coefficients are produced by a
// functor rather than read from memory. Calls are routed through
// nullary_wrapper, which adapts to whichever call forms the functor provides.
template <typename NullaryOp, typename PlainObjectType>
struct evaluator<CwiseNullaryOp<NullaryOp, PlainObjectType>>
    : evaluator_base<CwiseNullaryOp<NullaryOp, PlainObjectType>> {
  typedef CwiseNullaryOp<NullaryOp, PlainObjectType> XprType;
  typedef remove_all_t<PlainObjectType> PlainObjectTypeCleaned;

  enum {
    CoeffReadCost = functor_traits<NullaryOp>::Cost,
    // Keep the plain object's hereditary flags; grant linear/packet access
    // only if the functor supports them. Functors not flagged IsRepeatable
    // are forced to evaluate before nesting (EvalBeforeNestingBit).
    Flags = (evaluator<PlainObjectTypeCleaned>::Flags &
             (HereditaryBits | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0) |
              (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0))) |
            (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
    Alignment = AlignedMax
  };

  // Store the functor; m_wrapper dispatches each call to the call form the
  // functor actually provides.
  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n) : m_functor(n.functor()), m_wrapper() {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Coefficient at (row, col).
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType row, IndexType col) const {
    return m_wrapper(m_functor, row, col);
  }

  // Coefficient at a linear index.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType index) const {
    return m_wrapper(m_functor, index);
  }

  // Packet at (row, col).
  template <int LoadMode, typename PacketType, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(IndexType row, IndexType col) const {
    return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
  }

  // Packet at a linear index.
  template <int LoadMode, typename PacketType, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(IndexType index) const {
    return m_wrapper.template packetOp<PacketType>(m_functor, index);
  }

  // packetSegment: begin/count are ignored and a full packet is computed —
  // the functor generates values instead of reading memory, so no
  // out-of-bounds access is possible.
  template <int LoadMode, typename PacketType, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(IndexType row, IndexType col, Index /*begin*/,
                                                                 Index /*count*/) const {
    return packet<LoadMode, PacketType, IndexType>(row, col);
  }

  template <int LoadMode, typename PacketType, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(IndexType index, Index /*begin*/,
                                                                 Index /*count*/) const {
    return packet<LoadMode, PacketType, IndexType>(index);
  }

 protected:
  const NullaryOp m_functor;
  // Adapts the functor's available call forms (see nullary_wrapper above).
  const nullary_wrapper<CoeffReturnType, NullaryOp> m_wrapper;
};
// -------------------- CwiseUnaryOp --------------------
// Evaluator for coefficient-wise unary expressions: each access applies the
// functor to the corresponding coefficient/packet of the nested argument.
template <typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased> : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType>> {
  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;

  enum {
    // One argument read plus one functor application per coefficient.
    CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
    // Keep the argument's hereditary/linear flags; packet access additionally
    // requires functor support.
    Flags = evaluator<ArgType>::Flags &
            (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& op) : m_d(op) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Apply the functor to the argument's coefficient at (row, col).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_d.func()(m_d.argImpl.coeff(row, col));
  }

  // Apply the functor to the argument's coefficient at a linear index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_d.func()(m_d.argImpl.coeff(index));
  }

  // Packet accessors: load the argument packet, then apply the functor's
  // vectorized packetOp.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));
  }

  // Partial-packet accessors: forward begin/count to the argument and apply
  // the functor to the resulting segment.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    return m_d.func().packetOp(m_d.argImpl.template packetSegment<LoadMode, PacketType>(row, col, begin, count));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return m_d.func().packetOp(m_d.argImpl.template packetSegment<LoadMode, PacketType>(index, begin, count));
  }

 protected:
  // This helper permits completely eliminating the functor when it is empty.
  struct Data {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr)
        : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& func() const { return op; }
    UnaryOp op;
    evaluator<ArgType> argImpl;
  };

  Data m_d;
};
// ----------------------- Casting ---------------------
template
<
typename
SrcType
,
typename
DstType
,
typename
ArgType
>
struct
unary_evaluator
<
CwiseUnaryOp
<
core_cast_op
<
SrcType
,
DstType
>
,
ArgType
>
,
IndexBased
>
{
using
CastOp
=
core_cast_op
<
SrcType
,
DstType
>
;
using
XprType
=
CwiseUnaryOp
<
CastOp
,
ArgType
>
;
// Use the largest packet type by default
using
SrcPacketType
=
typename
packet_traits
<
SrcType
>::
type
;
static
constexpr
int
SrcPacketSize
=
unpacket_traits
<
SrcPacketType
>::
size
;
static
constexpr
int
SrcPacketBytes
=
SrcPacketSize
*
sizeof
(
SrcType
);
enum
{
CoeffReadCost
=
int
(
evaluator
<
ArgType
>::
CoeffReadCost
)
+
int
(
functor_traits
<
CastOp
>::
Cost
),
PacketAccess
=
functor_traits
<
CastOp
>::
PacketAccess
,
ActualPacketAccessBit
=
PacketAccess
?
PacketAccessBit
:
0
,
Flags
=
evaluator
<
ArgType
>::
Flags
&
(
HereditaryBits
|
LinearAccessBit
|
ActualPacketAccessBit
),
IsRowMajor
=
(
evaluator
<
ArgType
>::
Flags
&
RowMajorBit
),
Alignment
=
evaluator
<
ArgType
>::
Alignment
};
// Cache the argument's evaluator and the expression's dimensions (the
// dimensions feed the bounds checks below).
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& xpr)
    : m_argImpl(xpr.nestedExpression()), m_rows(xpr.rows()), m_cols(xpr.cols()) {
  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<CastOp>::Cost);
  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
// SFINAE helpers selecting, per destination packet type, which packet()
// overload below is viable:
// - AltSrcScalarOp: the destination packet is smaller than the default source
//   packet and no same-sized source packet type exists, so an element-wise
//   segment load is required.
template <typename DstPacketType>
using AltSrcScalarOp =
    std::enable_if_t<(unpacket_traits<DstPacketType>::size < SrcPacketSize &&
                      !find_packet_by_size<SrcType, unpacket_traits<DstPacketType>::size>::value),
                     bool>;
// - SrcPacketArgs1: a source packet type with the same element count exists.
template <typename DstPacketType>
using SrcPacketArgs1 =
    std::enable_if_t<(find_packet_by_size<SrcType, unpacket_traits<DstPacketType>::size>::value), bool>;
// - SrcPacketArgs2/4/8: the destination packet holds exactly 2/4/8 source
//   packets' worth of elements.
template <typename DstPacketType>
using SrcPacketArgs2 = std::enable_if_t<(unpacket_traits<DstPacketType>::size) == (2 * SrcPacketSize), bool>;
template <typename DstPacketType>
using SrcPacketArgs4 = std::enable_if_t<(unpacket_traits<DstPacketType>::size) == (4 * SrcPacketSize), bool>;
template <typename DstPacketType>
using SrcPacketArgs8 = std::enable_if_t<(unpacket_traits<DstPacketType>::size) == (8 * SrcPacketSize), bool>;
// True iff a run of `count` elements starting `begin` past (row, col) along
// the inner (storage-order) dimension stays inside the array.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index row, Index col, Index begin, Index count) const {
  return IsRowMajor ? (col + count + begin <= cols()) : (row + count + begin <= rows());
}
// Linear-index variant of the bounds check.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index index, Index begin, Index count) const {
  return index + count + begin <= size();
}
// Fetch one source coefficient, `offset` elements past (row, col) along the
// inner (storage-order) dimension.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SrcType srcCoeff(Index row, Index col, Index offset) const {
  Index actualRow = IsRowMajor ? row : row + offset;
  Index actualCol = IsRowMajor ? col + offset : col;
  return m_argImpl.coeff(actualRow, actualCol);
}
// Fetch one source coefficient at a linear index plus element offset.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SrcType srcCoeff(Index index, Index offset) const {
  Index actualIndex = index + offset;
  return m_argImpl.coeff(actualIndex);
}
// Scalar path: read one source coefficient and cast it to the destination
// scalar type.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstType coeff(Index row, Index col) const {
  return cast<SrcType, DstType>(srcCoeff(row, col, 0));
}
// Scalar path, linear index.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstType coeff(Index index) const {
  return cast<SrcType, DstType>(srcCoeff(index, 0));
}
// Load the `offset`-th source packet relative to (row, col); the offset is
// expressed in whole packets. Asserts that the load stays in bounds.
template <int LoadMode, typename PacketType = SrcPacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType srcPacket(Index row, Index col, Index offset) const {
  constexpr int PacketSize = unpacket_traits<PacketType>::size;
  Index packetOffset = offset * PacketSize;
  Index actualRow = IsRowMajor ? row : row + packetOffset;
  Index actualCol = IsRowMajor ? col + packetOffset : col;
  eigen_assert(check_array_bounds(actualRow, actualCol, 0, PacketSize) && "Array index out of bounds");
  return m_argImpl.template packet<LoadMode, PacketType>(actualRow, actualCol);
}
// Linear-index variant: load the `offset`-th source packet past `index`.
template <int LoadMode, typename PacketType = SrcPacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType srcPacket(Index index, Index offset) const {
  constexpr int PacketSize = unpacket_traits<PacketType>::size;
  Index packetOffset = offset * PacketSize;
  Index actualIndex = index + packetOffset;
  eigen_assert(check_array_bounds(actualIndex, 0, PacketSize) && "Array index out of bounds");
  return m_argImpl.template packet<LoadMode, PacketType>(actualIndex);
}
// Load a partial source packet (`count` lanes starting at lane `begin`),
// offset by `offset` whole packets from (row, col).
template <int LoadMode, typename PacketType = SrcPacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType srcPacketSegment(Index row, Index col, Index begin, Index count,
                                                                  Index offset) const {
  constexpr int PacketSize = unpacket_traits<PacketType>::size;
  Index packetOffset = offset * PacketSize;
  Index actualRow = IsRowMajor ? row : row + packetOffset;
  Index actualCol = IsRowMajor ? col + packetOffset : col;
  eigen_assert(check_array_bounds(actualRow, actualCol, 0, count) && "Array index out of bounds");
  return m_argImpl.template packetSegment<LoadMode, PacketType>(actualRow, actualCol, begin, count);
}
// Linear-index variant of the partial source packet load.
// Note: `begin` is folded into the index AND forwarded to packetSegment,
// unlike the (row, col) variant above.
template <int LoadMode, typename PacketType = SrcPacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType srcPacketSegment(Index index, Index begin, Index count,
                                                                  Index offset) const {
  constexpr int PacketSize = unpacket_traits<PacketType>::size;
  Index packetOffset = offset * PacketSize;
  Index actualIndex = index + packetOffset + begin;
  eigen_assert(check_array_bounds(actualIndex, 0, count) && "Array index out of bounds");
  return m_argImpl.template packetSegment<LoadMode, PacketType>(actualIndex, begin, count);
}
// Assemble NumPackets source packets covering the segment
// [begin, begin + count) of a wider destination packet. All packets are
// zero-initialized first, then the requested lanes are filled one source
// packet at a time; `actualBegin` is the lane offset within the first
// touched source packet.
template <int NumPackets, int LoadMode, typename PacketType = SrcPacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketBlock<PacketType, NumPackets> srcPacketSegmentHelper(Index row, Index col,
                                                                                                 Index begin,
                                                                                                 Index count) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<PacketType, NumPackets> packets;
  for (Index i = 0; i < NumPackets; i++) packets.packet[i] = pzero(PacketType());
  Index offset = begin / SrcPacketSize;
  Index actualBegin = begin % SrcPacketSize;
  for (; offset < NumPackets; offset++) {
    // Lanes still needed from this source packet.
    Index actualCount = numext::mini(SrcPacketSize - actualBegin, count);
    packets.packet[offset] = srcPacketSegment<SrcLoadMode>(row, col, actualBegin, actualCount, offset);
    if (count == actualCount) break;
    // Subsequent packets start at lane 0.
    actualBegin = 0;
    count -= actualCount;
  }
  return packets;
}
// Linear-index variant of the segment assembly above.
template <int NumPackets, int LoadMode, typename PacketType = SrcPacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketBlock<PacketType, NumPackets> srcPacketSegmentHelper(Index index,
                                                                                                 Index begin,
                                                                                                 Index count) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<PacketType, NumPackets> packets;
  for (Index i = 0; i < NumPackets; i++) packets.packet[i] = pzero(PacketType());
  Index offset = begin / SrcPacketSize;
  Index actualBegin = begin % SrcPacketSize;
  for (; offset < NumPackets; offset++) {
    // Lanes still needed from this source packet.
    Index actualCount = numext::mini(SrcPacketSize - actualBegin, count);
    packets.packet[offset] = srcPacketSegment<SrcLoadMode>(index, actualBegin, actualCount, offset);
    if (count == actualCount) break;
    // Subsequent packets start at lane 0.
    actualBegin = 0;
    count -= actualCount;
  }
  return packets;
}
// There is no source packet type with equal or fewer elements than DstPacketType.
// This is problematic as the evaluation loop may attempt to access data outside the bounds of the array.
// For example, consider the cast utilizing pcast<Packet4f,Packet2d> with an array of size 4: {0.0f,1.0f,2.0f,3.0f}.
// The first iteration of the evaluation loop will load 16 bytes: {0.0f,1.0f,2.0f,3.0f} and cast to {0.0,1.0}, which
// is acceptable. The second iteration will load 16 bytes: {2.0f,3.0f,?,?}, which is outside the bounds of the array.
// To stay in bounds, only DstPacketSize lanes are loaded, via a segment load.
template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacketSegment<SrcLoadMode>(row, col, 0, DstPacketSize, 0));
}
// Use the source packet type with the same size as DstPacketType, if it exists.
template <int LoadMode, typename DstPacketType, SrcPacketArgs1<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  using SizedSrcPacketType = typename find_packet_by_size<SrcType, DstPacketSize>::type;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SizedSrcPacketType, DstPacketType>(srcPacket<SrcLoadMode, SizedSrcPacketType>(row, col, 0));
}
// unpacket_traits<DstPacketType>::size == 2 * SrcPacketSize:
// cast two consecutive source packets into one destination packet.
template <int LoadMode, typename DstPacketType, SrcPacketArgs2<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacket<SrcLoadMode>(row, col, 0), srcPacket<SrcLoadMode>(row, col, 1));
}
// unpacket_traits<DstPacketType>::size == 4 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs4<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacket<SrcLoadMode>(row, col, 0), srcPacket<SrcLoadMode>(row, col, 1),
                                             srcPacket<SrcLoadMode>(row, col, 2), srcPacket<SrcLoadMode>(row, col, 3));
}
// unpacket_traits<DstPacketType>::size == 8 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs8<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacket<SrcLoadMode>(row, col, 0), srcPacket<SrcLoadMode>(row, col, 1),
                                             srcPacket<SrcLoadMode>(row, col, 2), srcPacket<SrcLoadMode>(row, col, 3),
                                             srcPacket<SrcLoadMode>(row, col, 4), srcPacket<SrcLoadMode>(row, col, 5),
                                             srcPacket<SrcLoadMode>(row, col, 6), srcPacket<SrcLoadMode>(row, col, 7));
}
// packetSegment variants: same overload dispatch as packet() above, but only
// lanes [begin, begin + count) of the destination packet are required.
template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index row, Index col, Index begin,
                                                                  Index count) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacketSegment<SrcLoadMode>(row, col, begin, count, 0));
}
// Use the source packet type with the same size as DstPacketType, if it exists.
template <int LoadMode, typename DstPacketType, SrcPacketArgs1<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index row, Index col, Index begin,
                                                                  Index count) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  using SizedSrcPacketType = typename find_packet_by_size<SrcType, DstPacketSize>::type;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SizedSrcPacketType, DstPacketType>(
      srcPacketSegment<SrcLoadMode, SizedSrcPacketType>(row, col, begin, count, 0));
}
// unpacket_traits<DstPacketType>::size == 2 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs2<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index row, Index col, Index begin,
                                                                  Index count) const {
  constexpr int NumPackets = 2;
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<SrcPacketType, NumPackets> packets =
      srcPacketSegmentHelper<NumPackets, SrcLoadMode>(row, col, begin, count);
  return pcast<SrcPacketType, DstPacketType>(packets.packet[0], packets.packet[1]);
}
// unpacket_traits<DstPacketType>::size == 4 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs4<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index row, Index col, Index begin,
                                                                  Index count) const {
  constexpr int NumPackets = 4;
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<SrcPacketType, NumPackets> packets =
      srcPacketSegmentHelper<NumPackets, SrcLoadMode>(row, col, begin, count);
  return pcast<SrcPacketType, DstPacketType>(packets.packet[0], packets.packet[1], packets.packet[2],
                                             packets.packet[3]);
}
// unpacket_traits<DstPacketType>::size == 8 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs8<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index row, Index col, Index begin,
                                                                  Index count) const {
  constexpr int NumPackets = 8;
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<SrcPacketType, NumPackets> packets =
      srcPacketSegmentHelper<NumPackets, SrcLoadMode>(row, col, begin, count);
  return pcast<SrcPacketType, DstPacketType>(packets.packet[0], packets.packet[1], packets.packet[2],
                                             packets.packet[3], packets.packet[4], packets.packet[5],
                                             packets.packet[6], packets.packet[7]);
}
// Analogous routines for linear access.
template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacketSegment<SrcLoadMode>(index, 0, DstPacketSize, 0));
}
// Same-sized source packet type exists.
template <int LoadMode, typename DstPacketType, SrcPacketArgs1<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  using SizedSrcPacketType = typename find_packet_by_size<SrcType, DstPacketSize>::type;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SizedSrcPacketType, DstPacketType>(srcPacket<SrcLoadMode, SizedSrcPacketType>(index, 0));
}
// Destination packet holds 2 source packets.
template <int LoadMode, typename DstPacketType, SrcPacketArgs2<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacket<SrcLoadMode>(index, 0), srcPacket<SrcLoadMode>(index, 1));
}
// Destination packet holds 4 source packets.
template <int LoadMode, typename DstPacketType, SrcPacketArgs4<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const {
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacket<SrcLoadMode>(index, 0), srcPacket<SrcLoadMode>(index, 1),
                                             srcPacket<SrcLoadMode>(index, 2), srcPacket<SrcLoadMode>(index, 3));
}
template
<
int
LoadMode
,
typename
DstPacketType
,
SrcPacketArgs8
<
DstPacketType
>
=
true
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
DstPacketType
packet
(
Index
index
)
const
{
constexpr
int
SrcLoadMode
=
plain_enum_min
(
SrcPacketBytes
,
LoadMode
);
return
pcast
<
SrcPacketType
,
DstPacketType
>
(
srcPacket
<
SrcLoadMode
>
(
index
,
0
),
srcPacket
<
SrcLoadMode
>
(
index
,
1
),
srcPacket
<
SrcLoadMode
>
(
index
,
2
),
srcPacket
<
SrcLoadMode
>
(
index
,
3
),
srcPacket
<
SrcLoadMode
>
(
index
,
4
),
srcPacket
<
SrcLoadMode
>
(
index
,
5
),
srcPacket
<
SrcLoadMode
>
(
index
,
6
),
srcPacket
<
SrcLoadMode
>
(
index
,
7
));
}
// packetSegment variants
// Linear-access partial loads: only elements [begin, begin+count) of the destination
// packet are valid; the per-case dispatch mirrors the full packet(Index) overloads above.
template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index index, Index begin, Index count) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SrcPacketType, DstPacketType>(srcPacketSegment<SrcLoadMode>(index, begin, count, 0));
}
// Use the source packet type with the same size as DstPacketType, if it exists
template <int LoadMode, typename DstPacketType, SrcPacketArgs1<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index index, Index begin, Index count) const {
  constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size;
  using SizedSrcPacketType = typename find_packet_by_size<SrcType, DstPacketSize>::type;
  constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType);
  constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode);
  return pcast<SizedSrcPacketType, DstPacketType>(
      srcPacketSegment<SrcLoadMode, SizedSrcPacketType>(index, begin, count, 0));
}
// unpacket_traits<DstPacketType>::size == 2 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs2<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index index, Index begin, Index count) const {
  constexpr int NumPackets = 2;
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  // The helper performs the bounds bookkeeping for the partial multi-packet gather.
  PacketBlock<SrcPacketType, NumPackets> packets =
      srcPacketSegmentHelper<NumPackets, SrcLoadMode>(index, begin, count);
  return pcast<SrcPacketType, DstPacketType>(packets.packet[0], packets.packet[1]);
}
// unpacket_traits<DstPacketType>::size == 4 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs4<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index index, Index begin, Index count) const {
  constexpr int NumPackets = 4;
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<SrcPacketType, NumPackets> packets =
      srcPacketSegmentHelper<NumPackets, SrcLoadMode>(index, begin, count);
  return pcast<SrcPacketType, DstPacketType>(packets.packet[0], packets.packet[1], packets.packet[2],
                                             packets.packet[3]);
}
// unpacket_traits<DstPacketType>::size == 8 * SrcPacketSize
template <int LoadMode, typename DstPacketType, SrcPacketArgs8<DstPacketType> = true>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packetSegment(Index index, Index begin, Index count) const {
  constexpr int NumPackets = 8;
  constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode);
  PacketBlock<SrcPacketType, NumPackets> packets =
      srcPacketSegmentHelper<NumPackets, SrcLoadMode>(index, begin, count);
  return pcast<SrcPacketType, DstPacketType>(packets.packet[0], packets.packet[1], packets.packet[2],
                                             packets.packet[3], packets.packet[4], packets.packet[5],
                                             packets.packet[6], packets.packet[7]);
}
// Runtime dimensions of the evaluated expression.
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_rows; }
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_cols; }
// Total element count (rows * cols); relies on variable_if_dynamic's operators /
// conversions to Index — presumably provided by that helper, confirm in its definition.
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_rows * m_cols; }

protected:
// Evaluator of the nested (source) expression being cast.
const evaluator<ArgType> m_argImpl;
// Dimensions: stored at runtime only when dynamic, otherwise encoded in the type.
const variable_if_dynamic<Index, XprType::RowsAtCompileTime> m_rows;
const variable_if_dynamic<Index, XprType::ColsAtCompileTime> m_cols;
};
// -------------------- CwiseTernaryOp --------------------
// Evaluator entry point for ternary expressions: a thin wrapper that forwards
// everything to ternary_evaluator, which holds the actual evaluation logic.
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>
    : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>> {
  using XprType = CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>;
  using Base = ternary_evaluator<XprType>;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& expr) : Base(expr) {}
};
// Index-based evaluator for CwiseTernaryOp: applies the ternary functor
// coefficient-wise (or packet-wise) to three argument evaluators.
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
    : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>> {
  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;

  enum {
    // Cost of reading one coefficient: all three arguments plus the functor itself.
    CoeffReadCost = int(evaluator<Arg1>::CoeffReadCost) + int(evaluator<Arg2>::CoeffReadCost) +
                    int(evaluator<Arg3>::CoeffReadCost) + int(functor_traits<TernaryOp>::Cost),

    Arg1Flags = evaluator<Arg1>::Flags,
    Arg2Flags = evaluator<Arg2>::Flags,
    Arg3Flags = evaluator<Arg3>::Flags,
    // Packet evaluation requires all three arguments to share one scalar type.
    SameType = is_same<typename Arg1::Scalar, typename Arg2::Scalar>::value &&
               is_same<typename Arg1::Scalar, typename Arg3::Scalar>::value,
    // Linear/packet access is only valid when all arguments traverse memory in the same order.
    StorageOrdersAgree = (int(Arg1Flags) & RowMajorBit) == (int(Arg2Flags) & RowMajorBit) &&
                         (int(Arg1Flags) & RowMajorBit) == (int(Arg3Flags) & RowMajorBit),
    // Keep hereditary bits; grant LinearAccessBit / PacketAccessBit only when every
    // argument supports them and the preconditions above hold.
    Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) &
             (HereditaryBits |
              (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
               ((StorageOrdersAgree ? LinearAccessBit : 0) |
                (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit
                                                                                          : 0)))),
    // Storage order of the result follows the first argument.
    Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
    // Alignment is limited by the least-aligned argument.
    Alignment = plain_enum_min(plain_enum_min(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
                               evaluator<Arg3>::Alignment)
  };

  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Scalar access, (row, col) addressing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));
  }

  // Scalar access, linear addressing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));
  }

  // Packet access, (row, col) addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode, PacketType>(row, col),
                               m_d.arg2Impl.template packet<LoadMode, PacketType>(row, col),
                               m_d.arg3Impl.template packet<LoadMode, PacketType>(row, col));
  }

  // Packet access, linear addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode, PacketType>(index),
                               m_d.arg2Impl.template packet<LoadMode, PacketType>(index),
                               m_d.arg3Impl.template packet<LoadMode, PacketType>(index));
  }

  // Partial-packet access ([begin, begin+count) lanes), (row, col) addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin,
                                                                 Index count) const {
    return m_d.func().packetOp(m_d.arg1Impl.template packetSegment<LoadMode, PacketType>(row, col, begin, count),
                               m_d.arg2Impl.template packetSegment<LoadMode, PacketType>(row, col, begin, count),
                               m_d.arg3Impl.template packetSegment<LoadMode, PacketType>(row, col, begin, count));
  }

  // Partial-packet access, linear addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return m_d.func().packetOp(m_d.arg1Impl.template packetSegment<LoadMode, PacketType>(index, begin, count),
                               m_d.arg2Impl.template packetSegment<LoadMode, PacketType>(index, begin, count),
                               m_d.arg3Impl.template packetSegment<LoadMode, PacketType>(index, begin, count));
  }

 protected:
  // this helper permits to completely eliminate the functor if it is empty
  struct Data {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr)
        : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TernaryOp& func() const { return op; }
    TernaryOp op;
    evaluator<Arg1> arg1Impl;
    evaluator<Arg2> arg2Impl;
    evaluator<Arg3> arg3Impl;
  };

  Data m_d;
};
// specialization for expressions like (a < b).select(c, d) to enable full vectorization
// The user-visible expression compares to `bool` (comparison returns bool, selector takes
// bool); this rebinds it to the Scalar-typed comparison/selector pair, whose functors have
// packet implementations, and evaluates that equivalent expression instead.
template <typename Arg1, typename Arg2, typename Scalar, typename CmpLhsType, typename CmpRhsType,
          ComparisonName cmp>
struct evaluator<CwiseTernaryOp<scalar_boolean_select_op<Scalar, Scalar, bool>, Arg1, Arg2,
                                CwiseBinaryOp<scalar_cmp_op<Scalar, Scalar, cmp, false>, CmpLhsType, CmpRhsType>>>
    : public ternary_evaluator<
          CwiseTernaryOp<scalar_boolean_select_op<Scalar, Scalar, Scalar>, Arg1, Arg2,
                         CwiseBinaryOp<scalar_cmp_op<Scalar, Scalar, cmp, true>, CmpLhsType, CmpRhsType>>> {
  // "Dummy" types: the expression as actually written by the user (bool-valued compare).
  using DummyTernaryOp = scalar_boolean_select_op<Scalar, Scalar, bool>;
  using DummyArg3 = CwiseBinaryOp<scalar_cmp_op<Scalar, Scalar, cmp, false>, CmpLhsType, CmpRhsType>;
  using DummyXprType = CwiseTernaryOp<DummyTernaryOp, Arg1, Arg2, DummyArg3>;

  // Vectorizable equivalents: compare/select operate on Scalar masks instead of bool.
  using TernaryOp = scalar_boolean_select_op<Scalar, Scalar, Scalar>;
  using Arg3 = CwiseBinaryOp<scalar_cmp_op<Scalar, Scalar, cmp, true>, CmpLhsType, CmpRhsType>;
  using XprType = CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>;

  using Base = ternary_evaluator<XprType>;

  // Rebuild the expression with the Scalar-typed comparison before handing it to Base.
  EIGEN_DEVICE_FUNC explicit evaluator(const DummyXprType& xpr)
      : Base(XprType(xpr.arg1(), xpr.arg2(), Arg3(xpr.arg3().lhs(), xpr.arg3().rhs()))) {}
};
// -------------------- CwiseBinaryOp --------------------
// Evaluator entry point for binary expressions: a thin wrapper that defers all
// evaluation work to binary_evaluator.
template <typename BinaryOp, typename Lhs, typename Rhs>
struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>> : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>> {
  using XprType = CwiseBinaryOp<BinaryOp, Lhs, Rhs>;
  using Base = binary_evaluator<XprType>;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& expr) : Base(expr) {}
};
// Index-based evaluator for CwiseBinaryOp: applies the binary functor
// coefficient-wise (or packet-wise) to the lhs and rhs evaluators.
template <typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
    : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>> {
  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;

  enum {
    // Reading one coefficient costs both operands plus the functor application.
    CoeffReadCost =
        int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),

    LhsFlags = evaluator<Lhs>::Flags,
    RhsFlags = evaluator<Rhs>::Flags,
    // Packet evaluation requires identical scalar types on both sides.
    SameType = is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value,
    StorageOrdersAgree = (int(LhsFlags) & RowMajorBit) == (int(RhsFlags) & RowMajorBit),
    // Grant LinearAccessBit / PacketAccessBit only when both sides support them
    // and the functor has a packet implementation.
    Flags0 = (int(LhsFlags) | int(RhsFlags)) &
             (HereditaryBits |
              (int(LhsFlags) & int(RhsFlags) &
               ((StorageOrdersAgree ? LinearAccessBit : 0) |
                (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit
                                                                                         : 0)))),
    // Storage order of the result follows the left-hand side.
    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
    Alignment = plain_enum_min(evaluator<Lhs>::Alignment, evaluator<Rhs>::Alignment)
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit binary_evaluator(const XprType& xpr) : m_d(xpr) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Scalar access, (row, col) addressing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));
  }

  // Scalar access, linear addressing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));
  }

  // Packet access, (row, col) addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode, PacketType>(row, col),
                               m_d.rhsImpl.template packet<LoadMode, PacketType>(row, col));
  }

  // Packet access, linear addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode, PacketType>(index),
                               m_d.rhsImpl.template packet<LoadMode, PacketType>(index));
  }

  // Partial-packet access ([begin, begin+count) lanes), (row, col) addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin,
                                                                 Index count) const {
    return m_d.func().packetOp(m_d.lhsImpl.template packetSegment<LoadMode, PacketType>(row, col, begin, count),
                               m_d.rhsImpl.template packetSegment<LoadMode, PacketType>(row, col, begin, count));
  }

  // Partial-packet access, linear addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return m_d.func().packetOp(m_d.lhsImpl.template packetSegment<LoadMode, PacketType>(index, begin, count),
                               m_d.rhsImpl.template packetSegment<LoadMode, PacketType>(index, begin, count));
  }

 protected:
  // this helper permits to completely eliminate the functor if it is empty
  struct Data {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr)
        : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& func() const { return op; }
    BinaryOp op;
    evaluator<Lhs> lhsImpl;
    evaluator<Rhs> rhsImpl;
  };

  Data m_d;
};
// -------------------- CwiseUnaryView --------------------
// Evaluator for CwiseUnaryView: applies a view functor (e.g. real/imag part) that
// yields lvalue references into the nested expression, hence the coeffRef overloads.
template <typename UnaryOp, typename ArgType, typename StrideType>
struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType, StrideType>, IndexBased>
    : evaluator_base<CwiseUnaryView<UnaryOp, ArgType, StrideType>> {
  typedef CwiseUnaryView<UnaryOp, ArgType, StrideType> XprType;

  enum {
    CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),

    // No PacketAccessBit: the view is evaluated strictly coefficient-wise.
    Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),

    Alignment = 0  // FIXME it is not very clear why alignment is necessarily lost...
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // Read-only access: functor applied to the nested coefficient.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_d.func()(m_d.argImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_d.func()(m_d.argImpl.coeff(index));
  }

  // Mutable access: the functor maps a mutable reference of the nested expression
  // to a mutable reference of the viewed scalar.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return m_d.func()(m_d.argImpl.coeffRef(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return m_d.func()(m_d.argImpl.coeffRef(index));
  }

 protected:
  // this helper permits to completely eliminate the functor if it is empty
  struct Data {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr)
        : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& func() const { return op; }
    UnaryOp op;
    evaluator<ArgType> argImpl;
  };

  Data m_d;
};
// -------------------- Map --------------------
// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
// but that might complicate template specialization
template <typename Derived, typename PlainObjectType>
struct mapbase_evaluator;

// Common evaluator implementation for Map-like expressions (Map, Ref, Block with
// direct access): raw pointer + inner/outer strides.
template <typename Derived, typename PlainObjectType>
struct mapbase_evaluator : evaluator_base<Derived> {
  typedef Derived XprType;
  typedef typename XprType::PointerType PointerType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    // NOTE(review): IsRowMajor is assigned XprType::RowsAtCompileTime — looks like a
    // copy-paste oddity; the stride helpers below read XprType::IsRowMajor directly,
    // so this enum value appears unused. Confirm before relying on it.
    IsRowMajor = XprType::RowsAtCompileTime,
    ColsAtCompileTime = XprType::ColsAtCompileTime,
    CoeffReadCost = NumTraits<Scalar>::ReadCost
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit mapbase_evaluator(const XprType& map)
      : m_data(const_cast<PointerType>(map.data())),
        m_innerStride(map.innerStride()),
        m_outerStride(map.outerStride()) {
    // Packet access is only advertised when the inner stride is statically 1,
    // i.e. consecutive coefficients are contiguous in memory.
    EIGEN_STATIC_ASSERT(check_implication((evaluator<Derived>::Flags & PacketAccessBit) != 0,
                                          inner_stride_at_compile_time<Derived>::ret == 1),
                        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // Scalar access: address computed from row/col strides.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_data[col * colStride() + row * rowStride()];
  }

  // Scalar access, linear addressing along the inner stride.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_data[index * m_innerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return m_data[index * m_innerStride.value()];
  }

  // Packet load at (row, col); LoadMode conveys the alignment guarantee.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return ploadt<PacketType, LoadMode>(ptr);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
  }

  // Packet store at (row, col).
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    pstoret<Scalar, PacketType, StoreMode>(ptr, x);
  }

  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) {
    pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
  }

  // Partial packet load: only lanes [begin, begin+count) are read.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin,
                                                                 Index count) const {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return ploadtSegment<PacketType, LoadMode>(ptr, begin, count);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return ploadtSegment<PacketType, LoadMode>(m_data + index * m_innerStride.value(), begin, count);
  }

  // Partial packet store: only lanes [begin, begin+count) are written.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index row, Index col, const PacketType& x,
                                                                Index begin, Index count) {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    pstoretSegment<Scalar, PacketType, StoreMode>(ptr, x, begin, count);
  }

  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index index, const PacketType& x, Index begin,
                                                                Index count) {
    pstoretSegment<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x, begin, count);
  }

 protected:
  // Stride between consecutive rows (resp. columns), dispatching on storage order.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rowStride() const noexcept {
    return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index colStride() const noexcept {
    return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
  }

  PointerType m_data;  // base pointer of the mapped storage (const_cast'ed for coeffRef)
  const variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
  const variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
};
// Evaluator for Map: forwards storage access to mapbase_evaluator and computes
// Flags/Alignment from the compile-time stride information.
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct evaluator<Map<PlainObjectType, MapOptions, StrideType>>
    : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType> {
  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    // A stride of 0 in StrideType means "use the plain object's natural stride".
    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
                                   ? int(PlainObjectType::InnerStrideAtCompileTime)
                                   : int(StrideType::InnerStrideAtCompileTime),
    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
                                   ? int(PlainObjectType::OuterStrideAtCompileTime)
                                   : int(StrideType::OuterStrideAtCompileTime),
    HasNoInnerStride = InnerStrideAtCompileTime == 1,
    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
    HasNoStride = HasNoInnerStride && HasNoOuterStride,
    IsDynamicSize = PlainObjectType::SizeAtCompileTime == Dynamic,

    // Packet access requires a unit inner stride; linear access requires either no
    // custom strides at all or a vector shape.
    PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
    LinearAccessMask =
        bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
    Flags = int(evaluator<PlainObjectType>::Flags) & (LinearAccessMask & PacketAccessMask),

    // Alignment is whatever the user promised via the Map options.
    Alignment = int(MapOptions) & int(AlignedMask)
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map) : mapbase_evaluator<XprType, PlainObjectType>(map) {}
};
// -------------------- Ref --------------------
// Evaluator for Ref: storage access comes from mapbase_evaluator; Flags and
// Alignment are borrowed from the equivalent Map evaluator, since a Ref views
// memory exactly like a Map with the same options and strides.
template <typename PlainObjectType, int RefOptions, typename StrideType>
struct evaluator<Ref<PlainObjectType, RefOptions, StrideType>>
    : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType> {
  typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;

  enum {
    Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType>>::Flags,
    Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType>>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& ref)
      : mapbase_evaluator<XprType, PlainObjectType>(ref) {}
};
// -------------------- Block --------------------
// Forward declaration; the HasDirectAccess flag selects between the pointer-based
// (mapbase_evaluator) and the coefficient-forwarding (unary_evaluator) implementations.
template <typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
          bool HasDirectAccess = has_direct_access<ArgType>::ret>
struct block_evaluator;
// Evaluator for Block expressions: computes compile-time traits (storage order,
// strides, access flags, alignment), then dispatches storage access to
// block_evaluator (direct or non-direct variant).
template <typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>>
    : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> {
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
    ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,

    ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags) & RowMajorBit) != 0,
    // A compile-time row (resp. column) vector block is treated as row-major
    // (resp. column-major); otherwise it inherits the argument's storage order.
    IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1)   ? 1
                 : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
                                                                            : ArgTypeIsRowMajor,
    HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
    // When storage orders differ, inner and outer strides swap roles.
    InnerStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(inner_stride_at_compile_time<ArgType>::ret)
                                                            : int(outer_stride_at_compile_time<ArgType>::ret),
    OuterStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(outer_stride_at_compile_time<ArgType>::ret)
                                                            : int(inner_stride_at_compile_time<ArgType>::ret),
    MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,

    // Linear access for vectors, or for inner panels whose argument is linearly accessible.
    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 ||
                            (InnerPanel && (evaluator<ArgType>::Flags & LinearAccessBit)))
                               ? LinearAccessBit
                               : 0,
    FlagsRowMajorBit = XprType::Flags & RowMajorBit,
    Flags0 = evaluator<ArgType>::Flags & ((HereditaryBits & ~RowMajorBit) | DirectAccessBit | MaskPacketAccessBit),
    Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,

    PacketAlignment = unpacket_traits<PacketScalar>::alignment,
    // An inner panel with a static outer stride that is a multiple of the packet
    // alignment keeps every inner vector aligned.
    Alignment0 = (InnerPanel && (OuterStrideAtCompileTime != Dynamic) && (OuterStrideAtCompileTime != 0) &&
                  (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0))
                     ? int(PacketAlignment)
                     : 0,
    Alignment = plain_enum_min(evaluator<ArgType>::Alignment, Alignment0)
  };
  typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& block) : block_evaluator_type(block) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
};
// no direct-access => dispatch to a unary evaluator
// Without a raw data pointer, a Block is evaluated like any other unary
// expression: offset the coordinates and forward to the nested evaluator.
template <typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
    : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>> {
  using XprType = Block<ArgType, BlockRows, BlockCols, InnerPanel>;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit block_evaluator(const XprType& expr)
      : unary_evaluator<XprType>(expr) {}
};
// Index-based evaluator for Block without direct access: shifts every access by
// the block's start offsets, with an optional precomputed linear offset fast path.
template <typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
    : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel>> {
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& block)
      : m_argImpl(block.nestedExpression()),
        m_startRow(block.startRow()),
        m_startCol(block.startCol()),
        // Precompute the linear offset of the block's first coefficient inside the
        // nested expression, but only when linear indices can be forwarded directly.
        m_linear_offset(
            ForwardLinearAccess
                ? (ArgType::IsRowMajor
                       ? block.startRow() * block.nestedExpression().cols() + block.startCol()
                       : block.startCol() * block.nestedExpression().rows() + block.startRow())
                : 0) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    RowsAtCompileTime = XprType::RowsAtCompileTime,
    // Linear indices can be forwarded to the argument when the block is an inner
    // panel or shares the argument's storage order, and the argument itself
    // supports linear access.
    ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor) == int(ArgType::IsRowMajor)) &&
                          bool(evaluator<ArgType>::Flags & LinearAccessBit)
  };
  // Scalar read, (row, col) addressing: shift by the block's start offsets.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
  }

  // Scalar read, linear addressing: compile-time dispatch on ForwardLinearAccess.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
  }

  // Scalar write, (row, col) addressing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
  }

  // Scalar write, linear addressing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
  }
  // Packet load, (row, col) addressing: shift by the block's start offsets.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_argImpl.template packet<LoadMode, PacketType>(m_startRow.value() + row, m_startCol.value() + col);
  }

  // Packet load, linear addressing: either forward the shifted linear index, or
  // map the index onto (row, col) of this (vector-shaped) block.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    if (ForwardLinearAccess)
      return m_argImpl.template packet<LoadMode, PacketType>(m_linear_offset.value() + index);
    else
      return packet<LoadMode, PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                          RowsAtCompileTime == 1 ? index : 0);
  }
  // Packet store, (row, col) addressing.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) {
    return m_argImpl.template writePacket<StoreMode, PacketType>(m_startRow.value() + row,
                                                                 m_startCol.value() + col, x);
  }

  // Packet store, linear addressing: mirrors packet(Index) above.
  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) {
    if (ForwardLinearAccess)
      return m_argImpl.template writePacket<StoreMode, PacketType>(m_linear_offset.value() + index, x);
    else
      return writePacket<StoreMode, PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                                RowsAtCompileTime == 1 ? index : 0, x);
  }
  // Partial packet load ([begin, begin+count) lanes), (row, col) addressing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin,
                                                                 Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(m_startRow.value() + row,
                                                                  m_startCol.value() + col, begin, count);
  }

  // Partial packet load, linear addressing: forward shifted index or remap to (row, col).
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    if (ForwardLinearAccess)
      return m_argImpl.template packetSegment<LoadMode, PacketType>(m_linear_offset.value() + index, begin, count);
    else
      return packetSegment<LoadMode, PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                                 RowsAtCompileTime == 1 ? index : 0, begin, count);
  }
// Partial packet store of `count` lanes starting at lane `begin`, at (row, col):
// translate by the block's start offsets and forward.
template <int StoreMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index row, Index col, const PacketType& x, Index begin,
                                                              Index count) {
  return m_argImpl.template writePacketSegment<StoreMode, PacketType>(m_startRow.value() + row,
                                                                      m_startCol.value() + col, x, begin, count);
}

// Linear partial packet store; same dispatch scheme as the linear packet() above.
template <int StoreMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index index, const PacketType& x, Index begin,
                                                              Index count) {
  if (ForwardLinearAccess)
    return m_argImpl.template writePacketSegment<StoreMode, PacketType>(m_linear_offset.value() + index, x, begin,
                                                                        count);
  else
    return writePacketSegment<StoreMode, PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                                     RowsAtCompileTime == 1 ? index : 0, x, begin, count);
}
protected:
// Linear coefficient read when ForwardLinearAccess holds: one add, no 2D mapping.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType linear_coeff_impl(Index index,
                                                                        internal::true_type /* ForwardLinearAccess */)
    const {
  return m_argImpl.coeff(m_linear_offset.value() + index);
}

// Fallback: fold the linear index onto the 2D coeff() overload
// (block is a single row when RowsAtCompileTime == 1, a single column otherwise).
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType linear_coeff_impl(
    Index index, internal::false_type /* not ForwardLinearAccess */) const {
  return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
}

// Mutable counterpart of linear_coeff_impl (ForwardLinearAccess case).
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& linear_coeffRef_impl(Index index,
                                                                   internal::true_type /* ForwardLinearAccess */) {
  return m_argImpl.coeffRef(m_linear_offset.value() + index);
}

// Mutable counterpart of linear_coeff_impl (2D fallback case).
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& linear_coeffRef_impl(Index index,
                                                                   internal::false_type /* not ForwardLinearAccess */) {
  return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
}

// Evaluator of the nested (blocked) expression.
evaluator<ArgType> m_argImpl;
// Start offsets; statically 0 when the argument is itself a 1xN / Nx1 vector
// and the block keeps that single row/column, so no storage is needed.
const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows == 1) ? 0 : Dynamic> m_startRow;
const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols == 1) ? 0 : Dynamic> m_startCol;
// Precomputed linear offset, only meaningful when ForwardLinearAccess is true.
const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
};
// TODO: This evaluator does not actually use the child evaluator;
// all action is via the data() as returned by the Block expression.

// Specialization for blocks with direct memory access: behave like a Map over
// the block's data pointer (all accessors are inherited from mapbase_evaluator).
template <typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel,
                       /* HasDirectAccess */ true>
    : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
                        typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject> {
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit block_evaluator(const XprType& block)
      : mapbase_evaluator<XprType, typename XprType::PlainObject>(block) {
    // Debug-only sanity check: the block's data pointer must satisfy the
    // alignment this evaluator advertises (skipped in constant evaluation,
    // where pointer arithmetic on addresses is not allowed).
    eigen_internal_assert((internal::is_constant_evaluated() ||
                           (std::uintptr_t(block.data()) % plain_enum_max(1, evaluator<XprType>::Alignment)) == 0) &&
                          "data is not aligned");
  }
};
// -------------------- Select --------------------
// NOTE shall we introduce a ternary_evaluator?

// TODO enable vectorization for Select

// Evaluator for Select(cond, then, else): each coefficient reads the condition
// and then exactly one of the two branch evaluators.
template <typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType>>
    : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType>> {
  typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
  enum {
    // Cost model: the condition is always read, plus the worse of the two branches.
    CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost +
                    plain_enum_max(evaluator<ThenMatrixType>::CoeffReadCost, evaluator<ElseMatrixType>::CoeffReadCost),
    // Only flags common to both branches (restricted to HereditaryBits) survive.
    Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
    Alignment = plain_enum_min(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& select)
      : m_conditionImpl(select.conditionMatrix()),
        m_thenImpl(select.thenMatrix()),
        m_elseImpl(select.elseMatrix()) {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    if (m_conditionImpl.coeff(row, col))
      return m_thenImpl.coeff(row, col);
    else
      return m_elseImpl.coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    if (m_conditionImpl.coeff(index))
      return m_thenImpl.coeff(index);
    else
      return m_elseImpl.coeff(index);
  }

 protected:
  evaluator<ConditionMatrixType> m_conditionImpl;
  evaluator<ThenMatrixType> m_thenImpl;
  evaluator<ElseMatrixType> m_elseImpl;
};
// -------------------- Replicate --------------------

// Evaluator for Replicate: tiles the nested expression RowFactor x ColFactor
// times. The index-folding logic (map an index of the replicated expression
// back into the argument's range) was duplicated across all six accessors;
// it is factored into the actualRow/actualCol/actualIndex helpers below.
template <typename ArgType, int RowFactor, int ColFactor>
struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor>>
    : evaluator_base<Replicate<ArgType, RowFactor, ColFactor>> {
  typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  enum { Factor = (RowFactor == Dynamic || ColFactor == Dynamic) ? Dynamic : RowFactor * ColFactor };
  typedef typename nested_eval<ArgType, Factor>::type ArgTypeNested;
  typedef remove_all_t<ArgTypeNested> ArgTypeNestedCleaned;
  enum {
    CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
    // Linear access is only meaningful when the replicated expression is a vector.
    LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
    Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits | LinearAccessMask) & ~RowMajorBit) |
            (traits<XprType>::Flags & RowMajorBit),
    Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& replicate)
      : m_arg(replicate.nestedExpression()),
        m_argImpl(m_arg),
        m_rows(replicate.nestedExpression().rows()),
        m_cols(replicate.nestedExpression().cols()) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_argImpl.coeff(actualRow(row), actualCol(col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_argImpl.coeff(actualIndex(index));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_argImpl.template packet<LoadMode, PacketType>(actualRow(row), actualCol(col));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return m_argImpl.template packet<LoadMode, PacketType>(actualIndex(index));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(actualRow(row), actualCol(col), begin, count);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(actualIndex(index), begin, count);
  }

 protected:
  // Fold a row index of the replicated expression back into the argument's rows.
  // Modulo is avoided when it is statically known to be a no-op; this is a pure
  // optimization strategy.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index actualRow(Index row) const {
    return traits<XprType>::RowsAtCompileTime == 1 ? 0 : RowFactor == 1 ? row : row % m_rows.value();
  }
  // Same folding for a column index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index actualCol(Index col) const {
    return traits<XprType>::ColsAtCompileTime == 1 ? 0 : ColFactor == 1 ? col : col % m_cols.value();
  }
  // Same folding for a linear index (vector case: fold along the replicated axis).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index actualIndex(Index index) const {
    return traits<XprType>::RowsAtCompileTime == 1 ? (ColFactor == 1 ? index : index % m_cols.value())
                                                   : (RowFactor == 1 ? index : index % m_rows.value());
  }

  const ArgTypeNested m_arg;
  evaluator<ArgTypeNestedCleaned> m_argImpl;
  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
};
// -------------------- MatrixWrapper and ArrayWrapper --------------------
//
// evaluator_wrapper_base<T> is a common base class for the
// MatrixWrapper and ArrayWrapper evaluators.
// Since a wrapper only changes the expression's API flavor (matrix vs array),
// every accessor forwards verbatim to the nested expression's evaluator.
template <typename XprType>
struct evaluator_wrapper_base : evaluator_base<XprType> {
  typedef remove_all_t<typename XprType::NestedExpressionType> ArgType;
  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = evaluator<ArgType>::Flags,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}

  typedef typename ArgType::Scalar Scalar;
  typedef typename ArgType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_argImpl.coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return m_argImpl.coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    return m_argImpl.template packet<LoadMode, PacketType>(row, col);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    return m_argImpl.template packet<LoadMode, PacketType>(index);
  }

  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) {
    m_argImpl.template writePacket<StoreMode>(row, col, x);
  }

  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) {
    m_argImpl.template writePacket<StoreMode>(index, x);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(row, col, begin, count);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    return m_argImpl.template packetSegment<LoadMode, PacketType>(index, begin, count);
  }

  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index row, Index col, const PacketType& x, Index begin,
                                                                Index count) {
    m_argImpl.template writePacketSegment<StoreMode>(row, col, x, begin, count);
  }

  template <int StoreMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index index, const PacketType& x, Index begin,
                                                                Index count) {
    m_argImpl.template writePacketSegment<StoreMode>(index, x, begin, count);
  }

 protected:
  // Evaluator of the wrapped (nested) expression.
  evaluator<ArgType> m_argImpl;
};
// MatrixWrapper evaluator: pure pass-through to the wrapped expression.
template <typename TArgType>
struct unary_evaluator<MatrixWrapper<TArgType>> : evaluator_wrapper_base<MatrixWrapper<TArgType>> {
  typedef MatrixWrapper<TArgType> XprType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& wrapper)
      : evaluator_wrapper_base<MatrixWrapper<TArgType>>(wrapper.nestedExpression()) {}
};
// ArrayWrapper evaluator: pure pass-through to the wrapped expression.
template <typename TArgType>
struct unary_evaluator<ArrayWrapper<TArgType>> : evaluator_wrapper_base<ArrayWrapper<TArgType>> {
  typedef ArrayWrapper<TArgType> XprType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& wrapper)
      : evaluator_wrapper_base<ArrayWrapper<TArgType>>(wrapper.nestedExpression()) {}
};
// -------------------- Reverse --------------------

// defined in Reverse.h:
template <typename PacketType, bool ReversePacket>
struct reverse_packet_cond;

// Evaluator for Reverse: mirrors indices along the reversed direction(s).
// Packet accesses additionally need the lanes of each packet reversed when the
// reversal runs along the storage order (ReversePacket below).
template <typename ArgType, int Direction>
struct unary_evaluator<Reverse<ArgType, Direction>> : evaluator_base<Reverse<ArgType, Direction>> {
  typedef Reverse<ArgType, Direction> XprType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    IsRowMajor = XprType::IsRowMajor,
    IsColMajor = !IsRowMajor,
    // Which coordinate(s) get mirrored.
    ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
    // True when the reversal runs along the inner (storage-contiguous) dimension,
    // in which case the lanes inside each packet must be reversed too.
    ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) ||
                    ((Direction == Horizontal) && IsRowMajor),
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    // let's enable LinearAccess only with vectorization because of the product overhead
    // FIXME enable DirectAccess with negative strides?
    Flags0 = evaluator<ArgType>::Flags,
    LinearAccess = ((Direction == BothDirections) && (int(Flags0) & PacketAccessBit)) ||
                           ((ReverseRow && XprType::ColsAtCompileTime == 1) ||
                            (ReverseCol && XprType::RowsAtCompileTime == 1))
                       ? LinearAccessBit
                       : 0,
    Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
    Alignment = 0  // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& reverse)
      : m_argImpl(reverse.nestedExpression()),
        m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
        m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col);
  }

  // Linear read: mirror the linear index across the full size. Relies on the
  // members below being 1 along any non-reversed dimension.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
  }

  // Packet load: along the reversed inner dimension the mirrored address must be
  // shifted back by the full packet width (OffsetRow/OffsetCol), and the packet's
  // lanes reversed via reverse_packet_cond.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    static constexpr int OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1;
    static constexpr int OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1;
    using reverse_packet = reverse_packet_cond<PacketType, ReversePacket>;
    Index actualRow = ReverseRow ? m_rows.value() - row - OffsetRow : row;
    Index actualCol = ReverseCol ? m_cols.value() - col - OffsetCol : col;
    return reverse_packet::run(m_argImpl.template packet<LoadMode, PacketType>(actualRow, actualCol));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    Index actualIndex = m_rows.value() * m_cols.value() - index - PacketSize;
    return preverse(m_argImpl.template packet<LoadMode, PacketType>(actualIndex));
  }

  // Packet store: mirror of packet(row, col) — reverse the lanes before writing.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    static constexpr int OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1;
    static constexpr int OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1;
    using reverse_packet = reverse_packet_cond<PacketType, ReversePacket>;
    Index actualRow = ReverseRow ? m_rows.value() - row - OffsetRow : row;
    Index actualCol = ReverseCol ? m_cols.value() - col - OffsetCol : col;
    m_argImpl.template writePacket<LoadMode>(actualRow, actualCol, reverse_packet::run(x));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    Index actualIndex = m_rows.value() * m_cols.value() - index - PacketSize;
    m_argImpl.template writePacket<LoadMode>(actualIndex, preverse(x));
  }

  // Partial packet load: when the packet lanes are reversed, the active segment
  // [begin, begin+count) maps to [PacketSize - count - begin, ...) in the source.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    static constexpr int OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1;
    static constexpr int OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1;
    using reverse_packet = reverse_packet_cond<PacketType, ReversePacket>;
    Index actualRow = ReverseRow ? m_rows.value() - row - OffsetRow : row;
    Index actualCol = ReverseCol ? m_cols.value() - col - OffsetCol : col;
    Index actualBegin = ReversePacket ? (PacketSize - count - begin) : begin;
    return reverse_packet::run(
        m_argImpl.template packetSegment<LoadMode, PacketType>(actualRow, actualCol, actualBegin, count));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    Index actualIndex = m_rows.value() * m_cols.value() - index - PacketSize;
    Index actualBegin = PacketSize - count - begin;
    return preverse(m_argImpl.template packetSegment<LoadMode, PacketType>(actualIndex, actualBegin, count));
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index row, Index col, const PacketType& x, Index begin,
                                                                Index count) {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    static constexpr int OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1;
    static constexpr int OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1;
    using reverse_packet = reverse_packet_cond<PacketType, ReversePacket>;
    Index actualRow = ReverseRow ? m_rows.value() - row - OffsetRow : row;
    Index actualCol = ReverseCol ? m_cols.value() - col - OffsetCol : col;
    Index actualBegin = ReversePacket ? (PacketSize - count - begin) : begin;
    m_argImpl.template writePacketSegment<LoadMode>(actualRow, actualCol, reverse_packet::run(x), actualBegin, count);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacketSegment(Index index, const PacketType& x, Index begin,
                                                                Index count) {
    static constexpr int PacketSize = unpacket_traits<PacketType>::size;
    Index actualIndex = m_rows.value() * m_cols.value() - index - PacketSize;
    Index actualBegin = PacketSize - count - begin;
    m_argImpl.template writePacketSegment<LoadMode>(actualIndex, preverse(x), actualBegin, count);
  }

 protected:
  evaluator<ArgType> m_argImpl;

  // If we do not reverse rows, then we do not need to know the number of rows; same for columns
  // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors.
  const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
  const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
};
// -------------------- Diagonal --------------------

// Evaluator for Diagonal<ArgType, DiagIndex>: coefficient i of the diagonal maps
// to (i + rowOffset(), i + colOffset()) of the argument, where exactly one of
// the offsets is non-zero depending on the sign of the diagonal index
// (positive = super-diagonal, negative = sub-diagonal).
template <typename ArgType, int DiagIndex>
struct evaluator<Diagonal<ArgType, DiagIndex>> : evaluator_base<Diagonal<ArgType, DiagIndex>> {
  typedef Diagonal<ArgType, DiagIndex> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    // A diagonal is a vector: always linearly accessible, never row-major.
    Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) |
            LinearAccessBit,
    Alignment = 0
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& diagonal)
      : m_argImpl(diagonal.nestedExpression()), m_index(diagonal.index()) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // The column argument is ignored: a diagonal has a single column.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index) const {
    return m_argImpl.coeff(row + rowOffset(), row + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_argImpl.coeff(index + rowOffset(), index + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index) {
    return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
  }

 protected:
  evaluator<ArgType> m_argImpl;
  // Diagonal index; stored only when not known at compile time.
  const variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;

 private:
  // Row shift: sub-diagonals (negative index) start below the first row.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rowOffset() const {
    return m_index.value() > 0 ? 0 : -m_index.value();
  }
  // Column shift: super-diagonals (positive index) start right of the first column.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index colOffset() const {
    return m_index.value() > 0 ? m_index.value() : 0;
  }
};
//----------------------------------------------------------------------
// deprecated code
//----------------------------------------------------------------------

// -------------------- EvalToTemp --------------------

// expression class for evaluating nested expression to a temporary
template <typename ArgType>
class EvalToTemp;

// EvalToTemp exposes exactly the traits (sizes, scalar type, flags) of its argument.
template <typename ArgType>
struct traits<EvalToTemp<ArgType>> : public traits<ArgType> {};
// Expression wrapper whose evaluator (below) materializes the wrapped
// expression into a plain temporary before any coefficient access.
template <typename ArgType>
class EvalToTemp : public dense_xpr_base<EvalToTemp<ArgType>>::type {
 public:
  typedef typename dense_xpr_base<EvalToTemp>::type Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)

  explicit EvalToTemp(const ArgType& arg) : m_arg(arg) {}

  // Access to the wrapped (not yet evaluated) expression.
  const ArgType& arg() const { return m_arg; }

  constexpr Index rows() const noexcept { return m_arg.rows(); }

  constexpr Index cols() const noexcept { return m_arg.cols(); }

 private:
  // NOTE(review): holds a reference only — an EvalToTemp must not outlive `arg`.
  const ArgType& m_arg;
};
// Evaluator for EvalToTemp: evaluates the argument into m_result (a PlainObject)
// and then behaves exactly like an evaluator of that plain temporary.
template <typename ArgType>
struct evaluator<EvalToTemp<ArgType>> : public evaluator<typename ArgType::PlainObject> {
  typedef EvalToTemp<ArgType> XprType;
  typedef typename ArgType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.arg()) {
    // Re-construct the Base evaluator in place so that it references m_result
    // (m_result is only initialized after the base subobject in construction order).
    internal::construct_at<Base>(this, m_result);
  }

  // This constructor is used when nesting an EvalTo evaluator in another evaluator
  EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) : m_result(arg) {
    internal::construct_at<Base>(this, m_result);
  }

 protected:
  // The materialized temporary; lives as long as this evaluator.
  PlainObject m_result;
};
}
// namespace internal
}
// end namespace Eigen
#endif // EIGEN_COREEVALUATORS_H
eigen-master/Eigen/src/Core/CoreIterators.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COREITERATORS_H
#define EIGEN_COREITERATORS_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core
*/
namespace
internal
{
template
<
typename
XprType
,
typename
EvaluatorKind
>
class
inner_iterator_selector
;
}
/** \class InnerIterator
 * \brief An InnerIterator allows looping over the elements of any matrix expression.
 *
 * \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is
 * constructed.
 *
 * TODO: add a usage example
 */
template <typename XprType>
class InnerIterator {
 protected:
  // Concrete iterator implementation, selected on the evaluator kind
  // (index-based for dense, iterator-based for sparse-like evaluators).
  typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;
  typedef internal::evaluator<XprType> EvaluatorType;
  typedef typename internal::traits<XprType>::Scalar Scalar;

 public:
  /** Construct an iterator over the \a outerId -th row or column of \a xpr */
  InnerIterator(const XprType& xpr, const Index& outerId) : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) {}

  /// \returns the value of the current coefficient.
  EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); }

  /** Increment the iterator \c *this to the next non-zero coefficient.
   * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView
   */
  EIGEN_STRONG_INLINE InnerIterator& operator++() {
    m_iter.operator++();
    return *this;
  }

  EIGEN_STRONG_INLINE InnerIterator& operator+=(Index i) {
    m_iter.operator+=(i);
    return *this;
  }

  EIGEN_STRONG_INLINE InnerIterator operator+(Index i) {
    InnerIterator result(*this);
    result += i;
    return result;
  }

  /// \returns the column or row index of the current coefficient.
  EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }

  /// \returns the row index of the current coefficient.
  EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }

  /// \returns the column index of the current coefficient.
  EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }

  /// \returns \c true if the iterator \c *this still references a valid coefficient.
  EIGEN_STRONG_INLINE operator bool() const { return m_iter; }

 protected:
  // The evaluator owns any temporaries; the iterator references the evaluator.
  EvaluatorType m_eval;
  IteratorType m_iter;

 private:
  // If you get here, then you're not using the right InnerIterator type, e.g.:
  //   SparseMatrix<double,RowMajor> A;
  //   SparseMatrix<double>::InnerIterator it(A,0);
  template <typename T>
  InnerIterator(const EigenBase<T>&, Index outer);
};
namespace
internal
{
// Generic inner iterator implementation for dense (index-based) objects:
// walks the inner dimension of one outer slice, from 0 up to innerSize.
template <typename XprType>
class inner_iterator_selector<XprType, IndexBased> {
 protected:
  typedef evaluator<XprType> EvaluatorType;
  typedef typename traits<XprType>::Scalar Scalar;
  enum { IsRowMajor = (XprType::Flags & RowMajorBit) == RowMajorBit };

 public:
  EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType& eval, const Index& outerId, const Index& innerSize)
      : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) {}

  /// Current coefficient: (outer, inner) for row-major, (inner, outer) otherwise.
  EIGEN_STRONG_INLINE Scalar value() const {
    if (IsRowMajor) return m_eval.coeff(m_outer, m_inner);
    return m_eval.coeff(m_inner, m_outer);
  }

  EIGEN_STRONG_INLINE inner_iterator_selector& operator++() {
    ++m_inner;
    return *this;
  }

  EIGEN_STRONG_INLINE Index index() const { return m_inner; }

  inline Index row() const {
    if (IsRowMajor) return m_outer;
    return index();
  }

  inline Index col() const {
    if (IsRowMajor) return index();
    return m_outer;
  }

  /// True while the inner position is within [0, m_end).
  EIGEN_STRONG_INLINE operator bool() const { return m_inner >= 0 && m_inner < m_end; }

 protected:
  const EvaluatorType& m_eval;
  Index m_inner;
  const Index m_outer;
  const Index m_end;
};
// For iterator-based evaluator, inner-iterator is already implemented as
// evaluator<>::InnerIterator; this selector simply inherits it and drops the
// unused innerSize argument.
template <typename XprType>
class inner_iterator_selector<XprType, IteratorBased> : public evaluator<XprType>::InnerIterator {
 protected:
  typedef typename evaluator<XprType>::InnerIterator Base;
  typedef evaluator<XprType> EvaluatorType;

 public:
  EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType& eval, const Index& outerId,
                                              const Index& /*innerSize*/)
      : Base(eval, outerId) {}
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_COREITERATORS_H
eigen-master/Eigen/src/Core/CwiseBinaryOp.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_BINARY_OP_H
#define EIGEN_CWISE_BINARY_OP_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Traits of a coefficient-wise binary expression: sizes/kind come from the LHS,
// the scalar type from the functor's result, storage kind and ordering from
// promoting the two operands.
template <typename BinaryOp, typename Lhs, typename Rhs>
struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs>> {
  // we must not inherit from traits<Lhs> since it has
  // the potential to cause problems with MSVC
  typedef remove_all_t<Lhs> Ancestor;
  typedef typename traits<Ancestor>::XprKind XprKind;
  enum {
    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
  };

  // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
  // we still want to handle the case when the result type is different.
  typedef typename result_of<BinaryOp(const typename Lhs::Scalar&, const typename Rhs::Scalar&)>::type Scalar;
  typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind,
                                              BinaryOp>::ret StorageKind;
  typedef typename promote_index_type<typename traits<Lhs>::StorageIndex, typename traits<Rhs>::StorageIndex>::type
      StorageIndex;
  typedef typename Lhs::Nested LhsNested;
  typedef typename Rhs::Nested RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;
  enum {
    // Resolve the storage order (RowMajorBit) from the two operands' orders.
    Flags = cwise_promote_storage_order<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind,
                                        LhsNested_::Flags & RowMajorBit, RhsNested_::Flags & RowMajorBit>::value
  };
};
}
// end namespace internal
template
<
typename
BinaryOp
,
typename
Lhs
,
typename
Rhs
,
typename
StorageKind
>
class
CwiseBinaryOpImpl
;
/** \class CwiseBinaryOp
* \ingroup Core_Module
*
* \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
*
* \tparam BinaryOp template functor implementing the operator
* \tparam LhsType the type of the left-hand side
* \tparam RhsType the type of the right-hand side
*
* This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
* It is the return type of binary operators, by which we mean only those binary operators where
* both the left-hand side and the right-hand side are Eigen expressions.
* For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
*
* Most of the time, this is the only way that it is used, so you typically don't have to name
* CwiseBinaryOp types explicitly.
*
* \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class
* CwiseNullaryOp
*/
template <typename BinaryOp, typename LhsType, typename RhsType>
class CwiseBinaryOp : public CwiseBinaryOpImpl<BinaryOp, LhsType, RhsType,
                                               typename internal::cwise_promote_storage_type<
                                                   typename internal::traits<LhsType>::StorageKind,
                                                   typename internal::traits<RhsType>::StorageKind, BinaryOp>::ret>,
                      internal::no_assignment_operator {
 public:
  typedef internal::remove_all_t<BinaryOp> Functor;
  typedef internal::remove_all_t<LhsType> Lhs;
  typedef internal::remove_all_t<RhsType> Rhs;

  typedef typename CwiseBinaryOpImpl<BinaryOp, LhsType, RhsType,
                                     typename internal::cwise_promote_storage_type<
                                         typename internal::traits<LhsType>::StorageKind,
                                         typename internal::traits<Rhs>::StorageKind, BinaryOp>::ret>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)

  // Reject operand scalar pairs the functor cannot combine, and mismatched
  // compile-time sizes, at compile time.
  // NOTE(review): "COMPATIBILIY" is the macro's actual (misspelled) name as
  // declared elsewhere in Eigen — do not "fix" the spelling here.
  EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp, typename Lhs::Scalar, typename Rhs::Scalar)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)

  // Operands are nested by value or by reference depending on their cost,
  // as decided by ref_selector.
  typedef typename internal::ref_selector<LhsType>::type LhsNested;
  typedef typename internal::ref_selector<RhsType>::type RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;

#if EIGEN_COMP_MSVC
  // Required for Visual Studio or the Copy constructor will probably not get inlined!
  EIGEN_STRONG_INLINE CwiseBinaryOp(const CwiseBinaryOp<BinaryOp, LhsType, RhsType>&) = default;
#endif

  // Stores (nests) both operands and the functor; runtime sizes must agree.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs,
                                                      const BinaryOp& func = BinaryOp())
      : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) {
    eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept {
    // return the fixed size type if available to enable compile time optimizations
    return internal::traits<internal::remove_all_t<LhsNested>>::RowsAtCompileTime == Dynamic ? m_rhs.rows()
                                                                                             : m_lhs.rows();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept {
    // return the fixed size type if available to enable compile time optimizations
    return internal::traits<internal::remove_all_t<LhsNested>>::ColsAtCompileTime == Dynamic ? m_rhs.cols()
                                                                                             : m_lhs.cols();
  }

  /** \returns the left hand side nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNested_& lhs() const { return m_lhs; }
  /** \returns the right hand side nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNested_& rhs() const { return m_rhs; }
  /** \returns the functor representing the binary operation */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; }

 protected:
  LhsNested m_lhs;
  RhsNested m_rhs;
  const BinaryOp m_functor;
};
// Generic API dispatcher
// Generic API dispatcher: selects the expression base class (matrix vs. array
// vs. other storage kinds) for CwiseBinaryOp via generic_xpr_base.
template <typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
class CwiseBinaryOpImpl : public internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>>::type {
 public:
  typedef typename internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>>::type Base;
};
/** replaces \c *this by \c *this - \a other.
*
* \returns a reference to \c *this
*/
// In-place subtraction: delegates to the assignment machinery with a
// subtract-assign functor so the evaluator can fuse the operation.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived>& other) {
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
  return derived();
}
/** replaces \c *this by \c *this + \a other.
*
* \returns a reference to \c *this
*/
// In-place addition: delegates to the assignment machinery with an
// add-assign functor so the evaluator can fuse the operation.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other) {
  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
  return derived();
}
}
// end namespace Eigen
#endif // EIGEN_CWISE_BINARY_OP_H
eigen-master/Eigen/src/Core/CwiseNullaryOp.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_NULLARY_OP_H
#define EIGEN_CWISE_NULLARY_OP_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
// Traits for CwiseNullaryOp: inherit everything from the target plain object
// type, keeping only its storage-order bit in Flags (a nullary expression has
// no lvalue/direct-access capabilities).
template <typename NullaryOp, typename PlainObjectType>
struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType>> : traits<PlainObjectType> {
  enum { Flags = traits<PlainObjectType>::Flags & RowMajorBit };
};
}
// namespace internal
/** \class CwiseNullaryOp
* \ingroup Core_Module
*
* \brief Generic expression of a matrix where all coefficients are defined by a functor
*
* \tparam NullaryOp template functor implementing the operator
* \tparam PlainObjectType the underlying plain matrix/array type
*
* This class represents an expression of a generic nullary operator.
* It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
* and most of the time this is the only way it is used.
*
* However, if you want to write a function returning such an expression, you
* will need to use this class.
*
* The functor NullaryOp must expose one of the following method:
<table class="manual">
<tr ><td>\c operator()() </td><td>if the procedural generation does not depend on the coefficient entries
(e.g., random numbers)</td></tr> <tr class="alt"><td>\c operator()(Index i)</td><td>if the procedural generation makes
sense for vectors only and that it depends on the coefficient index \c i (e.g., linspace) </td></tr> <tr ><td>\c
operator()(Index i,Index j)</td><td>if the procedural generation depends on the matrix coordinates \c i, \c j (e.g.,
to generate a checkerboard with 0 and 1)</td></tr>
</table>
* It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized
for vectors.
*
* See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding
* C++11 random number generators.
*
* A nullary expression can also be used to implement custom sophisticated matrix manipulations
* that cannot be covered by the existing set of natively supported matrix manipulations.
* See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations
* on the behavior of CwiseNullaryOp.
*
* \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr
*/
template <typename NullaryOp, typename PlainObjectType>
class CwiseNullaryOp : public internal::dense_xpr_base<CwiseNullaryOp<NullaryOp, PlainObjectType>>::type,
                       internal::no_assignment_operator {
 public:
  typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)

  // Matrix variant: runtime dimensions must be non-negative and must match
  // any fixed compile-time dimensions.
  EIGEN_DEVICE_FUNC CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
      : m_rows(rows), m_cols(cols), m_functor(func) {
    eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 &&
                 (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
  }

  // Vector variant: maps `size` onto the dynamic dimension (1 x size for a
  // compile-time row vector, size x 1 otherwise). Only valid for vectors.
  EIGEN_DEVICE_FUNC CwiseNullaryOp(Index size, const NullaryOp& func = NullaryOp())
      : CwiseNullaryOp(RowsAtCompileTime == 1 ? 1 : size, RowsAtCompileTime == 1 ? size : 1, func) {
    EIGEN_STATIC_ASSERT(CwiseNullaryOp::IsVectorAtCompileTime, YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows.value(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols.value(); }

  /** \returns the functor representing the nullary operation */
  EIGEN_DEVICE_FUNC const NullaryOp& functor() const { return m_functor; }

 protected:
  // Dimensions occupy storage only when dynamic; fixed sizes cost nothing.
  const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
  const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
  const NullaryOp m_functor;
};
/** \returns an expression of a matrix defined by a custom functor \a func
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
* instead.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
*
* \sa class CwiseNullaryOp
*/
// Dynamic-size matrix variant: wraps `func` in a CwiseNullaryOp of the given
// dimensions. The #ifndef only simplifies the return type shown by Doxygen.
template <typename Derived>
template <typename CustomNullaryOp>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
    const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
#else
    const CwiseNullaryOp<CustomNullaryOp, PlainObject>
#endif
    DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) {
  return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);
}
/** \returns an expression of a matrix defined by a custom functor \a func
*
* The parameter \a size is the size of the returned vector.
* Must be compatible with this MatrixBase type.
*
* \only_for_vectors
*
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
* it is redundant to pass \a size as argument, so Zero() should be used
* instead.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
*
* Here is an example with C++11 random generators: \include random_cpp11.cpp
* Output: \verbinclude random_cpp11.out
*
* \sa class CwiseNullaryOp
*/
// Dynamic-size vector variant: `size` fills the dynamic dimension of the
// vector type. The #ifndef only simplifies the return type shown by Doxygen.
template <typename Derived>
template <typename CustomNullaryOp>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
    const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
#else
    const CwiseNullaryOp<CustomNullaryOp, PlainObject>
#endif
    DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  // A compile-time row vector gets 1 x size, any other vector gets size x 1.
  const Index nbRows = (RowsAtCompileTime == 1) ? Index(1) : size;
  const Index nbCols = (RowsAtCompileTime == 1) ? size : Index(1);
  return CwiseNullaryOp<CustomNullaryOp, PlainObject>(nbRows, nbCols, func);
}
/** \returns an expression of a matrix defined by a custom functor \a func
*
* This variant is only for fixed-size DenseBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
*
* \sa class CwiseNullaryOp
*/
// Fixed-size variant: dimensions are taken from the compile-time sizes.
// The #ifndef only simplifies the return type shown by Doxygen.
template <typename Derived>
template <typename CustomNullaryOp>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
    const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
#else
    const CwiseNullaryOp<CustomNullaryOp, PlainObject>
#endif
    DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func) {
  return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);
}
/** \returns an expression of a constant matrix of value \a value
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this DenseBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
* instead.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
*
* \sa class CwiseNullaryOp
*/
// Dynamic-size matrix constant: a nullary expression whose functor always
// returns `value`.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value) {
  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
}
/** \returns an expression of a constant matrix of value \a value
*
* The parameter \a size is the size of the returned vector.
* Must be compatible with this DenseBase type.
*
* \only_for_vectors
*
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
* it is redundant to pass \a size as argument, so Zero() should be used
* instead.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
*
* \sa class CwiseNullaryOp
*/
// Dynamic-size vector constant; the vector-only check happens inside the
// NullaryExpr(Index, ...) overload this delegates to.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(Index size, const Scalar& value) {
  return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
}
/** \returns an expression of a constant matrix of value \a value
*
* This variant is only for fixed-size DenseBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
*
* \sa class CwiseNullaryOp
*/
// Fixed-size constant: statically rejects dynamic-size types, then uses the
// compile-time dimensions.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(const Scalar& value) {
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
  return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime,
                                         internal::scalar_constant_op<Scalar>(value));
}
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
*
* \only_for_vectors
*
* Example: \include DenseBase_LinSpaced_seq_deprecated.cpp
* Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out
*
* \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&)
*/
template
<
typename
Derived
>
EIGEN_DEPRECATED
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
const
typename
DenseBase
<
Derived
>::
RandomAccessLinSpacedReturnType
DenseBase
<
Derived
>::
LinSpaced
(
Sequential_t
,
Index
size
,
const
Scalar
&
low
,
const
Scalar
&
high
)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY
(
Derived
)
return
DenseBase
<
Derived
>::
NullaryExpr
(
size
,
internal
::
linspaced_op
<
Scalar
>
(
low
,
high
,
size
));
}
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
*
* \sa LinSpaced(const Scalar&, const Scalar&)
*/
template
<
typename
Derived
>
EIGEN_DEPRECATED
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
const
typename
DenseBase
<
Derived
>::
RandomAccessLinSpacedReturnType
DenseBase
<
Derived
>::
LinSpaced
(
Sequential_t
,
const
Scalar
&
low
,
const
Scalar
&
high
)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY
(
Derived
)
EIGEN_STATIC_ASSERT_FIXED_SIZE
(
Derived
)
return
DenseBase
<
Derived
>::
NullaryExpr
(
Derived
::
SizeAtCompileTime
,
internal
::
linspaced_op
<
Scalar
>
(
low
,
high
,
Derived
::
SizeAtCompileTime
));
}
/**
* \brief Sets a linearly spaced vector.
*
* The function generates 'size' equally spaced values in the closed interval [low,high].
* When size is set to 1, a vector of length 1 containing 'high' is returned.
*
* \only_for_vectors
*
* Example: \include DenseBase_LinSpaced.cpp
* Output: \verbinclude DenseBase_LinSpaced.out
*
* For integer scalar types, an even spacing is possible if and only if the length of the range,
* i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the
* number of values \c high-low+1 (meaning each value can be repeated the same number of time).
 * If one of these two conditions is not satisfied, then \c high is lowered to the largest value
 * satisfying one of these constraints.
* Here are some examples:
*
* Example: \include DenseBase_LinSpacedInt.cpp
* Output: \verbinclude DenseBase_LinSpacedInt.out
*
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
*/
// Dynamic-size linearly spaced vector: `size` values in the closed interval
// [low, high] (the functor itself handles the size==1 and integer cases).
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low, high, size));
}
/**
* \copydoc DenseBase::LinSpaced(Index, const DenseBase::Scalar&, const DenseBase::Scalar&)
* Special version for fixed size types which does not require the size parameter.
*/
// Fixed-size linearly spaced vector: the length is SizeAtCompileTime.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime,
                                         internal::linspaced_op<Scalar>(low, high, Derived::SizeAtCompileTime));
}
// Equally spaced vector: coefficient i is low + i*step. Dynamic-size variant.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessEqualSpacedReturnType
DenseBase<Derived>::EqualSpaced(Index size, const Scalar& low, const Scalar& step) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return DenseBase<Derived>::NullaryExpr(size, internal::equalspaced_op<Scalar>(low, step));
}

// Fixed-size variant: the length is SizeAtCompileTime.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessEqualSpacedReturnType
DenseBase<Derived>::EqualSpaced(const Scalar& low, const Scalar& step) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::equalspaced_op<Scalar>(low, step));
}
/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
template <typename Derived>
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant(const Scalar& val, const RealScalar& prec) const {
  // Evaluate the expression once up front so coefficients are not recomputed
  // on every access.
  typename internal::nested_eval<Derived, 1>::type evaluated(derived());
  for (Index col = 0; col < cols(); ++col) {
    for (Index row = 0; row < rows(); ++row) {
      if (!internal::isApprox(evaluated.coeff(row, col), val, prec)) {
        return false;
      }
    }
  }
  return true;
}
/** This is just an alias for isApproxToConstant().
*
* \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
// Thin alias for isApproxToConstant(); kept for API symmetry.
template <typename Derived>
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant(const Scalar& val, const RealScalar& prec) const {
  return isApproxToConstant(val, prec);
}
/** Alias for setConstant(): sets all coefficients in this expression to \a val.
*
* \sa setConstant(), Constant(), class CwiseNullaryOp
*/
// Alias for setConstant(): sets every coefficient to `val`.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val) {
  setConstant(val);
}
/** Sets all coefficients in this expression to value \a val.
*
* \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(),
* Constant(), class CwiseNullaryOp, setZero(), setOnes()
*/
// Sets all coefficients to `val`, dispatching through eigen_fill_impl so
// specialized (e.g. vectorized) fill paths can be selected per Derived type.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val) {
  internal::eigen_fill_impl<Derived>::run(derived(), val);
  return derived();
}
/** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val.
*
* \only_for_vectors
*
* Example: \include Matrix_setConstant_int.cpp
* Output: \verbinclude Matrix_setConstant_int.out
*
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp,
* MatrixBase::Constant(const Scalar&)
*/
// Resizing variants of setConstant(): each resizes first, then delegates to
// the non-resizing DenseBase::setConstant(const Scalar&).

// Vector variant: resize to `size`, then fill with `val`.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val) {
  resize(size);
  return setConstant(val);
}

/** Resizes to the given size, and sets all coefficients in this expression to the given value \a val.
 *
 * \param rows the new number of rows
 * \param cols the new number of columns
 * \param val the value to which all coefficients are set
 *
 * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
 * MatrixBase::Constant(const Scalar&)
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, Index cols,
                                                                                     const Scalar& val) {
  resize(rows, cols);
  return setConstant(val);
}

/** Resizes to the given size, changing only the number of columns, and sets all
 * coefficients in this expression to the given value \a val. For the parameter
 * of type NoChange_t, just pass the special value \c NoChange.
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(NoChange_t, Index cols,
                                                                                     const Scalar& val) {
  return setConstant(rows(), cols, val);
}

/** Resizes to the given size, changing only the number of rows, and sets all
 * coefficients in this expression to the given value \a val. For the parameter
 * of type NoChange_t, just pass the special value \c NoChange.
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, NoChange_t,
                                                                                     const Scalar& val) {
  return setConstant(rows, cols(), val);
}
/**
* \brief Sets a linearly spaced vector.
*
* The function generates 'size' equally spaced values in the closed interval [low,high].
* When size is set to 1, a vector of length 1 containing 'high' is returned.
*
* \only_for_vectors
*
* Example: \include DenseBase_setLinSpaced.cpp
* Output: \verbinclude DenseBase_setLinSpaced.out
*
* For integer scalar types, do not miss the explanations on the definition
* of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
*
* \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
*/
// Fills *this with `newSize` equally spaced values in [low, high] by
// assigning a linspaced nullary expression (this also resizes resizable
// vectors via the assignment).
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low,
                                                                                const Scalar& high) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar>(low, high, newSize));
}

// Same as above, keeping the current size.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return setLinSpaced(size(), low, high);
}
// Fills *this with values low, low+step, low+2*step, ... by assigning an
// equal-spaced nullary expression.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setEqualSpaced(Index newSize, const Scalar& low,
                                                                                  const Scalar& step) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return derived() = Derived::NullaryExpr(newSize, internal::equalspaced_op<Scalar>(low, step));
}

// Same as above, keeping the current size.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setEqualSpaced(const Scalar& low,
                                                                                  const Scalar& step) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return setEqualSpaced(size(), low, step);
}
// zero:
/** \returns an expression of a zero matrix.
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
* instead.
*
* Example: \include MatrixBase_zero_int_int.cpp
* Output: \verbinclude MatrixBase_zero_int_int.out
*
* \sa Zero(), Zero(Index)
*/
// Zero-expression factories: all three return a ZeroReturnType expression;
// no storage is allocated and no coefficients are written until assignment.

// Dynamic-size matrix variant.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ZeroReturnType DenseBase<Derived>::Zero(
    Index rows, Index cols) {
  return ZeroReturnType(rows, cols);
}

/** \returns an expression of a zero vector.
 *
 * \only_for_vectors
 *
 * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
 * it is redundant to pass \a size as argument, so Zero() should be used instead.
 *
 * \sa Zero(), Zero(Index,Index)
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ZeroReturnType DenseBase<Derived>::Zero(
    Index size) {
  return ZeroReturnType(size);
}

/** \returns an expression of a fixed-size zero matrix or vector.
 *
 * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
 * need to use the variants taking size arguments.
 *
 * \sa Zero(Index), Zero(Index,Index)
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ZeroReturnType DenseBase<Derived>::Zero() {
  return ZeroReturnType(RowsAtCompileTime, ColsAtCompileTime);
}
/** \returns true if *this is approximately equal to the zero matrix,
* within the precision given by \a prec.
*
* Example: \include MatrixBase_isZero.cpp
* Output: \verbinclude MatrixBase_isZero.out
*
* \sa class CwiseNullaryOp, Zero()
*/
// Returns true if every coefficient is much smaller than Scalar(1) within
// precision `prec`, i.e. the matrix is approximately zero.
template <typename Derived>
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const {
  // Evaluate once so coefficients are not recomputed per access.
  typename internal::nested_eval<Derived, 1>::type self(derived());
  for (Index j = 0; j < cols(); ++j)
    for (Index i = 0; i < rows(); ++i)
      if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false;
  return true;
}
/** Sets all coefficients in this expression to zero.
*
* Example: \include MatrixBase_setZero.cpp
* Output: \verbinclude MatrixBase_setZero.out
*
* \sa class CwiseNullaryOp, Zero()
*/
// Sets all coefficients to zero, dispatching through eigen_zero_impl so
// specialized (e.g. memset-style) zeroing paths can be selected.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero() {
  internal::eigen_zero_impl<Derived>::run(derived());
  return derived();
}
/** Resizes to the given \a size, and sets all coefficients in this expression to zero.
*
* \only_for_vectors
*
* Example: \include Matrix_setZero_int.cpp
* Output: \verbinclude Matrix_setZero_int.out
*
* \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
*/
// Resizing variants of setZero(): each resizes first, then delegates to the
// non-resizing DenseBase::setZero().

// Vector variant: resize to `newSize`, then zero.
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index newSize) {
  resize(newSize);
  return setZero();
}

/** Resizes to the given size, and sets all coefficients in this expression to zero.
 *
 * \param rows the new number of rows
 * \param cols the new number of columns
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, Index cols) {
  resize(rows, cols);
  return setZero();
}

/** Resizes, changing only the number of columns (pass \c NoChange for rows),
 * and zeroes all coefficients. */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(NoChange_t, Index cols) {
  return setZero(rows(), cols);
}

/** Resizes, changing only the number of rows (pass \c NoChange for columns),
 * and zeroes all coefficients. */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, NoChange_t) {
  return setZero(rows, cols());
}
// ones:
/** \returns an expression of a matrix where all coefficients equal one.
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
* instead.
*
* Example: \include MatrixBase_ones_int_int.cpp
* Output: \verbinclude MatrixBase_ones_int_int.out
*
* \sa Ones(), Ones(Index), isOnes(), class Ones
*/
template
<
typename
Derived
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
const
typename
DenseBase
<
Derived
>::
ConstantReturnType
DenseBase
<
Derived
>::
Ones
(
Index
rows
,
Index
cols
)
{
return
Constant
(
rows
,
cols
,
Scalar
(
1
));
}
/** \returns an expression of a vector where all coefficients equal one.
*
* The parameter \a newSize is the size of the returned vector.
* Must be compatible with this MatrixBase type.
*
* \only_for_vectors
*
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
* it is redundant to pass \a size as argument, so Ones() should be used
* instead.
*
* Example: \include MatrixBase_ones_int.cpp
* Output: \verbinclude MatrixBase_ones_int.out
*
* \sa Ones(), Ones(Index,Index), isOnes(), class Ones
*/
template
<
typename
Derived
>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE
const
typename
DenseBase
<
Derived
>::
ConstantReturnType
DenseBase
<
Derived
>::
Ones
(
Index
newSize
)
{
return
Constant
(
newSize
,
Scalar
(
1
));
}
/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
*
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* Example: \include MatrixBase_ones.cpp
* Output: \verbinclude MatrixBase_ones.out
*
* \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones() {
  // Fixed-size variant: the dimensions are taken from the type itself.
  const Scalar one(1);
  return Constant(one);
}
/** \returns true if *this is approximately equal to the matrix where all coefficients
* are equal to 1, within the precision given by \a prec.
*
* Example: \include MatrixBase_isOnes.cpp
* Output: \verbinclude MatrixBase_isOnes.out
*
* \sa class CwiseNullaryOp, Ones()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes(const RealScalar& prec) const {
  // Being "all ones" is exactly being approximately constant with value 1.
  const Scalar one(1);
  return isApproxToConstant(one, prec);
}
/** Sets all coefficients in this expression to one.
*
* Example: \include MatrixBase_setOnes.cpp
* Output: \verbinclude MatrixBase_setOnes.out
*
* \sa class CwiseNullaryOp, Ones()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes() {
  // In-place fill: every coefficient becomes Scalar(1).
  const Scalar one(1);
  return setConstant(one);
}
/** Resizes to the given \a newSize, and sets all coefficients in this expression to one.
*
* \only_for_vectors
*
* Example: \include Matrix_setOnes_int.cpp
* Output: \verbinclude Matrix_setOnes_int.out
*
* \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index newSize) {
  // Vector-only variant: resize first, then fill every coefficient with 1.
  resize(newSize);
  const Scalar one(1);
  return setConstant(one);
}
/** Resizes to the given size, and sets all coefficients in this expression to one.
*
* \param rows the new number of rows
* \param cols the new number of columns
*
* Example: \include Matrix_setOnes_int_int.cpp
* Output: \verbinclude Matrix_setOnes_int_int.out
*
* \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, Index cols) {
  // Matrix variant: resize to rows x cols, then fill every coefficient with 1.
  resize(rows, cols);
  const Scalar one(1);
  return setConstant(one);
}
/** Resizes to the given size, changing only the number of rows, and sets all
* coefficients in this expression to one. For the parameter of type NoChange_t,
* just pass the special value \c NoChange.
*
* \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp,
* MatrixBase::Ones()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, NoChange_t) {
  // Change only the number of rows; the current column count is preserved.
  return setOnes(rows, cols());
}
/** Resizes to the given size, changing only the number of columns, and sets all
* coefficients in this expression to one. For the parameter of type NoChange_t,
* just pass the special value \c NoChange.
*
* \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t) class CwiseNullaryOp,
* MatrixBase::Ones()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(NoChange_t, Index cols) {
  // Change only the number of columns; the current row count is preserved.
  return setOnes(rows(), cols);
}
// Identity:
/** \returns an expression of the identity matrix (not necessarily square).
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
* instead.
*
* Example: \include MatrixBase_identity_int_int.cpp
* Output: \verbinclude MatrixBase_identity_int_int.out
*
* \sa Identity(), setIdentity(), isIdentity()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity(Index rows, Index cols) {
  // The identity is expressed as a nullary expression whose functor yields 1 on
  // the main diagonal and 0 elsewhere; nothing is materialized in memory.
  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
}
/** \returns an expression of the identity matrix (not necessarily square).
*
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
* need to use the variant taking size arguments.
*
* Example: \include MatrixBase_identity.cpp
* Output: \verbinclude MatrixBase_identity.out
*
* \sa Identity(Index,Index), setIdentity(), isIdentity()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity() {
  // Only meaningful for fixed-size types; dynamic-size callers must use
  // Identity(rows, cols) instead.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
  return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
}
/** \returns true if *this is approximately equal to the identity matrix
* (not necessarily square),
* within the precision given by \a prec.
*
* Example: \include MatrixBase_isIdentity.cpp
* Output: \verbinclude MatrixBase_isIdentity.out
*
* \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
*/
template <typename Derived>
bool MatrixBase<Derived>::isIdentity(const RealScalar& prec) const {
  // Evaluate the expression into a temporary if re-reading coefficients would
  // be expensive (nested_eval decides based on the expression's cost).
  typename internal::nested_eval<Derived, 1>::type self(derived());
  for (Index j = 0; j < cols(); ++j) {
    for (Index i = 0; i < rows(); ++i) {
      if (i == j) {
        // Diagonal entries must be approximately 1.
        if (!internal::isApprox(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false;
      } else {
        // Off-diagonal entries must be negligible relative to 1.
        if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<RealScalar>(1), prec)) return false;
      }
    }
  }
  return true;
}
namespace
internal
{
// Generic path (small fixed sizes): build the identity expression and assign
// it wholesale, letting the usual assignment machinery unroll the copy.
template <typename Derived, bool Big = (Derived::SizeAtCompileTime >= 16)>
struct setIdentity_impl {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) {
    return m = Derived::Identity(m.rows(), m.cols());
  }
};
// Large (or dynamic) sizes: zero-fill everything, then walk the main diagonal.
// This avoids evaluating the identity functor for every single coefficient.
template <typename Derived>
struct setIdentity_impl<Derived, true> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) {
    m.setZero();
    // A non-square matrix has min(rows, cols) diagonal entries.
    const Index size = numext::mini(m.rows(), m.cols());
    for (Index i = 0; i < size; ++i) m.coeffRef(i, i) = typename Derived::Scalar(1);
    return m;
  }
};
}
// end namespace internal
/** Writes the identity expression (not necessarily square) into *this.
*
* Example: \include MatrixBase_setIdentity.cpp
* Output: \verbinclude MatrixBase_setIdentity.out
*
* \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity() {
  // Dispatch on the compile-time size: small fixed-size matrices assign the
  // identity expression directly, larger/dynamic ones zero-fill then set the
  // diagonal (see internal::setIdentity_impl above).
  return internal::setIdentity_impl<Derived>::run(derived());
}
/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
*
* \param rows the new number of rows
* \param cols the new number of columns
*
* Example: \include Matrix_setIdentity_int_int.cpp
* Output: \verbinclude Matrix_setIdentity_int_int.out
*
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols) {
  // Resize first (only valid on resizable types), then write the identity.
  derived().resize(rows, cols);
  return setIdentity();
}
/** \returns an expression of the i-th unit (basis) vector.
*
* \only_for_vectors
*
* \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(
    Index newSize, Index i) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  // The i-th basis vector is the i-th column of the newSize x newSize identity.
  return BasisReturnType(SquareMatrixType::Identity(newSize, newSize), i);
}
/** \returns an expression of the i-th unit (basis) vector.
*
* \only_for_vectors
*
* This variant is for fixed-size vector only.
*
* \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(
    Index i) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  // Fixed-size variant: the identity's size is known at compile time.
  return BasisReturnType(SquareMatrixType::Identity(), i);
}
/** \returns an expression of the X axis unit vector (1{,0}^*)
*
* \only_for_vectors
*
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(),
* MatrixBase::UnitW()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX() {
  // First canonical basis vector: coefficient 0 is 1, all others are 0.
  return Derived::Unit(0);
}
/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
*
* \only_for_vectors
*
 * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitZ(),
 * MatrixBase::UnitW()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY() {
  // Second canonical basis vector: coefficient 1 is 1, all others are 0.
  return Derived::Unit(1);
}
/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
*
* \only_for_vectors
*
 * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(),
 * MatrixBase::UnitW()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ() {
  // Third canonical basis vector: coefficient 2 is 1, all others are 0.
  return Derived::Unit(2);
}
/** \returns an expression of the W axis unit vector (0,0,0,1)
*
* \only_for_vectors
*
 * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(),
 * MatrixBase::UnitZ()
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW() {
  // Fourth canonical basis vector: coefficient 3 is 1, all others are 0.
  return Derived::Unit(3);
}
/** \brief Set the coefficients of \c *this to the i-th unit (basis) vector
*
* \param i index of the unique coefficient to be set to 1
*
* \only_for_vectors
*
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index i) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
  eigen_assert(i < size());
  // Zero everything first, then raise the single requested coefficient.
  derived().setZero();
  derived().coeffRef(i) = Scalar(1);
  return derived();
}
/** \brief Resizes to the given \a newSize, and writes the i-th unit (basis) vector into *this.
*
* \param newSize the new size of the vector
* \param i index of the unique coefficient to be set to 1
*
* \only_for_vectors
*
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
*/
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index newSize, Index i) {
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
  // Validate against the *new* size, since the vector is resized below.
  eigen_assert(i < newSize);
  derived().resize(newSize);
  return setUnit(i);
}
}
// end namespace Eigen
#endif // EIGEN_CWISE_NULLARY_OP_H
eigen-master/Eigen/src/Core/CwiseTernaryOp.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_TERNARY_OP_H
#define EIGEN_CWISE_TERNARY_OP_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>> {
  // we must not inherit from traits<Arg1> since it has
  // the potential to cause problems with MSVC
  typedef remove_all_t<Arg1> Ancestor;
  typedef typename traits<Ancestor>::XprKind XprKind;
  enum {
    // Compile-time sizes are taken from the first argument; run-time equality
    // of all three operands is enforced by the CwiseTernaryOp constructor.
    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
  };

  // even though we require Arg1, Arg2, and Arg3 to have the same scalar type
  // (see CwiseTernaryOp constructor),
  // we still want to handle the case when the result type is different.
  typedef typename result_of<TernaryOp(const typename Arg1::Scalar&, const typename Arg2::Scalar&,
                                       const typename Arg3::Scalar&)>::type Scalar;

  typedef typename internal::traits<Arg1>::StorageKind StorageKind;
  typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;

  typedef typename Arg1::Nested Arg1Nested;
  typedef typename Arg2::Nested Arg2Nested;
  typedef typename Arg3::Nested Arg3Nested;
  typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
  typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
  typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;
  enum {
    // Only the storage-order bit is propagated, and only from the first argument.
    Flags = Arg1Nested_::Flags & RowMajorBit
  };
};
}
// end namespace internal
template
<
typename
TernaryOp
,
typename
Arg1
,
typename
Arg2
,
typename
Arg3
,
typename
StorageKind
>
class
CwiseTernaryOpImpl
;
/** \class CwiseTernaryOp
* \ingroup Core_Module
*
* \brief Generic expression where a coefficient-wise ternary operator is
 * applied to three expressions
*
* \tparam TernaryOp template functor implementing the operator
* \tparam Arg1Type the type of the first argument
* \tparam Arg2Type the type of the second argument
* \tparam Arg3Type the type of the third argument
*
* This class represents an expression where a coefficient-wise ternary
* operator is applied to three expressions.
* It is the return type of ternary operators, by which we mean only those
* ternary operators where
* all three arguments are Eigen expressions.
* For example, the return type of betainc(matrix1, matrix2, matrix3) is a
* CwiseTernaryOp.
*
* Most of the time, this is the only way that it is used, so you typically
* don't have to name
* CwiseTernaryOp types explicitly.
*
* \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const
* MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,
* class CwiseUnaryOp, class CwiseNullaryOp
*/
template <typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type>
class CwiseTernaryOp : public CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
                                                 typename internal::traits<Arg1Type>::StorageKind>,
                       internal::no_assignment_operator {
 public:
  typedef internal::remove_all_t<Arg1Type> Arg1;
  typedef internal::remove_all_t<Arg2Type> Arg2;
  typedef internal::remove_all_t<Arg3Type> Arg3;

  // require the sizes to match
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)

  // The index types should match
  EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
                                         typename internal::traits<Arg2Type>::StorageKind>::value),
                      STORAGE_KIND_MUST_MATCH)
  EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
                                         typename internal::traits<Arg3Type>::StorageKind>::value),
                      STORAGE_KIND_MUST_MATCH)

  typedef typename CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
                                      typename internal::traits<Arg1Type>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)

  typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;
  typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;
  typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;
  typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
  typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
  typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3,
                                                       const TernaryOp& func = TernaryOp())
      : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {
    // All three operands must have identical run-time dimensions.
    eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() &&
                 a1.cols() == a3.cols());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const {
    // return the fixed size type if available to enable compile time
    // optimizations
    if (internal::traits<internal::remove_all_t<Arg1Nested>>::RowsAtCompileTime == Dynamic &&
        internal::traits<internal::remove_all_t<Arg2Nested>>::RowsAtCompileTime == Dynamic)
      return m_arg3.rows();
    else if (internal::traits<internal::remove_all_t<Arg1Nested>>::RowsAtCompileTime == Dynamic &&
             internal::traits<internal::remove_all_t<Arg3Nested>>::RowsAtCompileTime == Dynamic)
      return m_arg2.rows();
    else
      return m_arg1.rows();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const {
    // return the fixed size type if available to enable compile time
    // optimizations
    if (internal::traits<internal::remove_all_t<Arg1Nested>>::ColsAtCompileTime == Dynamic &&
        internal::traits<internal::remove_all_t<Arg2Nested>>::ColsAtCompileTime == Dynamic)
      return m_arg3.cols();
    else if (internal::traits<internal::remove_all_t<Arg1Nested>>::ColsAtCompileTime == Dynamic &&
             internal::traits<internal::remove_all_t<Arg3Nested>>::ColsAtCompileTime == Dynamic)
      return m_arg2.cols();
    else
      return m_arg1.cols();
  }

  /** \returns the first argument nested expression */
  EIGEN_DEVICE_FUNC const Arg1Nested_& arg1() const { return m_arg1; }

  /** \returns the second argument nested expression */
  EIGEN_DEVICE_FUNC const Arg2Nested_& arg2() const { return m_arg2; }

  /** \returns the third argument nested expression */
  EIGEN_DEVICE_FUNC const Arg3Nested_& arg3() const { return m_arg3; }

  /** \returns the functor representing the ternary operation */
  EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; }

 protected:
  Arg1Nested m_arg1;
  Arg2Nested m_arg2;
  Arg3Nested m_arg3;
  const TernaryOp m_functor;
};
// Generic API dispatcher: selects the expression base class for the given
// storage kind (dense, sparse, ...).
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
class CwiseTernaryOpImpl : public internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>::type {
 public:
  typedef typename internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>::type Base;
};
}
// end namespace Eigen
#endif // EIGEN_CWISE_TERNARY_OP_H
eigen-master/Eigen/src/Core/CwiseUnaryOp.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_UNARY_OP_H
#define EIGEN_CWISE_UNARY_OP_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
template <typename UnaryOp, typename XprType>
struct traits<CwiseUnaryOp<UnaryOp, XprType> > : traits<XprType> {
  // The result scalar is whatever the functor returns for the input scalar.
  typedef typename result_of<UnaryOp(const typename XprType::Scalar&)>::type Scalar;
  typedef typename XprType::Nested XprTypeNested;
  typedef std::remove_reference_t<XprTypeNested> XprTypeNested_;
  enum {
    // Only the storage-order bit is propagated from the nested expression.
    Flags = XprTypeNested_::Flags & RowMajorBit
  };
};
}
// namespace internal
template
<
typename
UnaryOp
,
typename
XprType
,
typename
StorageKind
>
class
CwiseUnaryOpImpl
;
/** \class CwiseUnaryOp
* \ingroup Core_Module
*
* \brief Generic expression where a coefficient-wise unary operator is applied to an expression
*
* \tparam UnaryOp template functor implementing the operator
* \tparam XprType the type of the expression to which we are applying the unary operator
*
* This class represents an expression where a unary operator is applied to an expression.
* It is the return type of all operations taking exactly 1 input expression, regardless of the
* presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
* is considered unary, because only the right-hand side is an expression, and its
* return type is a specialization of CwiseUnaryOp.
*
* Most of the time, this is the only way that it is used, so you typically don't have to name
* CwiseUnaryOp types explicitly.
*
* \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
*/
template <typename UnaryOp, typename XprType>
class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>,
                     internal::no_assignment_operator {
 public:
  typedef typename CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
  typedef typename internal::ref_selector<XprType>::type XprTypeNested;
  typedef internal::remove_all_t<XprType> NestedExpression;

  // 'explicit' prevents accidental implicit conversion from the plain expression.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
      : m_xpr(xpr), m_functor(func) {}

  // Dimensions are those of the nested expression.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_xpr.rows(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_xpr.cols(); }

  /** \returns the functor representing the unary operation */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const {
    return m_xpr;
  }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::remove_all_t<XprTypeNested>& nestedExpression() { return m_xpr; }

 protected:
  XprTypeNested m_xpr;
  const UnaryOp m_functor;
};
// Generic API dispatcher: selects the expression base class for the given
// storage kind (dense, sparse, ...).
template <typename UnaryOp, typename XprType, typename StorageKind>
class CwiseUnaryOpImpl : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type {
 public:
  typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
};
}
// end namespace Eigen
#endif // EIGEN_CWISE_UNARY_OP_H
eigen-master/Eigen/src/Core/CwiseUnaryView.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_UNARY_VIEW_H
#define EIGEN_CWISE_UNARY_VIEW_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
template <typename ViewOp, typename MatrixType, typename StrideType>
struct traits<CwiseUnaryView<ViewOp, MatrixType, StrideType> > : traits<MatrixType> {
  // A view functor must yield an lvalue reference into the nested matrix's
  // coefficients (e.g. real()/imag() returning RealScalar&).
  typedef typename result_of<ViewOp(typename traits<MatrixType>::Scalar&)>::type1 ScalarRef;
  static_assert(std::is_reference<ScalarRef>::value, "Views must return a reference type.");
  typedef remove_all_t<ScalarRef> Scalar;
  typedef typename MatrixType::Nested MatrixTypeNested;
  typedef remove_all_t<MatrixTypeNested> MatrixTypeNested_;
  enum {
    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
    Flags = traits<MatrixTypeNested_>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit),
    // FIXME DirectAccessBit should not be handled by expressions
    MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
    // need to cast the sizeof's from size_t to int explicitly, otherwise:
    // "error: no integral type can represent all of the enumerator values
    // Strides are rescaled from the nested scalar's units to the view scalar's
    // units via the sizeof ratio; an explicit StrideType (non-zero) wins.
    InnerStrideAtCompileTime =
        StrideType::InnerStrideAtCompileTime == 0
            ? (MatrixTypeInnerStride == Dynamic
                   ? int(Dynamic)
                   : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)))
            : int(StrideType::InnerStrideAtCompileTime),
    OuterStrideAtCompileTime =
        StrideType::OuterStrideAtCompileTime == 0
            ? (outer_stride_at_compile_time<MatrixType>::ret == Dynamic
                   ? int(Dynamic)
                   : outer_stride_at_compile_time<MatrixType>::ret *
                         int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)))
            : int(StrideType::OuterStrideAtCompileTime)
  };
};
// Generic API dispatcher: the extra Mutable flag selects the writable
// specialization when the underlying expression is non-const.
template <typename ViewOp, typename XprType, typename StrideType, typename StorageKind,
          bool Mutable = !std::is_const<XprType>::value>
class CwiseUnaryViewImpl : public generic_xpr_base<CwiseUnaryView<ViewOp, XprType, StrideType> >::type {
 public:
  typedef typename generic_xpr_base<CwiseUnaryView<ViewOp, XprType, StrideType> >::type Base;
};
// Dense, read-only specialization: exposes const data()/stride accessors.
template <typename ViewOp, typename MatrixType, typename StrideType>
class CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false>
    : public dense_xpr_base<CwiseUnaryView<ViewOp, MatrixType, StrideType> >::type {
 public:
  typedef CwiseUnaryView<ViewOp, MatrixType, StrideType> Derived;
  typedef typename dense_xpr_base<CwiseUnaryView<ViewOp, MatrixType, StrideType> >::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)

  EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeffRef(0)); }

  EIGEN_DEVICE_FUNC constexpr Index innerStride() const {
    // Strides are expressed in units of the view's Scalar, hence the sizeof
    // rescaling of the nested expression's stride; a non-zero StrideType wins.
    return StrideType::InnerStrideAtCompileTime != 0
               ? int(StrideType::InnerStrideAtCompileTime)
               : derived().nestedExpression().innerStride() * sizeof(typename traits<MatrixType>::Scalar) /
                     sizeof(Scalar);
  }

  EIGEN_DEVICE_FUNC constexpr Index outerStride() const {
    return StrideType::OuterStrideAtCompileTime != 0
               ? int(StrideType::OuterStrideAtCompileTime)
               : derived().nestedExpression().outerStride() * sizeof(typename traits<MatrixType>::Scalar) /
                     sizeof(Scalar);
  }

 protected:
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)

  // Allow const access to coeffRef for the case of direct access being enabled.
  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
    return internal::evaluator<Derived>(derived()).coeffRef(index);
  }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index col) const {
    return internal::evaluator<Derived>(derived()).coeffRef(row, col);
  }
};
// Dense, mutable specialization: adds non-const data() and coeffRef on top of
// the read-only base.
template <typename ViewOp, typename MatrixType, typename StrideType>
class CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, true>
    : public CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false> {
 public:
  typedef CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false> Base;
  typedef CwiseUnaryView<ViewOp, MatrixType, StrideType> Derived;
  EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)

  // Keep the const overload from the base visible alongside the new one.
  using Base::data;
  EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return internal::evaluator<Derived>(derived()).coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return internal::evaluator<Derived>(derived()).coeffRef(index);
  }

 protected:
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)
};
}
// namespace internal
/** \class CwiseUnaryView
* \ingroup Core_Module
*
* \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
*
* \tparam ViewOp template functor implementing the view
 * \tparam MatrixType the type of the matrix to which we are applying the unary operator
*
* This class represents a lvalue expression of a generic unary view operator of a matrix or a vector.
* It is the return type of real() and imag(), and most of the time this is the only way it is used.
*
* \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
*/
template <typename ViewOp, typename MatrixType, typename StrideType>
class CwiseUnaryView : public internal::CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType,
                                                           typename internal::traits<MatrixType>::StorageKind> {
 public:
  typedef typename internal::CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType,
                                                typename internal::traits<MatrixType>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
  // non_const_type: the view must be able to write through to the nested matrix.
  typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
  typedef internal::remove_all_t<MatrixType> NestedExpression;

  explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())
      : m_matrix(mat), m_functor(func) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)

  // Dimensions are those of the nested matrix.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_matrix.rows(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_matrix.cols(); }

  /** \returns the functor representing unary operation */
  EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC const internal::remove_all_t<MatrixTypeNested>& nestedExpression() const { return m_matrix; }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC std::remove_reference_t<MatrixTypeNested>& nestedExpression() { return m_matrix; }

 protected:
  MatrixTypeNested m_matrix;
  ViewOp m_functor;
};
}
// namespace Eigen
#endif // EIGEN_CWISE_UNARY_VIEW_H
eigen-master/Eigen/src/Core/DenseBase.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DENSEBASE_H
#define EIGEN_DENSEBASE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
EIGEN_STATIC_ASSERT
(
NumTraits
<
DenseIndex
>::
IsSigned
,
THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE
)
/** \class DenseBase
* \ingroup Core_Module
*
* \brief Base class for all dense matrices, vectors, and arrays
*
* This class is the base that is inherited by all dense objects (matrix, vector, arrays,
* and related expression types). The common Eigen API for dense objects is contained in this class.
*
* \tparam Derived is the derived type, e.g., a matrix type or an expression.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
*
* \sa \blank \ref TopicClassHierarchy
*/
template <typename Derived>
class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
    : public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value>
#else
    : public DenseCoeffsBase<Derived, DirectWriteAccessors>
#endif  // not EIGEN_PARSED_BY_DOXYGEN
{
 public:
  /** Inner iterator type to iterate over the coefficients of a row or column.
   * \sa class InnerIterator */
  typedef Eigen::InnerIterator<Derived> InnerIterator;

  typedef typename internal::traits<Derived>::StorageKind StorageKind;

  /**
   * \brief The type used to store indices
   * \details This typedef is relevant for types that store multiple indices such as
   *          PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index
   * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.
   */
  typedef typename internal::traits<Derived>::StorageIndex StorageIndex;

  /** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc. */
  typedef typename internal::traits<Derived>::Scalar Scalar;

  /** The numeric type of the expression's coefficients; an alias for the Scalar type. */
  typedef Scalar value_type;

  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base;

  // Pull the coefficient-access API of the (access-level-dependent) base class into scope.
  using Base::coeff;
  using Base::coeffByOuterInner;
  using Base::colIndexByOuterInner;
  using Base::cols;
  using Base::const_cast_derived;
  using Base::derived;
  using Base::rowIndexByOuterInner;
  using Base::rows;
  using Base::size;
  using Base::operator();
  using Base::operator[];
  using Base::colStride;
  using Base::innerStride;
  using Base::outerStride;
  using Base::rowStride;
  using Base::stride;
  using Base::w;
  using Base::x;
  using Base::y;
  using Base::z;
  typedef typename Base::CoeffReturnType CoeffReturnType;

  enum {
    RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
    /**< The number of rows at compile-time (copied from \a Derived), or \a Dynamic if unknown.
     * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */

    ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
    /**< The number of columns at compile-time (copied from \a Derived), or \a Dynamic if unknown.
     * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */

    SizeAtCompileTime = (internal::size_of_xpr_at_compile_time<Derived>::ret),
    /**< The total number of coefficients (rows times columns), or \a Dynamic if not
     * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */

    MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
    /**< The maximum possible number of rows, or \a Dynamic if unbounded. Useful to decide
     * whether a dynamic memory allocation can be avoided when evaluating the expression.
     * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime */

    MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
    /**< The maximum possible number of columns, or \a Dynamic if unbounded. Useful to decide
     * whether a dynamic memory allocation can be avoided when evaluating the expression.
     * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime */

    MaxSizeAtCompileTime = internal::size_at_compile_time(internal::traits<Derived>::MaxRowsAtCompileTime,
                                                          internal::traits<Derived>::MaxColsAtCompileTime),
    /**< The maximum possible number of coefficients, or \a Dynamic if unbounded. Useful to decide
     * whether a dynamic memory allocation can be avoided when evaluating the expression.
     * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime */

    IsVectorAtCompileTime =
        internal::traits<Derived>::RowsAtCompileTime == 1 || internal::traits<Derived>::ColsAtCompileTime == 1,
    /**< True if either the number of rows or the number of columns is known at compile-time
     * to be equal to 1: the expression is a column-vector or a row-vector. */

    NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
    /**< Equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors, 2 for matrices. */

    Flags = internal::traits<Derived>::Flags,
    /**< Expression \ref flags which may or may not be inherited by new expressions
     * constructed from this one. See the \ref flags "list of flags". */

    IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */

    InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
                             : int(IsRowMajor)          ? int(ColsAtCompileTime)
                                                        : int(RowsAtCompileTime),

    InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
    OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
  };

  typedef typename internal::find_best_packet<Scalar, SizeAtCompileTime>::type PacketScalar;

  enum { IsPlainObjectBase = 0 };

  /** The plain matrix type corresponding to this expression.
   * \sa PlainObject */
  typedef Matrix<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime,
                 internal::traits<Derived>::ColsAtCompileTime,
                 AutoAlign | (internal::traits<Derived>::Flags & RowMajorBit ? RowMajor : ColMajor),
                 internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>
      PlainMatrix;

  /** The plain array type corresponding to this expression.
   * \sa PlainObject */
  typedef Array<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime,
                internal::traits<Derived>::ColsAtCompileTime,
                AutoAlign | (internal::traits<Derived>::Flags & RowMajorBit ? RowMajor : ColMajor),
                internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>
      PlainArray;

  /** \brief The plain matrix or array type corresponding to this expression.
   *
   * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
   * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
   * that the return type of eval() is either PlainObject or const PlainObject&.
   */
  typedef std::conditional_t<internal::is_same<typename internal::traits<Derived>::XprKind, MatrixXpr>::value,
                             PlainMatrix, PlainArray>
      PlainObject;

  /** \returns the outer size.
   *
   * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
   * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
   * column-major matrix, and the number of rows for a row-major matrix. */
  EIGEN_DEVICE_FUNC constexpr Index outerSize() const {
    return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols();
  }

  /** \returns the inner size.
   *
   * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
   * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
   * column-major matrix, and the number of columns for a row-major matrix. */
  EIGEN_DEVICE_FUNC constexpr Index innerSize() const {
    return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows();
  }

  /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
   * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and
   * does nothing else.
   */
  EIGEN_DEVICE_FUNC void resize(Index newSize) {
    EIGEN_ONLY_USED_FOR_DEBUG(newSize);
    eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize.");
  }

  /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
   * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and
   * does nothing else.
   */
  EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) {
    EIGEN_ONLY_USED_FOR_DEBUG(rows);
    EIGEN_ONLY_USED_FOR_DEBUG(cols);
    eigen_assert(rows == this->rows() && cols == this->cols() &&
                 "DenseBase::resize() does not actually allow to resize.");
  }

#ifndef EIGEN_PARSED_BY_DOXYGEN
  /** \internal Represents a matrix with all coefficients equal to one another. */
  typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
  /** \internal Represents a matrix with all coefficients equal to zero. */
  typedef CwiseNullaryOp<internal::scalar_zero_op<Scalar>, PlainObject> ZeroReturnType;
  /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
  EIGEN_DEPRECATED typedef CwiseNullaryOp<internal::linspaced_op<Scalar>, PlainObject> SequentialLinSpacedReturnType;
  /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
  typedef CwiseNullaryOp<internal::linspaced_op<Scalar>, PlainObject> RandomAccessLinSpacedReturnType;
  /** \internal Represents a vector with equally spaced coefficients that allows random access. */
  typedef CwiseNullaryOp<internal::equalspaced_op<Scalar>, PlainObject> RandomAccessEqualSpacedReturnType;
  /** \internal the return type of MatrixBase::eigenvalues() */
  typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
                 internal::traits<Derived>::ColsAtCompileTime, 1>
      EigenvaluesReturnType;
#endif  // not EIGEN_PARSED_BY_DOXYGEN

  /** Copies \a other into *this. \returns a reference to *this. */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other);

  /** Special case of the template operator=, in order to prevent the compiler
   * from generating a default operator= (issue hit with g++ 4.1)
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase<OtherDerived>& other);

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& func);

  /** \internal
   * Copies \a other into *this without evaluating other. \returns a reference to *this. */
  template <typename OtherDerived>
  /** \deprecated */
  EIGEN_DEPRECATED EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase<OtherDerived>& other);

  EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<<(const Scalar& s);

  template <unsigned int Added, unsigned int Removed>
  /** \deprecated it now returns \c *this */
  EIGEN_DEPRECATED const Derived& flagged() const { return derived(); }

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<<(const DenseBase<OtherDerived>& other);

  typedef Transpose<Derived> TransposeReturnType;
  EIGEN_DEVICE_FUNC TransposeReturnType transpose();
  typedef Transpose<const Derived> ConstTransposeReturnType;
  EIGEN_DEVICE_FUNC const ConstTransposeReturnType transpose() const;
  EIGEN_DEVICE_FUNC void transposeInPlace();

  EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value);
  EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value);
  EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value);

  EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, Index size,
                                                                                            const Scalar& low,
                                                                                            const Scalar& high);
  EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t,
                                                                                            const Scalar& low,
                                                                                            const Scalar& high);
  EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low,
                                                                           const Scalar& high);
  EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high);
  EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(Index size, const Scalar& low,
                                                                               const Scalar& step);
  EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(const Scalar& low, const Scalar& step);

  template <typename CustomNullaryOp>
  EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(
      Index rows, Index cols, const CustomNullaryOp& func);
  template <typename CustomNullaryOp>
  EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(
      Index size, const CustomNullaryOp& func);
  template <typename CustomNullaryOp>
  EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(const CustomNullaryOp& func);

  EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(Index rows, Index cols);
  EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(Index size);
  EIGEN_DEVICE_FUNC static const ZeroReturnType Zero();
  EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols);
  EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size);
  EIGEN_DEVICE_FUNC static const ConstantReturnType Ones();

  EIGEN_DEVICE_FUNC void fill(const Scalar& value);
  EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value);
  EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
  EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high);
  EIGEN_DEVICE_FUNC Derived& setEqualSpaced(Index size, const Scalar& low, const Scalar& step);
  EIGEN_DEVICE_FUNC Derived& setEqualSpaced(const Scalar& low, const Scalar& step);
  EIGEN_DEVICE_FUNC Derived& setZero();
  EIGEN_DEVICE_FUNC Derived& setOnes();
  EIGEN_DEVICE_FUNC Derived& setRandom();

  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC bool isApprox(const DenseBase<OtherDerived>& other,
                                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const RealScalar& other,
                                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
                                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

  EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value,
                                            const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value,
                                    const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
  EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

  EIGEN_DEVICE_FUNC inline bool hasNaN() const;
  EIGEN_DEVICE_FUNC inline bool allFinite() const;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other);
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other);

  typedef internal::add_const_on_value_type_t<typename internal::eval<Derived>::type> EvalReturnType;
  /** \returns the matrix or vector obtained by evaluating this expression.
   *
   * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
   * a const reference, in order to avoid a useless copy.
   *
   * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page
   * \endlink.
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const {
    // Even though MSVC does not honor strong inlining when the return type
    // is a dynamic matrix, we desperately need strong inlining for fixed
    // size types on MSVC.
    return typename internal::eval<Derived>::type(derived());
  }

  /** swaps *this with the expression \a other.
   */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(const DenseBase<OtherDerived>& other) {
    EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
    eigen_assert(rows() == other.rows() && cols() == other.cols());
    call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
  }

  /** swaps *this with the matrix or array \a other.
   */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(PlainObjectBase<OtherDerived>& other) {
    eigen_assert(rows() == other.rows() && cols() == other.cols());
    call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>());
  }

  EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const;
  EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
  EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess();
  template <bool Enable>
  EIGEN_DEVICE_FUNC inline const std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&>
  forceAlignedAccessIf() const;
  template <bool Enable>
  EIGEN_DEVICE_FUNC inline std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&> forceAlignedAccessIf();

  EIGEN_DEVICE_FUNC Scalar sum() const;
  EIGEN_DEVICE_FUNC Scalar mean() const;
  EIGEN_DEVICE_FUNC Scalar trace() const;
  EIGEN_DEVICE_FUNC Scalar prod() const;

  template <int NaNPropagation>
  EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;
  template <int NaNPropagation>
  EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;

  // By default, the fastest version with undefined NaN propagation semantics is
  // used.
  // TODO(rmlarsen): Replace with default template argument when we move to
  // c++11 or beyond.
  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff() const {
    return minCoeff<PropagateFast>();
  }
  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff() const {
    return maxCoeff<PropagateFast>();
  }

  template <int NaNPropagation, typename IndexType>
  EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
  template <int NaNPropagation, typename IndexType>
  EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
  template <int NaNPropagation, typename IndexType>
  EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
  template <int NaNPropagation, typename IndexType>
  EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;

  // TODO(rmlarsen): Replace these methods with a default template argument.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const {
    return minCoeff<PropagateFast>(row, col);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const {
    return maxCoeff<PropagateFast>(row, col);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const {
    return minCoeff<PropagateFast>(index);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const {
    return maxCoeff<PropagateFast>(index);
  }

  template <typename BinaryOp>
  EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const;

  template <typename Visitor>
  EIGEN_DEVICE_FUNC void visit(Visitor& func) const;

  /** \returns a WithFormat proxy object allowing to print a matrix the with given
   * format \a fmt.
   *
   * See class IOFormat for some examples.
   *
   * \sa class IOFormat, class WithFormat
   */
  inline const WithFormat<Derived> format(const IOFormat& fmt) const { return WithFormat<Derived>(derived(), fmt); }

  /** \returns the unique coefficient of a 1x1 expression */
  EIGEN_DEVICE_FUNC CoeffReturnType value() const {
    EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
    eigen_assert(this->rows() == 1 && this->cols() == 1);
    return derived().coeff(0, 0);
  }

  EIGEN_DEVICE_FUNC bool all() const;
  EIGEN_DEVICE_FUNC bool any() const;
  EIGEN_DEVICE_FUNC Index count() const;

  typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
  typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
  typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
  typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;

  /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions
   *
   * Example: \include MatrixBase_rowwise.cpp
   * Output: \verbinclude MatrixBase_rowwise.out
   *
   * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
   */
  // Code moved here due to a CUDA compiler bug
  EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { return ConstRowwiseReturnType(derived()); }
  EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();

  /** \returns a VectorwiseOp wrapper of *this broadcasting and partial reductions
   *
   * Example: \include MatrixBase_colwise.cpp
   * Output: \verbinclude MatrixBase_colwise.out
   *
   * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
   */
  EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { return ConstColwiseReturnType(derived()); }
  EIGEN_DEVICE_FUNC ColwiseReturnType colwise();

  typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>, PlainObject> RandomReturnType;
  static const RandomReturnType Random(Index rows, Index cols);
  static const RandomReturnType Random(Index size);
  static const RandomReturnType Random();

  template <typename ThenDerived, typename ElseDerived>
  inline EIGEN_DEVICE_FUNC
      CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                        typename DenseBase<ElseDerived>::Scalar, Scalar>,
                     ThenDerived, ElseDerived, Derived>
      select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const;

  template <typename ThenDerived>
  inline EIGEN_DEVICE_FUNC
      CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                        typename DenseBase<ThenDerived>::Scalar, Scalar>,
                     ThenDerived, typename DenseBase<ThenDerived>::ConstantReturnType, Derived>
      select(const DenseBase<ThenDerived>& thenMatrix,
             const typename DenseBase<ThenDerived>::Scalar& elseScalar) const;

  template <typename ElseDerived>
  inline EIGEN_DEVICE_FUNC
      CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar,
                                                        typename DenseBase<ElseDerived>::Scalar, Scalar>,
                     typename DenseBase<ElseDerived>::ConstantReturnType, ElseDerived, Derived>
      select(const typename DenseBase<ElseDerived>::Scalar& thenScalar,
             const DenseBase<ElseDerived>& elseMatrix) const;

  template <int p>
  RealScalar lpNorm() const;

  template <int RowFactor, int ColFactor>
  EIGEN_DEVICE_FUNC const Replicate<Derived, RowFactor, ColFactor> replicate() const;

  /**
   * \return an expression of the replication of \c *this
   *
   * Example: \include MatrixBase_replicate_int_int.cpp
   * Output: \verbinclude MatrixBase_replicate_int_int.out
   *
   * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
   */
  // Code moved here due to a CUDA compiler bug
  EIGEN_DEVICE_FUNC const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const {
    return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor);
  }

  typedef Reverse<Derived, BothDirections> ReverseReturnType;
  typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
  EIGEN_DEVICE_FUNC ReverseReturnType reverse();
  /** This is the const version of reverse(). */
  // Code moved here due to a CUDA compiler bug
  EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const { return ConstReverseReturnType(derived()); }
  EIGEN_DEVICE_FUNC void reverseInPlace();

#ifdef EIGEN_PARSED_BY_DOXYGEN
  /** STL-like <a href="https://en.cppreference.com/w/cpp/named_req/RandomAccessIterator">RandomAccessIterator</a>
   * iterator type as returned by the begin() and end() methods.
   */
  typedef random_access_iterator_type iterator;
  /** This is the const version of iterator (aka read-only) */
  typedef random_access_iterator_type const_iterator;
#else
  typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit,
                             internal::pointer_based_stl_iterator<Derived>,
                             internal::generic_randaccess_stl_iterator<Derived>>
      iterator_type;
  typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit,
                             internal::pointer_based_stl_iterator<const Derived>,
                             internal::generic_randaccess_stl_iterator<const Derived>>
      const_iterator_type;
  // Stl-style iterators are supported only for vectors.
  typedef std::conditional_t<IsVectorAtCompileTime, iterator_type, void> iterator;
  typedef std::conditional_t<IsVectorAtCompileTime, const_iterator_type, void> const_iterator;
#endif

  inline iterator begin();
  inline const_iterator begin() const;
  inline const_iterator cbegin() const;
  inline iterator end();
  inline const_iterator end() const;
  inline const_iterator cend() const;

#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
#define EIGEN_DOC_UNARY_ADDONS(X, Y)
#include "../plugins/CommonCwiseUnaryOps.inc"
#include "../plugins/BlockMethods.inc"
#include "../plugins/IndexedViewMethods.inc"
#include "../plugins/ReshapedMethods.inc"
#ifdef EIGEN_DENSEBASE_PLUGIN
#include EIGEN_DENSEBASE_PLUGIN
#endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
#undef EIGEN_DOC_UNARY_ADDONS

  // disable the use of evalTo for dense objects with a nice compilation error
  template <typename Dest>
  EIGEN_DEVICE_FUNC inline void evalTo(Dest&) const {
    EIGEN_STATIC_ASSERT((internal::is_same<Dest, void>::value),
                        THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
  }

 protected:
  EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase)
  /** Default constructor. Do nothing. */
#ifdef EIGEN_INTERNAL_DEBUGGING
  EIGEN_DEVICE_FUNC constexpr DenseBase() {
    /* Just checks for self-consistency of the flags.
     * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
     */
    EIGEN_STATIC_ASSERT(
        (internal::check_implication(MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1, int(IsRowMajor)) &&
         internal::check_implication(MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1, int(!IsRowMajor))),
        INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
  }
#else
  EIGEN_DEVICE_FUNC constexpr DenseBase() = default;
#endif

 private:
  // These constructors are intentionally declared but not defined: constructing a
  // DenseBase directly from sizes is a programming error (only derived plain objects
  // may be constructed that way).
  EIGEN_DEVICE_FUNC explicit DenseBase(int);
  EIGEN_DEVICE_FUNC DenseBase(int, int);
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&);
};
/** Free-function swap: enabled only when both arguments derive from DenseBase,
 * so that it does not shadow or compete with std::swap for unrelated types.
 */
template <typename DerivedA, typename DerivedB>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    // Use forwarding references to capture all combinations of cv-qualified l+r-value cases.
    std::enable_if_t<std::is_base_of<DenseBase<std::decay_t<DerivedA>>, std::decay_t<DerivedA>>::value &&
                         std::is_base_of<DenseBase<std::decay_t<DerivedB>>, std::decay_t<DerivedB>>::value,
                     void>
    swap(DerivedA&& a, DerivedB&& b) {
  // Delegate to the member swap, which performs the coefficient-wise exchange.
  a.swap(b);
}
}
// end namespace Eigen
#endif // EIGEN_DENSEBASE_H
eigen-master/Eigen/src/Core/DenseCoeffsBase.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DENSECOEFFSBASE_H
#define EIGEN_DENSECOEFFSBASE_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
/** \internal Helper trait: adds const on the value type of \c T unless \c T is an
 * arithmetic type (arithmetic scalars are returned by value, so const is pointless). */
template <typename T>
struct add_const_on_value_type_if_arithmetic {
  // Non-arithmetic types get const added on their value type; arithmetic
  // types pass through unchanged.
  typedef std::conditional_t<!is_arithmetic<T>::value, add_const_on_value_type_t<T>, T> type;
};
}
// namespace internal
/** \brief Base class providing read-only coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
*
* \note #ReadOnlyAccessors Constant indicating read-only access
*
* This class defines the \c operator() \c const function and friends, which can be used to read specific
* entries of a matrix or array.
*
* \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
* \ref TopicClassHierarchy
*/
template <typename Derived>
class DenseCoeffsBase<Derived, ReadOnlyAccessors> : public EigenBase<Derived> {
 public:
  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;

  // Return type of coeff(): when the expression offers lvalue access
  // (LvalueBit) a const reference is safe and cheap; otherwise the value is
  // returned by copy. Arithmetic scalars are returned as plain "Scalar" to
  // avoid "returning const int" warnings on some compilers, while other
  // scalar types are returned as "const Scalar".
  typedef std::conditional_t<bool(internal::traits<Derived>::Flags& LvalueBit), const Scalar&,
                             std::conditional_t<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>>
      CoeffReturnType;

  typedef typename internal::add_const_on_value_type_if_arithmetic<
      typename internal::packet_traits<Scalar>::type>::type PacketReturnType;

  typedef EigenBase<Derived> Base;
  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  // Translates an (outer, inner) pair into a row index, honoring the
  // compile-time vector shape and the storage order.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const {
    return int(Derived::RowsAtCompileTime) == 1   ? 0
           : int(Derived::ColsAtCompileTime) == 1 ? inner
           : int(Derived::Flags) & RowMajorBit    ? outer
                                                  : inner;
  }

  // Translates an (outer, inner) pair into a column index (mirror of the
  // row mapping above).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const {
    return int(Derived::ColsAtCompileTime) == 1   ? 0
           : int(Derived::RowsAtCompileTime) == 1 ? inner
           : int(Derived::Flags) & RowMajorBit    ? inner
                                                  : outer;
  }

  /** Unchecked variant of \link operator()(Index,Index) const \endlink: the
    * range assertion only fires when EIGEN_INTERNAL_DEBUGGING is defined.
    * Use it to limit debugging overhead on repeated accesses, and only when
    * \a row and \a col are guaranteed to be in range.
    *
    * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeff(Index row, Index col) const {
    eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return internal::evaluator<Derived>(derived()).coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeffByOuterInner(Index outer, Index inner) const {
    return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
  }

  /** \returns the coefficient at the given row and column.
    *
    * \sa operator()(Index,Index), operator[](Index)
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType operator()(Index row, Index col) const {
    eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return coeff(row, col);
  }

  /** Unchecked variant of \link operator[](Index) const \endlink: the range
    * assertion only fires when EIGEN_INTERNAL_DEBUGGING is defined. Only use
    * this when \a index is guaranteed to be in range.
    *
    * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeff(Index index) const {
    EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags& LinearAccessBit,
                        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
    eigen_internal_assert(index >= 0 && index < size());
    return internal::evaluator<Derived>(derived()).coeff(index);
  }

  /** \returns the coefficient at the given index.
    *
    * Only allowed for vector expressions, and for matrix expressions having
    * the LinearAccessBit.
    *
    * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
    * z() const, w() const
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType operator[](Index index) const {
    EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
                        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
    eigen_assert(index >= 0 && index < size());
    return coeff(index);
  }

  /** \returns the coefficient at the given index; synonymous to
    * operator[](Index) const.
    *
    * Only allowed for vector expressions, and for matrix expressions having
    * the LinearAccessBit.
    *
    * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
    * z() const, w() const
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType operator()(Index index) const {
    eigen_assert(index >= 0 && index < size());
    return coeff(index);
  }

  /** equivalent to operator[](0). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType x() const { return (*this)[0]; }

  /** equivalent to operator[](1). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType y() const {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS);
    return (*this)[1];
  }

  /** equivalent to operator[](2). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType z() const {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS);
    return (*this)[2];
  }

  /** equivalent to operator[](3). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType w() const {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS);
    return (*this)[3];
  }

  /** \internal
    * \returns the packet of coefficients starting at the given row and column.
    * The caller is responsible for ensuring that a packet really starts there;
    * only available on expressions with the PacketAccessBit. \a LoadMode
    * selects \a #Aligned vs \a #Unaligned loads (aligned is faster but
    * requires the start address to be a multiple of the packet size).
    */
  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const {
    typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
    eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return internal::evaluator<Derived>(derived()).template packet<LoadMode, DefaultPacketType>(row, col);
  }

  /** \internal */
  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const {
    return packet<LoadMode>(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
  }

  /** \internal
    * \returns the packet of coefficients starting at the given linear index.
    * The caller is responsible for ensuring that a packet really starts there;
    * requires both the PacketAccessBit and the LinearAccessBit. \a LoadMode
    * selects \a #Aligned vs \a #Unaligned loads.
    */
  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags& LinearAccessBit,
                        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
    typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
    eigen_internal_assert(index >= 0 && index < size());
    return internal::evaluator<Derived>(derived()).template packet<LoadMode, DefaultPacketType>(index);
  }

 protected:
  // DenseBase does "using ..." on methods inherited from DenseCoeffsBase, but
  // some of those methods only exist in the DirectAccess specializations.
  // These dummy declarations keep the using-declarations well-formed. They are
  // protected (not private) so DenseBase can name them, without exposing an
  // implementation detail publicly.
  void coeffRef();
  void coeffRefByOuterInner();
  void writePacket();
  void writePacketByOuterInner();
  void copyCoeff();
  void copyCoeffByOuterInner();
  void copyPacket();
  void copyPacketByOuterInner();
  void stride();
  void innerStride();
  void outerStride();
  void rowStride();
  void colStride();
};
/** \brief Base class providing read/write coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
*
* \note #WriteAccessors Constant indicating read/write access
*
* This class defines the non-const \c operator() function and friends, which can be used to write specific
* entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
* defines the const variant for reading specific entries.
*
* \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
*/
template <typename Derived>
class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> {
 public:
  typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;

  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  using Base::coeff;
  using Base::colIndexByOuterInner;
  using Base::cols;
  using Base::derived;
  using Base::rowIndexByOuterInner;
  using Base::rows;
  using Base::size;
  using Base::operator[];
  using Base::operator();
  using Base::w;
  using Base::x;
  using Base::y;
  using Base::z;

  /** Unchecked variant of \link operator()(Index,Index) \endlink: the range
    * assertion only fires when EIGEN_INTERNAL_DEBUGGING is defined. Only use
    * this when \a row and \a col are guaranteed to be in range.
    *
    * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index row, Index col) {
    eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return internal::evaluator<Derived>(derived()).coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner) {
    return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
  }

  /** \returns a reference to the coefficient at the given row and column.
    *
    * \sa operator[](Index)
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator()(Index row, Index col) {
    eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return coeffRef(row, col);
  }

  /** Unchecked variant of \link operator[](Index) \endlink: the range
    * assertion only fires when EIGEN_INTERNAL_DEBUGGING is defined. Only use
    * this when \a index is guaranteed to be in range.
    *
    * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index index) {
    EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags& LinearAccessBit,
                        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
    eigen_internal_assert(index >= 0 && index < size());
    return internal::evaluator<Derived>(derived()).coeffRef(index);
  }

  /** \returns a reference to the coefficient at the given index.
    *
    * Only allowed for vector expressions, and for matrix expressions having
    * the LinearAccessBit.
    *
    * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator[](Index index) {
    EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
                        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
    eigen_assert(index >= 0 && index < size());
    return coeffRef(index);
  }

  /** \returns a reference to the coefficient at the given index; synonymous
    * to operator[](Index).
    *
    * Only allowed for vector expressions, and for matrix expressions having
    * the LinearAccessBit.
    *
    * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
    */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator()(Index index) {
    eigen_assert(index >= 0 && index < size());
    return coeffRef(index);
  }

  /** equivalent to operator[](0). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& x() { return (*this)[0]; }

  /** equivalent to operator[](1). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& y() {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS);
    return (*this)[1];
  }

  /** equivalent to operator[](2). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& z() {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS);
    return (*this)[2];
  }

  /** equivalent to operator[](3). */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& w() {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS);
    return (*this)[3];
  }
};
/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
*
* \note #DirectAccessors Constant indicating direct access
*
* This class defines functions to work with strides which can be used to access entries directly. This class
* inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
* \c operator() .
*
* \sa \blank \ref TopicClassHierarchy
*/
template <typename Derived>
class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> {
 public:
  typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  /** \returns the pointer increment between two consecutive elements within a
    * slice in the inner direction.
    *
    * \sa outerStride(), rowStride(), colStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const { return derived().innerStride(); }

  /** \returns the pointer increment between two consecutive inner slices (for
    * example, between two consecutive columns in a column-major matrix).
    *
    * \sa innerStride(), rowStride(), colStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const { return derived().outerStride(); }

  // FIXME shall we remove it ?
  constexpr Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); }

  /** \returns the pointer increment between two consecutive rows.
    *
    * \sa innerStride(), outerStride(), colStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); }

  /** \returns the pointer increment between two consecutive columns.
    *
    * \sa innerStride(), outerStride(), rowStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index colStride() const { return Derived::IsRowMajor ? innerStride() : outerStride(); }
};
/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
*
* \note #DirectWriteAccessors Constant indicating direct access
*
* This class defines functions to work with strides which can be used to access entries directly. This class
* inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
* \c operator().
*
* \sa \blank \ref TopicClassHierarchy
*/
template <typename Derived>
class DenseCoeffsBase<Derived, DirectWriteAccessors> : public DenseCoeffsBase<Derived, WriteAccessors> {
 public:
  typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  /** \returns the pointer increment between two consecutive elements within a
    * slice in the inner direction.
    *
    * \sa outerStride(), rowStride(), colStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return derived().innerStride(); }

  /** \returns the pointer increment between two consecutive inner slices (for
    * example, between two consecutive columns in a column-major matrix).
    *
    * \sa innerStride(), rowStride(), colStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return derived().outerStride(); }

  // FIXME shall we remove it ?
  constexpr Index stride() const noexcept {
    return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
  }

  /** \returns the pointer increment between two consecutive rows.
    *
    * \sa innerStride(), outerStride(), colStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index rowStride() const noexcept {
    return Derived::IsRowMajor ? outerStride() : innerStride();
  }

  /** \returns the pointer increment between two consecutive columns.
    *
    * \sa innerStride(), outerStride(), rowStride()
    */
  EIGEN_DEVICE_FUNC constexpr Index colStride() const noexcept {
    return Derived::IsRowMajor ? innerStride() : outerStride();
  }
};
namespace
internal
{
// Primary template: used when alignment is statically known (JustReturnZero),
// so the first aligned element is always element 0.
template <int Alignment, typename Derived, bool JustReturnZero>
struct first_aligned_impl {
  static constexpr Index run(const Derived&) noexcept { return 0; }
};
// Specialization for the general case: defer to the pointer-based
// first_aligned() overload on the expression's data buffer.
template <int Alignment, typename Derived>
struct first_aligned_impl<Alignment, Derived, false> {
  static inline Index run(const Derived& m) { return internal::first_aligned<Alignment>(m.data(), m.size()); }
};
/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect
* to \a Alignment for vectorization.
*
* \tparam Alignment requested alignment in Bytes.
*
* There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
* documentation.
*/
/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect
  * to \a Alignment for vectorization.
  *
  * \tparam Alignment requested alignment in Bytes.
  *
  * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
  * documentation.
  */
template <int Alignment, typename Derived>
static inline Index first_aligned(const DenseBase<Derived>& m) {
  enum {
    // 0 can be returned immediately when the expression is already at least
    // as aligned as requested, or when it has no direct (pointer) access.
    ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit)
  };
  return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());
}
// Convenience wrapper: first_aligned() with the default packet alignment of
// the expression's scalar type.
template <typename Derived>
static inline Index first_default_aligned(const DenseBase<Derived>& m) {
  typedef typename Derived::Scalar Scalar;
  typedef typename packet_traits<Scalar>::type DefaultPacketType;
  return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment), Derived>(m);
}
// Compile-time inner stride of an expression; only meaningful when the
// expression has direct access (see the false specialization below).
template <typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
struct inner_stride_at_compile_time {
  enum { ret = traits<Derived>::InnerStrideAtCompileTime };
};
// Without direct access there is no meaningful stride: report 0.
template <typename Derived>
struct inner_stride_at_compile_time<Derived, false> {
  enum { ret = 0 };
};
// Compile-time outer stride of an expression; only meaningful when the
// expression has direct access (see the false specialization below).
template <typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
struct outer_stride_at_compile_time {
  enum { ret = traits<Derived>::OuterStrideAtCompileTime };
};
// Without direct access there is no meaningful stride: report 0.
template <typename Derived>
struct outer_stride_at_compile_time<Derived, false> {
  enum { ret = 0 };
};
}
// end namespace internal
}
// end namespace Eigen
#endif // EIGEN_DENSECOEFFSBASE_H
eigen-master/Eigen/src/Core/DenseStorage.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIXSTORAGE_H
#define EIGEN_MATRIXSTORAGE_H
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) \
X; \
EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
#else
#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X)
#endif
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
namespace
internal
{
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment)
#else
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment) \
eigen_assert((is_constant_evaluated() || (std::uintptr_t(array) % Alignment == 0)) && \
"this assertion is explained here: " \
"http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
" **** READ THIS WEB PAGE !!! ****");
#endif
#if EIGEN_STACK_ALLOCATION_LIMIT
#define EIGEN_MAKE_STACK_ALLOCATION_ASSERT(X) \
EIGEN_STATIC_ASSERT(X <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG)
#else
#define EIGEN_MAKE_STACK_ALLOCATION_ASSERT(X)
#endif
/** \internal
* Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
* to 16 bytes boundary if the total size is a multiple of 16 bytes.
*/
// Fixed-size array with the alignment dictated by the matrix/array options:
// no over-alignment when DontAlign is requested, otherwise the default
// alignment computed from the element type and count.
template <typename T, int Size, int MatrixOrArrayOptions,
          int Alignment = (MatrixOrArrayOptions & DontAlign) ? 0 : compute_default_alignment<T, Size>::value>
struct plain_array {
  EIGEN_ALIGN_TO_BOUNDARY(Alignment) T array[Size];

#if defined(EIGEN_NO_DEBUG) || defined(EIGEN_TESTING_PLAINOBJECT_CTOR)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default;
#else
  // Debug builds verify at construction that the object really is aligned and
  // that the stack allocation stays within the configured limit.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() {
    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment)
    EIGEN_MAKE_STACK_ALLOCATION_ASSERT(Size * sizeof(T))
  }
#endif
};
// Specialization for Alignment == 0: plain array, no alignment attribute and
// hence no alignment assertion either.
template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 0> {
  T array[Size];

#if defined(EIGEN_NO_DEBUG) || defined(EIGEN_TESTING_PLAINOBJECT_CTOR)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default;
#else
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() {
    EIGEN_MAKE_STACK_ALLOCATION_ASSERT(Size * sizeof(T))
  }
#endif
};
// Specialization for Size == 0: zero-length arrays are not valid C++, so keep
// a single dummy element that is never read.
template <typename T, int MatrixOrArrayOptions, int Alignment>
struct plain_array<T, 0, MatrixOrArrayOptions, Alignment> {
  T array[1];
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default;
};
// Swaps the logically-used portions of two fixed-capacity arrays whose live
// sizes (a_size, b_size) may differ: the common prefix is swapped element by
// element, then the tail of the longer one is copied over to the other.
template <typename T, int Size, int Options, int Alignment>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap_plain_array(plain_array<T, Size, Options, Alignment>& a,
                                                                      plain_array<T, Size, Options, Alignment>& b,
                                                                      Index a_size, Index b_size) {
  Index common_size = numext::mini(a_size, b_size);
  std::swap_ranges(a.array, a.array + common_size, b.array);
  if (a_size > b_size)
    smart_copy(a.array + common_size, a.array + a_size, b.array + common_size);
  else if (b_size > a_size)
    smart_copy(b.array + common_size, b.array + b_size, a.array + common_size);
}
// Storage with fixed size, fixed rows and fixed columns: a bare static array,
// all geometry known at compile time.
template <typename T, int Size, int Rows, int Cols, int Options>
class DenseStorage_impl {
  plain_array<T, Size, Options> m_data;

 public:
#ifndef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
#else
  // When a constructor plugin is configured, user-provided ctors are emitted
  // so that the plugin code runs on every construction.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
    smart_copy(other.m_data.array, other.m_data.array + Size, m_data.array);
  }
#endif
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
    numext::swap(m_data, other.m_data);
  }
  // Geometry is fixed: resizing is a no-op.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/,
                                                                          Index /*cols*/) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
};
// Storage with fixed capacity and fixed columns, but a runtime row count.
template <typename T, int Size, int Cols, int Options>
class DenseStorage_impl<T, Size, Dynamic, Cols, Options> {
  plain_array<T, Size, Options> m_data;
  Index m_rows = 0;

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
      : m_rows(other.m_rows) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
    smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index /*cols*/)
      : m_rows(rows) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
    EIGEN_UNUSED_VARIABLE(size)
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
    smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
    m_rows = other.m_rows;
    return *this;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
    // Only the live portions of the fixed-capacity buffers are exchanged.
    swap_plain_array(m_data, other.m_data, size(), other.size());
    numext::swap(m_rows, other.m_rows);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows,
                                                                          Index /*cols*/) {
    m_rows = rows;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index /*cols*/) {
    m_rows = rows;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
};
// Storage with fixed capacity and fixed rows, but a runtime column count.
template <typename T, int Size, int Rows, int Options>
class DenseStorage_impl<T, Size, Rows, Dynamic, Options> {
  plain_array<T, Size, Options> m_data;
  Index m_cols = 0;

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
      : m_cols(other.m_cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
    smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index /*rows*/, Index cols)
      : m_cols(cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
    EIGEN_UNUSED_VARIABLE(size)
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
    smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
    m_cols = other.m_cols;
    return *this;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
    // Only the live portions of the fixed-capacity buffers are exchanged.
    swap_plain_array(m_data, other.m_data, size(), other.size());
    numext::swap(m_cols, other.m_cols);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/,
                                                                          Index cols) {
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index cols) {
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
};
// Storage with fixed capacity but runtime row AND column counts.
template <typename T, int Size, int Options>
class DenseStorage_impl<T, Size, Dynamic, Dynamic, Options> {
  plain_array<T, Size, Options> m_data;
  Index m_rows = 0;
  Index m_cols = 0;

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
      : m_rows(other.m_rows), m_cols(other.m_cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
    smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index cols)
      : m_rows(rows), m_cols(cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
    EIGEN_UNUSED_VARIABLE(size)
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
    smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
    m_rows = other.m_rows;
    m_cols = other.m_cols;
    return *this;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
    // Only the live portions of the fixed-capacity buffers are exchanged.
    swap_plain_array(m_data, other.m_data, size(), other.size());
    numext::swap(m_rows, other.m_rows);
    numext::swap(m_cols, other.m_cols);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index cols) {
    m_rows = rows;
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index cols) {
    m_rows = rows;
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
};
// null matrix variants
// Specialization for Size == 0 with both dimensions fixed at compile time:
// no state is stored at all. Sizes come from the template arguments and
// there is never a buffer, so data() returns nullptr.
template <typename T, int Rows, int Cols, int Options>
class DenseStorage_impl<T, 0, Rows, Cols, Options> {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
  // Runtime sizes are accepted for interface uniformity with the other
  // specializations but are ignored here.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
  // Nothing to exchange: all state is compile-time.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl&) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/,
                                                                          Index /*cols*/) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * Cols; }
  // No storage exists, so there is no valid pointer to hand out.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
};
// Specialization for Size == 0 with a dynamic row count: only the runtime
// row count is tracked; there is still no element storage.
template <typename T, int Cols, int Options>
class DenseStorage_impl<T, 0, Dynamic, Cols, Options> {
  Index m_rows = 0;  // runtime row count (the only state)

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index rows, Index /*cols*/)
      : m_rows(rows) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
    numext::swap(m_rows, other.m_rows);
  }
  // Both resize flavors only need to record the new row count.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index /*cols*/) {
    m_rows = rows;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index /*cols*/) {
    m_rows = rows;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; }
  // Zero-sized storage: no buffer exists.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
};
// Specialization for Size == 0 with a dynamic column count: mirror image of
// the dynamic-rows case above; only the runtime column count is tracked.
template <typename T, int Rows, int Options>
class DenseStorage_impl<T, 0, Rows, Dynamic, Options> {
  Index m_cols = 0;  // runtime column count (the only state)

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index cols)
      : m_cols(cols) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
    numext::swap(m_cols, other.m_cols);
  }
  // Both resize flavors only need to record the new column count.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, Index cols) {
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index cols) {
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; }
  // Zero-sized storage: no buffer exists.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
};
// Specialization for Size == 0 with both dimensions dynamic: tracks runtime
// row and column counts but still owns no element storage.
template <typename T, int Options>
class DenseStorage_impl<T, 0, Dynamic, Dynamic, Options> {
  Index m_rows = 0;  // runtime row count
  Index m_cols = 0;  // runtime column count

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index rows, Index cols)
      : m_rows(rows), m_cols(cols) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
    numext::swap(m_rows, other.m_rows);
    numext::swap(m_cols, other.m_cols);
  }
  // Both resize flavors only need to record the new dimensions.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index cols) {
    m_rows = rows;
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index cols) {
    m_rows = rows;
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
  // Zero-sized storage: no buffer exists.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
};
// fixed-size matrix with dynamic memory allocation not currently supported
// Deliberately left without members: using this combination (Size == Dynamic
// while both dimensions are fixed) fails at compile time as soon as any
// storage operation is requested.
template <typename T, int Rows, int Cols, int Options>
class DenseStorage_impl<T, Dynamic, Rows, Cols, Options> {};
// dynamic-sized variants
// dynamic-sized variants
// Dynamic rows / fixed columns: heap-allocated element buffer plus a runtime
// row count. The buffer is owned by this object and released in the destructor.
template <typename T, int Cols, int Options>
class DenseStorage_impl<T, Dynamic, Dynamic, Cols, Options> {
  // Aligned allocation is used unless the user passed the DontAlign option.
  static constexpr bool Align = (Options & DontAlign) == 0;
  T* m_data = nullptr;  // owned element buffer
  Index m_rows = 0;     // runtime row count

 public:
  static constexpr int Size = Dynamic;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  // Deep copy: allocates a fresh buffer and copies the elements over.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
      : m_data(conditional_aligned_new_auto<T, Align>(other.size())), m_rows(other.m_rows) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
    smart_copy(other.m_data, other.m_data + other.size(), m_data);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index /*cols*/)
      : m_data(conditional_aligned_new_auto<T, Align>(size)), m_rows(rows) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
  }
  // Move steals the buffer and leaves the source empty (null pointer, 0 rows).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept
      : m_data(other.m_data), m_rows(other.m_rows) {
    other.m_data = nullptr;
    other.m_rows = 0;
  }
  EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto<T, Align>(m_data, size()); }
  // Copy assignment resizes first, then copies elementwise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
    resize(other.size(), other.rows(), other.cols());
    smart_copy(other.m_data, other.m_data + other.size(), m_data);
    return *this;
  }
  // Move assignment via swap: our previous buffer ends up in `other`, whose
  // destructor will eventually free it.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept {
    this->swap(other);
    return *this;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
    numext::swap(m_data, other.m_data);
    numext::swap(m_rows, other.m_rows);
  }
  // Reallocating resize (realloc-style helper); see resize() for the
  // destructive variant.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index rows, Index /*cols*/) {
    m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
    m_rows = rows;
  }
  // Destructive resize: when the total size changes, the old buffer is freed
  // and a new, uninitialized one is allocated.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index rows, Index /*cols*/) {
    Index oldSize = this->size();
    if (oldSize != size) {
      conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
      m_data = conditional_aligned_new_auto<T, Align>(size);
      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
    }
    m_rows = rows;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
};
// Fixed rows / dynamic columns: heap-allocated element buffer plus a runtime
// column count. Mirror image of the dynamic-rows specialization above.
template <typename T, int Rows, int Options>
class DenseStorage_impl<T, Dynamic, Rows, Dynamic, Options> {
  // Aligned allocation is used unless the user passed the DontAlign option.
  static constexpr bool Align = (Options & DontAlign) == 0;
  T* m_data = nullptr;  // owned element buffer
  Index m_cols = 0;     // runtime column count

 public:
  static constexpr int Size = Dynamic;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  // Deep copy: allocates a fresh buffer and copies the elements over.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
      : m_data(conditional_aligned_new_auto<T, Align>(other.size())), m_cols(other.m_cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
    smart_copy(other.m_data, other.m_data + other.size(), m_data);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index /*rows*/, Index cols)
      : m_data(conditional_aligned_new_auto<T, Align>(size)), m_cols(cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
  }
  // Move steals the buffer and leaves the source empty (null pointer, 0 cols).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept
      : m_data(other.m_data), m_cols(other.m_cols) {
    other.m_data = nullptr;
    other.m_cols = 0;
  }
  EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto<T, Align>(m_data, size()); }
  // Copy assignment resizes first, then copies elementwise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
    resize(other.size(), other.rows(), other.cols());
    smart_copy(other.m_data, other.m_data + other.size(), m_data);
    return *this;
  }
  // Move assignment via swap: our previous buffer ends up in `other`, whose
  // destructor will eventually free it.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept {
    this->swap(other);
    return *this;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
    numext::swap(m_data, other.m_data);
    numext::swap(m_cols, other.m_cols);
  }
  // Reallocating resize (realloc-style helper); see resize() for the
  // destructive variant.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index /*rows*/, Index cols) {
    m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
    m_cols = cols;
  }
  // Destructive resize: when the total size changes, the old buffer is freed
  // and a new, uninitialized one is allocated.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index /*rows*/, Index cols) {
    Index oldSize = this->size();
    if (oldSize != size) {
      conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
      m_data = conditional_aligned_new_auto<T, Align>(size);
      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
    }
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
};
// Fully dynamic: heap-allocated element buffer plus runtime row and column
// counts. This is the storage behind MatrixXd-style types.
template <typename T, int Options>
class DenseStorage_impl<T, Dynamic, Dynamic, Dynamic, Options> {
  // Aligned allocation is used unless the user passed the DontAlign option.
  static constexpr bool Align = (Options & DontAlign) == 0;
  T* m_data = nullptr;  // owned element buffer
  Index m_rows = 0;     // runtime row count
  Index m_cols = 0;     // runtime column count

 public:
  static constexpr int Size = Dynamic;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
  // Deep copy: allocates a fresh buffer and copies the elements over.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
      : m_data(conditional_aligned_new_auto<T, Align>(other.size())), m_rows(other.m_rows), m_cols(other.m_cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
    smart_copy(other.m_data, other.m_data + other.size(), m_data);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index cols)
      : m_data(conditional_aligned_new_auto<T, Align>(size)), m_rows(rows), m_cols(cols) {
    EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
  }
  // Move steals the buffer and leaves the source empty.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept
      : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {
    other.m_data = nullptr;
    other.m_rows = 0;
    other.m_cols = 0;
  }
  EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto<T, Align>(m_data, size()); }
  // Copy assignment resizes first, then copies elementwise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
    resize(other.size(), other.rows(), other.cols());
    smart_copy(other.m_data, other.m_data + other.size(), m_data);
    return *this;
  }
  // Move assignment via swap: our previous buffer ends up in `other`, whose
  // destructor will eventually free it.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept {
    this->swap(other);
    return *this;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
    numext::swap(m_data, other.m_data);
    numext::swap(m_rows, other.m_rows);
    numext::swap(m_cols, other.m_cols);
  }
  // Reallocating resize (realloc-style helper); see resize() for the
  // destructive variant.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index rows, Index cols) {
    m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
    m_rows = rows;
    m_cols = cols;
  }
  // Destructive resize: when the total size changes, the old buffer is freed
  // and a new, uninitialized one is allocated.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index rows, Index cols) {
    Index oldSize = this->size();
    if (oldSize != size) {
      conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
      m_data = conditional_aligned_new_auto<T, Align>(size);
      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
    }
    m_rows = rows;
    m_cols = cols;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
};
// Trait deciding whether DenseStorage may rely on defaulted (compiler-generated)
// move construction/assignment; see the two DenseStorage definitions below.
template <typename T, int Size, int Rows, int Cols>
struct use_default_move {
  // Dynamic storage: the impl classes above define pointer-stealing moves.
  static constexpr bool DynamicObject = Size == Dynamic;
  // Fully fixed-size storage of scalars that need no initialization can be
  // moved by plain memberwise copy.
  static constexpr bool TrivialObject =
      (!NumTraits<T>::RequireInitialization) && (Rows >= 0) && (Cols >= 0) && (Size == Rows * Cols);
  static constexpr bool value = DynamicObject || TrivialObject;
};
}
// end namespace internal
/** \internal
*
 * \class DenseStorage
* \ingroup Core_Module
*
* \brief Stores the data of a matrix
*
* This class stores the data of fixed-size, dynamic-size or mixed matrices
* in a way as compact as possible.
*
* \sa Matrix
*/
// Primary DenseStorage: thin wrapper over internal::DenseStorage_impl which
// keeps the defaulted move operations (valid when use_default_move is true).
template <typename T, int Size, int Rows, int Cols, int Options,
          bool Trivial = internal::use_default_move<T, Size, Rows, Cols>::value>
class DenseStorage : public internal::DenseStorage_impl<T, Size, Rows, Cols, Options> {
  using Base = internal::DenseStorage_impl<T, Size, Rows, Cols, Options>;

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(const DenseStorage&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(Index size, Index rows, Index cols)
      : Base(size, rows, cols) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(const DenseStorage&) = default;
  // if DenseStorage meets the requirements of use_default_move, then use the move construction and move assignment
  // operation defined in DenseStorage_impl, or the compiler-generated version if none is defined
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(DenseStorage&&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(DenseStorage&&) = default;
};
// Specialization for storages that do NOT satisfy use_default_move: move
// operations deliberately degrade to copies.
template <typename T, int Size, int Rows, int Cols, int Options>
class DenseStorage<T, Size, Rows, Cols, Options, false>
    : public internal::DenseStorage_impl<T, Size, Rows, Cols, Options> {
  using Base = internal::DenseStorage_impl<T, Size, Rows, Cols, Options>;

 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage() = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(const DenseStorage&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(Index size, Index rows, Index cols)
      : Base(size, rows, cols) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(const DenseStorage&) = default;
  // if DenseStorage does not meet the requirements of use_default_move, then defer to the copy construction and copy
  // assignment behavior
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(DenseStorage&& other)
      : DenseStorage(static_cast<const DenseStorage&>(other)) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(DenseStorage&& other) {
    // `other` is an lvalue inside this function, so this calls the copy
    // assignment operator on purpose.
    *this = other;
    return *this;
  }
};
}
// end namespace Eigen
#endif // EIGEN_MATRIX_H
eigen-master/Eigen/src/Core/DeviceWrapper.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2023 Charlie Schlosser <cs.schlosser@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DEVICEWRAPPER_H
#define EIGEN_DEVICEWRAPPER_H
namespace
Eigen
{
// Pairs an expression with a device so that assignments into it can be routed
// through device-aware evaluation (see AssignmentWithDevice below). Holds
// references only; the wrapper must not outlive the expression or the device.
template <typename Derived, typename Device>
struct DeviceWrapper {
  using Base = EigenBase<internal::remove_all_t<Derived>>;
  using Scalar = typename Derived::Scalar;

  EIGEN_DEVICE_FUNC DeviceWrapper(Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {}
  EIGEN_DEVICE_FUNC DeviceWrapper(const Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {}

  // Plain assignment, evaluated through the device-aware assignment machinery.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived>& other) {
    using AssignOp = internal::assign_op<Scalar, typename OtherDerived::Scalar>;
    internal::call_assignment(*this, other.derived(), AssignOp());
    return m_xpr;
  }
  // Compound add-assignment.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const EigenBase<OtherDerived>& other) {
    using AddAssignOp = internal::add_assign_op<Scalar, typename OtherDerived::Scalar>;
    internal::call_assignment(*this, other.derived(), AddAssignOp());
    return m_xpr;
  }
  // Compound subtract-assignment.
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const EigenBase<OtherDerived>& other) {
    using SubAssignOp = internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>;
    internal::call_assignment(*this, other.derived(), SubAssignOp());
    return m_xpr;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& derived() { return m_xpr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Device& device() { return m_device; }
  // Opt out of the aliasing check, as with DenseBase::noalias().
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NoAlias<DeviceWrapper, EigenBase> noalias() {
    return NoAlias<DeviceWrapper, EigenBase>(*this);
  }

  Derived& m_xpr;     // wrapped expression (reference, not owned)
  Device& m_device;   // device used to run assignments (reference, not owned)
};
namespace
internal
{
// this is where we differentiate between lazy assignment and specialized kernels (e.g. matrix products)
// Primary declaration only; the specializations below select the evaluation
// strategy based on the assignment Kind of the two expressions.
template <typename DstXprType, typename SrcXprType, typename Functor, typename Device,
          typename Kind = typename AssignmentKind<typename evaluator_traits<DstXprType>::Shape,
                                                  typename evaluator_traits<SrcXprType>::Shape>::Kind,
          typename EnableIf = void>
struct AssignmentWithDevice;
// unless otherwise specified, use the default product implementation
template <typename DstXprType, typename Lhs, typename Rhs, int Options, typename Functor, typename Device,
          typename Weak>
struct AssignmentWithDevice<DstXprType, Product<Lhs, Rhs, Options>, Functor, Device, Dense2Dense, Weak> {
  using SrcXprType = Product<Lhs, Rhs, Options>;
  using Base = Assignment<DstXprType, SrcXprType, Functor>;
  // The device parameter is accepted for interface uniformity but unused:
  // products are delegated to the regular Assignment path.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func,
                                                        Device&) {
    Base::run(dst, src, func);
  }
};
// specialization for coefficient-wise assignment
template <typename DstXprType, typename SrcXprType, typename Functor, typename Device, typename Weak>
struct AssignmentWithDevice<DstXprType, SrcXprType, Functor, Device, Dense2Dense, Weak> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func,
                                                        Device& device) {
#ifndef EIGEN_NO_DEBUG
    // Debug-only guard against dst appearing inside src without noalias().
    internal::check_for_aliasing(dst, src);
#endif
    call_dense_assignment_loop(dst, src, func, device);
  }
};
// this allows us to use the default evaluation scheme if it is not specialized for the device
template <typename Kernel, typename Device, int Traversal = Kernel::AssignmentTraits::Traversal,
          int Unrolling = Kernel::AssignmentTraits::Unrolling>
struct dense_assignment_loop_with_device {
  using Base = dense_assignment_loop<Kernel, Traversal, Unrolling>;
  // Fallback: ignore the device and run the ordinary assignment loop.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Device&) { Base::run(kernel); }
};
// entry point for a generic expression with device
// Device-aware counterpart of internal::call_assignment_no_alias: checks the
// static contract, transposes vector destinations when the orientations of
// dst and src disagree, then dispatches to AssignmentWithDevice.
template <typename Dst, typename Src, typename Func, typename Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment_no_alias(DeviceWrapper<Dst, Device> dst,
                                                                              const Src& src, const Func& func) {
  enum {
    // True when one side is a row vector and the other a column vector
    // (and the destination is not a 1x1), so dst must be viewed transposed.
    NeedToTranspose = ((int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) ||
                       (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)) &&
                      int(Dst::SizeAtCompileTime) != 1
  };
  using ActualDstTypeCleaned = std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst>;
  using ActualDstType = std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst&>;
  ActualDstType actualDst(dst.derived());
  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned, Src)
  // NOTE: "COMPATIBILIY" is the macro's actual (historically misspelled) name.
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename ActualDstTypeCleaned::Scalar, typename Src::Scalar);
  // this provides a mechanism for specializing simple assignments, matrix products, etc
  AssignmentWithDevice<ActualDstTypeCleaned, Src, Func, Device>::run(actualDst, src, func, dst.device());
}
// copy and pasted from AssignEvaluator except forward device to kernel
template <typename DstXprType, typename SrcXprType, typename Functor, typename Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src,
                                                                                const Functor& func, Device& device) {
  using DstEvaluatorType = evaluator<DstXprType>;
  using SrcEvaluatorType = evaluator<SrcXprType>;
  SrcEvaluatorType srcEvaluator(src);
  // NOTE To properly handle A = (A*A.transpose())/s with A rectangular,
  // we need to resize the destination after the source evaluator has been created.
  resize_if_allowed(dst, src, func);
  DstEvaluatorType dstEvaluator(dst);
  using Kernel = generic_dense_assignment_kernel<DstEvaluatorType, SrcEvaluatorType, Functor>;
  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
  // Unlike the plain loop, the device is forwarded so device-specialized
  // traversals can take over.
  dense_assignment_loop_with_device<Kernel, Device>::run(kernel, device);
}
}
// namespace internal
// Binds `device` to this mutable expression; assignments through the returned
// wrapper are evaluated via the device-aware assignment path above.
template <typename Derived>
template <typename Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<Derived, Device> EigenBase<Derived>::device(Device& device) {
  return DeviceWrapper<Derived, Device>(derived(), device);
}
// Const overload: wraps the expression as read-only (usable as a source, not
// as an assignment destination).
template <typename Derived>
template <typename Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<const Derived, Device> EigenBase<Derived>::device(
    Device& device) const {
  return DeviceWrapper<const Derived, Device>(derived(), device);
}
}
// namespace Eigen
#endif
eigen-master/Eigen/src/Core/Diagonal.h
0 → 100644
View file @
266d4fd9
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DIAGONAL_H
#define EIGEN_DIAGONAL_H
// IWYU pragma: private
#include "./InternalHeaderCheck.h"
namespace
Eigen
{
/** \class Diagonal
* \ingroup Core_Module
*
* \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
*
* \tparam MatrixType the type of the object in which we are taking a sub/main/super diagonal
* \tparam DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
* A positive value means a superdiagonal, a negative value means a subdiagonal.
* You can also use DynamicIndex so the index can be set at runtime.
*
* The matrix is not required to be square.
*
* This class represents an expression of the main diagonal, or any sub/super diagonal
 * of a matrix (which is not required to be square). It is the return type of MatrixBase::diagonal() and
 * MatrixBase::diagonal(Index) and most of the
* time this is the only way it is used.
*
* \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
*/
namespace
internal
{
// Compile-time traits of a Diagonal expression: a column vector whose length
// is the diagonal length implied by MatrixType's dimensions and DiagIndex.
template <typename MatrixType, int DiagIndex>
struct traits<Diagonal<MatrixType, DiagIndex>> : traits<MatrixType> {
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
  typedef typename MatrixType::StorageKind StorageKind;
  enum {
    // Diagonal length: min of (rows shifted for a subdiagonal, cols shifted
    // for a superdiagonal); Dynamic when either the index or size is runtime.
    RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic)
                            ? Dynamic
                            : (plain_enum_min(MatrixType::RowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
                                              MatrixType::ColsAtCompileTime - plain_enum_max(DiagIndex, 0))),
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
                           : DiagIndex == DynamicIndex
                               ? min_size_prefer_fixed(MatrixType::MaxRowsAtCompileTime,
                                                       MatrixType::MaxColsAtCompileTime)
                               : (plain_enum_min(MatrixType::MaxRowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
                                                 MatrixType::MaxColsAtCompileTime - plain_enum_max(DiagIndex, 0))),
    MaxColsAtCompileTime = 1,
    // Writable only if the nested matrix is an lvalue.
    MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
    Flags = (unsigned int)MatrixTypeNested_::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) &
            ~RowMajorBit,  // FIXME DirectAccessBit should not be handled by expressions
    MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
    // Stepping along the diagonal advances one row AND one column at once.
    InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride + 1,
    OuterStrideAtCompileTime = 0
  };
};
}
// namespace internal
// Column-vector view of the DiagIndex_-th diagonal of MatrixType (0 = main,
// positive = superdiagonal, negative = subdiagonal, DynamicIndex = runtime).
template <typename MatrixType, int DiagIndex_>
class Diagonal : public internal::dense_xpr_base<Diagonal<MatrixType, DiagIndex_>>::type {
 public:
  enum { DiagIndex = DiagIndex_ };
  typedef typename internal::dense_xpr_base<Diagonal>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)

  EIGEN_DEVICE_FUNC explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex)
      : m_matrix(matrix), m_index(a_index) {
    // A diagonal index outside [-rows, cols] selects nothing meaningful.
    eigen_assert(a_index <= m_matrix.cols() && -a_index <= m_matrix.rows());
  }

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)

  // Length of the selected diagonal; the matrix need not be square.
  EIGEN_DEVICE_FUNC inline Index rows() const {
    return m_index.value() < 0 ? numext::mini<Index>(m_matrix.cols(), m_matrix.rows() + m_index.value())
                               : numext::mini<Index>(m_matrix.rows(), m_matrix.cols() - m_index.value());
  }
  EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return 1; }
  // One diagonal step = one row + one column in the nested matrix.
  EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_matrix.outerStride() + 1; }
  EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return 0; }

  typedef std::conditional_t<internal::is_lvalue<MatrixType>::value, Scalar, const Scalar>
      ScalarWithConstIfNotLvalue;

  // Pointer to the first diagonal entry inside the nested matrix's storage.
  EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
  EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }

  // 2-D accessors: the column argument is ignored (this is a column vector);
  // element i maps to (i + rowOffset, i + colOffset) in the nested matrix.
  EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index) {
    EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
    return m_matrix.coeffRef(row + rowOffset(), row + colOffset());
  }
  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index) const {
    return m_matrix.coeffRef(row + rowOffset(), row + colOffset());
  }
  EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index row, Index) const {
    return m_matrix.coeff(row + rowOffset(), row + colOffset());
  }
  // 1-D (linear) accessors, same mapping.
  EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index idx) {
    EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
    return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset());
  }
  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index idx) const {
    return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset());
  }
  EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index idx) const {
    return m_matrix.coeff(idx + rowOffset(), idx + colOffset());
  }

  EIGEN_DEVICE_FUNC inline const internal::remove_all_t<typename MatrixType::Nested>& nestedExpression() const {
    return m_matrix;
  }
  // The (possibly runtime) diagonal index.
  EIGEN_DEVICE_FUNC inline Index index() const { return m_index.value(); }

 protected:
  typename internal::ref_selector<MatrixType>::non_const_type m_matrix;
  const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;

 private:
  // some compilers may fail to optimize std::max etc in case of compile-time constants...
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index absDiagIndex() const noexcept {
    return m_index.value() > 0 ? m_index.value() : -m_index.value();
  }
  // Starting row: 0 for super/main diagonals, |index| for subdiagonals.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rowOffset() const noexcept {
    return m_index.value() > 0 ? 0 : -m_index.value();
  }
  // Starting column: index for superdiagonals, 0 otherwise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index colOffset() const noexcept {
    return m_index.value() > 0 ? m_index.value() : 0;
  }
  // trigger a compile-time error if someone try to call packet
  template <int LoadMode>
  typename MatrixType::PacketReturnType packet(Index) const;
  template <int LoadMode>
  typename MatrixType::PacketReturnType packet(Index, Index) const;
};
/** \returns an expression of the main diagonal of the matrix \c *this
 *
 * \c *this is not required to be square.
 *
 * Example: \include MatrixBase_diagonal.cpp
 * Output: \verbinclude MatrixBase_diagonal.out
 *
 * \sa class Diagonal */
template <typename Derived>
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalReturnType MatrixBase<Derived>::diagonal() {
  return DiagonalReturnType(derived());
}
/** This is the const version of diagonal(). */
template <typename Derived>
EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::ConstDiagonalReturnType MatrixBase<Derived>::diagonal()
    const {
  const Derived& xpr = derived();
  return ConstDiagonalReturnType(xpr);
}
/** \returns an expression of the \a index-th sub or super diagonal of the matrix \c *this
  *
  * \c *this is not required to be square.
  *
  * The argument \a index represents a super diagonal if \a index > 0
  * and a sub diagonal otherwise. \a index == 0 is equivalent to the main diagonal.
  *
  * Example: \include MatrixBase_diagonal_int.cpp
  * Output: \verbinclude MatrixBase_diagonal_int.out
  *
  * \sa MatrixBase::diagonal(), class Diagonal */
template <typename Derived>
EIGEN_DEVICE_FUNC inline Diagonal<Derived, DynamicIndex> MatrixBase<Derived>::diagonal(Index index) {
  using DynDiagType = Diagonal<Derived, DynamicIndex>;
  return DynDiagType(derived(), index);
}
/** This is the const version of diagonal(Index). */
template <typename Derived>
EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, DynamicIndex> MatrixBase<Derived>::diagonal(Index index) const {
  using DynDiagType = Diagonal<const Derived, DynamicIndex>;
  return DynDiagType(derived(), index);
}
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
  *
  * \c *this is not required to be square.
  *
  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
  *
  * Example: \include MatrixBase_diagonal_template_int.cpp
  * Output: \verbinclude MatrixBase_diagonal_template_int.out
  *
  * \sa MatrixBase::diagonal(), class Diagonal */
template <typename Derived>
template <int Index_>
EIGEN_DEVICE_FUNC inline Diagonal<Derived, Index_> MatrixBase<Derived>::diagonal() {
  Derived& xpr = derived();
  return Diagonal<Derived, Index_>(xpr);
}
/** This is the const version of diagonal<int>(). */
template <typename Derived>
template <int Index_>
EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, Index_> MatrixBase<Derived>::diagonal() const {
  const Derived& xpr = derived();
  return Diagonal<const Derived, Index_>(xpr);
}
}
// end namespace Eigen
#endif // EIGEN_DIAGONAL_H
Prev
1
2
3
4
5
6
7
8
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment