OpenDAS / cubvh · Commits

Commit 5245f430, authored Mar 21, 2024 by ashawkey
Parent: 408d8fcf

add the patch for torch 2.1.0 and 2.1.1
Showing 4 changed files with 1247 additions and 2 deletions (+1247 −2)
patch/eigen/common.h: +9 −0
patch/eigen/matrix.h: +714 −0
patch/eigen/tensor.h: +516 −0
readme.md: +8 −2
patch/eigen/common.h (new file, mode 100644)

// Copyright (c) 2023 The pybind Community.

#pragma once

// Common message for `static_assert()`s, which are useful to easily
// preempt much less obvious errors.
#define PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED                                    \
    "Pointer types (in particular `PyObject *`) are not supported as scalar types for Eigen "    \
    "types."
patch/eigen/matrix.h (new file, mode 100644)

/*
    pybind11/eigen/matrix.h: Transparent conversion for dense and sparse Eigen matrices

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "../numpy.h"
#include "common.h"

/* HINT: To suppress warnings originating from the Eigen headers, use -isystem.
   See also:
       https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir
       https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler
*/

PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(5054) // https://github.com/pybind/pybind11/pull/3741
// C5054: operator '&': deprecated between enumerations of different types
#if defined(__MINGW32__)
PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
#endif

#include <Eigen/Core>
#include <Eigen/SparseCore>

PYBIND11_WARNING_POP

// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this an explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7),
              "Eigen matrix support in pybind11 requires Eigen >= 3.2.7");

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

PYBIND11_WARNING_DISABLE_MSVC(4127)

// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType>
using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType>
using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;

PYBIND11_NAMESPACE_BEGIN(detail)

#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
using EigenIndex = Eigen::Index;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::Map<Eigen::SparseMatrix<Scalar, Flags, StorageIndex>>;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::MappedSparseMatrix<Scalar, Flags, StorageIndex>;
#endif

// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T>
using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>,
                                  std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T>
using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T>
using is_eigen_dense_plain
    = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T>
using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but that don't have a typical
// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T>
using is_eigen_other
    = all_of<is_template_base_of<Eigen::EigenBase, T>,
             negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>>;

// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor>
struct EigenConformable {
    bool conformable = false;
    EigenIndex rows = 0, cols = 0;
    EigenDStride stride{0, 0};    // Only valid if negativestrides is false!
    bool negativestrides = false; // If true, do not use stride!

    // NOLINTNEXTLINE(google-explicit-constructor)
    EigenConformable(bool fits = false) : conformable{fits} {}
    // Matrix type:
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride)
        : conformable{true}, rows{r}, cols{c},
          // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity.
          // http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
          stride{EigenRowMajor ? (rstride > 0 ? rstride : 0)
                               : (cstride > 0 ? cstride : 0) /* outer stride */,
                 EigenRowMajor ? (cstride > 0 ? cstride : 0)
                               : (rstride > 0 ? rstride : 0) /* inner stride */},
          negativestrides{rstride < 0 || cstride < 0} {}
    // Vector type:
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
        : EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {}

    template <typename props>
    bool stride_compatible() const {
        // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
        // matching strides, or a dimension size of 1 (in which case the stride value is
        // irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant
        // (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly).
        if (negativestrides) {
            return false;
        }
        if (rows == 0 || cols == 0) {
            return true;
        }
        return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner()
                || (EigenRowMajor ? cols : rows) == 1)
               && (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer()
                   || (EigenRowMajor ? rows : cols) == 1);
    }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator bool() const { return conformable; }
};

template <typename Type>
struct eigen_extract_stride {
    using type = Type;
};
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> {
    using type = StrideType;
};
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> {
    using type = StrideType;
};

// Helper struct for extracting information from an Eigen type
template <typename Type_>
struct EigenProps {
    using Type = Type_;
    using Scalar = typename Type::Scalar;
    using StrideType = typename eigen_extract_stride<Type>::type;
    static constexpr EigenIndex rows = Type::RowsAtCompileTime,
                                cols = Type::ColsAtCompileTime,
                                size = Type::SizeAtCompileTime;
    static constexpr bool row_major = Type::IsRowMajor,
                          vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
                          fixed_rows = rows != Eigen::Dynamic,
                          fixed_cols = cols != Eigen::Dynamic,
                          fixed = size != Eigen::Dynamic,        // Fully-fixed size
                          dynamic = !fixed_rows && !fixed_cols;  // Fully-dynamic size

    template <EigenIndex i, EigenIndex ifzero>
    using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
    static constexpr EigenIndex inner_stride
        = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
        outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,
                               vector ? size : row_major ? cols : rows>::value;
    static constexpr bool dynamic_stride
        = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
    static constexpr bool requires_row_major
        = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
    static constexpr bool requires_col_major
        = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;

    // Takes an input array and determines whether we can make it fit into the Eigen type. If
    // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
    // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
    static EigenConformable<row_major> conformable(const array &a) {
        const auto dims = a.ndim();
        if (dims < 1 || dims > 2) {
            return false;
        }

        if (dims == 2) { // Matrix type: require exact match (or dynamic)
            EigenIndex np_rows = a.shape(0), np_cols = a.shape(1),
                       np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
                       np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
            if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols)) {
                return false;
            }

            return {np_rows, np_cols, np_rstride, np_cstride};
        }

        // Otherwise we're storing an n-vector. Only one of the strides will be used, but
        // whichever is used, we want the (single) numpy stride value.
        const EigenIndex n = a.shape(0),
                         stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));

        if (vector) { // Eigen type is a compile-time vector
            if (fixed && size != n) {
                return false; // Vector size mismatch
            }
            return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
        }
        if (fixed) {
            // The type has a fixed size, but is not a vector: abort
            return false;
        }
        if (fixed_cols) {
            // Since this isn't a vector, cols must be != 1. We allow this only if it exactly
            // equals the number of elements (rows is Dynamic, and so 1 row is allowed).
            if (cols != n) {
                return false;
            }
            return {1, n, stride};
        }
        // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
        if (fixed_rows && rows != n) {
            return false;
        }
        return {n, 1, stride};
    }

    static constexpr bool show_writeable
        = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
    static constexpr bool show_order = is_eigen_dense_map<Type>::value;
    static constexpr bool show_c_contiguous = show_order && requires_row_major;
    static constexpr bool show_f_contiguous
        = !show_c_contiguous && show_order && requires_col_major;

    static constexpr auto descriptor
        = const_name("numpy.ndarray[") + npy_format_descriptor<Scalar>::name + const_name("[")
          + const_name<fixed_rows>(const_name<(size_t) rows>(), const_name("m")) + const_name(", ")
          + const_name<fixed_cols>(const_name<(size_t) cols>(), const_name("n")) + const_name("]")
          +
          // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to
          // be satisfied: writeable=True (for a mutable reference), and, depending on the map's
          // stride options, possibly f_contiguous or c_contiguous. We include them in the
          // descriptor output to provide some hint as to why a TypeError is occurring (otherwise
          // it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and
          // an error message that you *gave* a numpy.ndarray of the right type and dimensions.
          const_name<show_writeable>(", flags.writeable", "")
          + const_name<show_c_contiguous>(", flags.c_contiguous", "")
          + const_name<show_f_contiguous>(", flags.f_contiguous", "") + const_name("]");
};

// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
template <typename props>
handle eigen_array_cast(typename props::Type const &src,
                        handle base = handle(),
                        bool writeable = true) {
    constexpr ssize_t elem_size = sizeof(typename props::Scalar);
    array a;
    if (props::vector) {
        a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base);
    } else {
        a = array({src.rows(), src.cols()},
                  {elem_size * src.rowStride(), elem_size * src.colStride()},
                  src.data(),
                  base);
    }

    if (!writeable) {
        array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
    }

    return a.release();
}

// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// reference the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
    // none here is to get past array's should-we-copy detection, which currently always
    // copies when there is no base. Setting the base to None should be harmless.
    return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}

// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a
// numpy array that references the encapsulated data with a python-side reference to the capsule to
// tie its destruction to that of any dependent python objects. Const-ness is determined by
// whether or not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
    capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
    return eigen_ref_array<props>(*src, base);
}

// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
    using Scalar = typename Type::Scalar;
    static_assert(!std::is_pointer<Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
    using props = EigenProps<Type>;

    bool load(handle src, bool convert) {
        // If we're in no-convert mode, only load if given an array of the correct type
        if (!convert && !isinstance<array_t<Scalar>>(src)) {
            return false;
        }

        // Coerce into an array, but don't do type conversion yet; the copy below handles it.
        auto buf = array::ensure(src);

        if (!buf) {
            return false;
        }

        auto dims = buf.ndim();
        if (dims < 1 || dims > 2) {
            return false;
        }

        auto fits = props::conformable(buf);
        if (!fits) {
            return false;
        }

        // Allocate the new type, then build a numpy reference into it
        value = Type(fits.rows, fits.cols);
        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
        if (dims == 1) {
            ref = ref.squeeze();
        } else if (ref.ndim() == 1) {
            buf = buf.squeeze();
        }

        int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());

        if (result < 0) { // Copy failed!
            PyErr_Clear();
            return false;
        }

        return true;
    }

private:
    // Cast implementation
    template <typename CType>
    static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::take_ownership:
            case return_value_policy::automatic:
                return eigen_encapsulate<props>(src);
            case return_value_policy::move:
                return eigen_encapsulate<props>(new CType(std::move(*src)));
            case return_value_policy::copy:
                return eigen_array_cast<props>(*src);
            case return_value_policy::reference:
            case return_value_policy::automatic_reference:
                return eigen_ref_array<props>(*src);
            case return_value_policy::reference_internal:
                return eigen_ref_array<props>(*src, parent);
            default:
                throw cast_error("unhandled return_value_policy: should not happen!");
        };
    }

public:
    // Normal returned non-reference, non-const value:
    static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // If you return a non-reference const, we mark the numpy array readonly:
    static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // lvalue reference return; default (automatic) becomes copy
    static handle cast(Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast_impl(&src, policy, parent);
    }
    // const lvalue reference return; default (automatic) becomes copy
    static handle cast(const Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast(&src, policy, parent);
    }
    // non-const pointer return
    static handle cast(Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }
    // const pointer return
    static handle cast(const Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }

    static constexpr auto name = props::descriptor;

    // NOLINTNEXTLINE(google-explicit-constructor)
    operator Type *() { return &value; }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator Type &() { return value; }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator Type &&() && { return std::move(value); }
    template <typename T>
    using cast_op_type = movable_cast_op_type<T>;

private:
    Type value;
};

// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType>
struct eigen_map_caster {
    static_assert(!std::is_pointer<typename MapType::Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);

private:
    using props = EigenProps<MapType>;

public:
    // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
    // to stay around), but we'll allow it under the assumption that you know what you're doing
    // (and have an appropriate keep_alive in place). We return a numpy array pointing directly at
    // the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.)
    // Note that this means you need to ensure you don't destroy the object in some other way (e.g.
    // with an appropriate keep_alive, or with a reference to a statically allocated matrix).
    static handle cast(const MapType &src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::copy:
                return eigen_array_cast<props>(src);
            case return_value_policy::reference_internal:
                return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
            case return_value_policy::reference:
            case return_value_policy::automatic:
            case return_value_policy::automatic_reference:
                return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
            default:
                // move, take_ownership don't make any sense for a ref/map:
                pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
        }
    }

    static constexpr auto name = props::descriptor;

    // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
    // types but not bound arguments). We still provide them (with an explicitly delete) so that
    // you end up here if you try anyway.
    bool load(handle, bool) = delete;
    operator MapType() = delete;
    template <typename>
    using cast_op_type = MapType;
};

// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>> : eigen_map_caster<Type> {};

// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
    Eigen::Ref<PlainObjectType, 0, StrideType>,
    enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>>
    : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
    using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
    using props = EigenProps<Type>;
    using Scalar = typename props::Scalar;
    static_assert(!std::is_pointer<Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
    using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
    using Array
        = array_t<Scalar,
                  array::forcecast
                      | ((props::row_major ? props::inner_stride : props::outer_stride) == 1
                             ? array::c_style
                         : (props::row_major ? props::outer_stride : props::inner_stride) == 1
                             ? array::f_style
                             : 0)>;
    static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
    // Delay construction (these have no default constructor)
    std::unique_ptr<MapType> map;
    std::unique_ptr<Type> ref;
    // Our array. When possible, this is just a numpy array pointing to the source data, but
    // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an
    // incompatible layout, or is an array of a type that needs to be converted). Using a numpy
    // temporary (rather than an Eigen temporary) saves an extra copy when we need both type
    // conversion and storage order conversion. (Note that we refuse to use this temporary copy
    // when loading an argument for a Ref<M> with M non-const, i.e. a read-write reference).
    Array copy_or_ref;

public:
    bool load(handle src, bool convert) {
        // First check whether what we have is already an array of the right type. If not, we
        // can't avoid a copy (because the copy is also going to do type conversion).
        bool need_copy = !isinstance<Array>(src);

        EigenConformable<props::row_major> fits;
        if (!need_copy) {
            // We don't need a converting copy, but we also need to check whether the strides are
            // compatible with the Ref's stride requirements
            auto aref = reinterpret_borrow<Array>(src);

            if (aref && (!need_writeable || aref.writeable())) {
                fits = props::conformable(aref);
                if (!fits) {
                    return false; // Incompatible dimensions
                }
                if (!fits.template stride_compatible<props>()) {
                    need_copy = true;
                } else {
                    copy_or_ref = std::move(aref);
                }
            } else {
                need_copy = true;
            }
        }

        if (need_copy) {
            // We need to copy: If we need a mutable reference, or we're not supposed to convert
            // (either because we're in the no-convert overload pass, or because we're explicitly
            // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.
            if (!convert || need_writeable) {
                return false;
            }

            Array copy = Array::ensure(src);
            if (!copy) {
                return false;
            }
            fits = props::conformable(copy);
            if (!fits || !fits.template stride_compatible<props>()) {
                return false;
            }
            copy_or_ref = std::move(copy);
            loader_life_support::add_patient(copy_or_ref);
        }

        ref.reset();
        map.reset(new MapType(data(copy_or_ref),
                              fits.rows,
                              fits.cols,
                              make_stride(fits.stride.outer(), fits.stride.inner())));
        ref.reset(new Type(*map));

        return true;
    }

    // NOLINTNEXTLINE(google-explicit-constructor)
    operator Type *() { return ref.get(); }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator Type &() { return *ref; }
    template <typename _T>
    using cast_op_type = pybind11::detail::cast_op_type<_T>;

private:
    template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
    Scalar *data(Array &a) {
        return a.mutable_data();
    }

    template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
    const Scalar *data(Array &a) {
        return a.data();
    }

    // Attempt to figure out a constructor of `Stride` that will work.
    // If both strides are fixed, use a default constructor:
    template <typename S>
    using stride_ctor_default = bool_constant<S::InnerStrideAtCompileTime != Eigen::Dynamic
                                              && S::OuterStrideAtCompileTime != Eigen::Dynamic
                                              && std::is_default_constructible<S>::value>;
    // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
    // Eigen::Stride, and use it:
    template <typename S>
    using stride_ctor_dual
        = bool_constant<!stride_ctor_default<S>::value
                        && std::is_constructible<S, EigenIndex, EigenIndex>::value>;
    // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
    // it (passing whichever stride is dynamic).
    template <typename S>
    using stride_ctor_outer
        = bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
                        && S::OuterStrideAtCompileTime == Eigen::Dynamic
                        && S::InnerStrideAtCompileTime != Eigen::Dynamic
                        && std::is_constructible<S, EigenIndex>::value>;
    template <typename S>
    using stride_ctor_inner
        = bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
                        && S::InnerStrideAtCompileTime == Eigen::Dynamic
                        && S::OuterStrideAtCompileTime != Eigen::Dynamic
                        && std::is_constructible<S, EigenIndex>::value>;

    template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex) {
        return S();
    }
    template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex inner) {
        return S(outer, inner);
    }
    template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex) {
        return S(outer);
    }
    template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex inner) {
        return S(inner);
    }
};

// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
    static_assert(!std::is_pointer<typename Type::Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);

protected:
    using Matrix
        = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
    using props = EigenProps<Matrix>;

public:
    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        handle h = eigen_encapsulate<props>(new Matrix(src));
        return h;
    }
    static handle cast(const Type *src, return_value_policy policy, handle parent) {
        return cast(*src, policy, parent);
    }

    static constexpr auto name = props::descriptor;

    // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
    // types but not bound arguments). We still provide them (with an explicitly delete) so that
    // you end up here if you try anyway.
    bool load(handle, bool) = delete;
    operator Type() = delete;
    template <typename>
    using cast_op_type = Type;
};

template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
    using Scalar = typename Type::Scalar;
    static_assert(!std::is_pointer<Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
    using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
    using Index = typename Type::Index;
    static constexpr bool rowMajor = Type::IsRowMajor;

    bool load(handle src, bool) {
        if (!src) {
            return false;
        }

        auto obj = reinterpret_borrow<object>(src);
        object sparse_module = module_::import("scipy.sparse");
        object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix");

        if (!type::handle_of(obj).is(matrix_type)) {
            try {
                obj = matrix_type(obj);
            } catch (const error_already_set &) {
                return false;
            }
        }

        auto values = array_t<Scalar>((object) obj.attr("data"));
        auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
        auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
        auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
        auto nnz = obj.attr("nnz").cast<Index>();

        if (!values || !innerIndices || !outerIndices) {
            return false;
        }

        value = EigenMapSparseMatrix<Scalar,
                                     Type::Flags & (Eigen::RowMajor | Eigen::ColMajor),
                                     StorageIndex>(shape[0].cast<Index>(),
                                                   shape[1].cast<Index>(),
                                                   std::move(nnz),
                                                   outerIndices.mutable_data(),
                                                   innerIndices.mutable_data(),
                                                   values.mutable_data());

        return true;
    }

    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        const_cast<Type &>(src).makeCompressed();

        object matrix_type
            = module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix");

        array data(src.nonZeros(), src.valuePtr());
        array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
        array innerIndices(src.nonZeros(), src.innerIndexPtr());

        return matrix_type(pybind11::make_tuple(
                               std::move(data), std::move(innerIndices), std::move(outerIndices)),
                           pybind11::make_tuple(src.rows(), src.cols()))
            .release();
    }

    PYBIND11_TYPE_CASTER(Type,
                         const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[",
                                                             "scipy.sparse.csc_matrix[")
                             + npy_format_descriptor<Scalar>::name + const_name("]"));
};

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
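For context, here is a minimal usage sketch (module and function names are made up for illustration) of what the matrix casters above provide: with this header included, a bound C++ function can take and return dense Eigen matrices, and pybind11 converts them to and from numpy arrays automatically.

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/eigen/matrix.h> // the header shown above, as shipped by pybind11
#include <Eigen/Core>

namespace py = pybind11;

// Hypothetical example: the dense type_caster copies the incoming numpy array
// into an Eigen::MatrixXd and converts the returned matrix back to a numpy array.
Eigen::MatrixXd scale(const Eigen::MatrixXd &m, double factor) { return m * factor; }

PYBIND11_MODULE(example_matrix, m) {
    m.def("scale", &scale, py::arg("m"), py::arg("factor"));
}
```

Taking an `Eigen::Ref<const Eigen::MatrixXd>` argument instead would avoid the copy whenever the numpy array's dtype and strides already satisfy the requirements checked by the `Ref<...>` loader above.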
patch/eigen/tensor.h (new file, mode 100644)

/*
    pybind11/eigen/tensor.h: Transparent conversion for Eigen tensors

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "../numpy.h"
#include "common.h"

#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
static_assert(__GNUC__ > 5, "Eigen Tensor support in pybind11 requires GCC > 5.0");
#endif

// Disable warnings for Eigen
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4554)
PYBIND11_WARNING_DISABLE_MSVC(4127)
#if defined(__MINGW32__)
PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
#endif

#include <unsupported/Eigen/CXX11/Tensor>

PYBIND11_WARNING_POP

static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 0),
              "Eigen Tensor support in pybind11 requires Eigen >= 3.3.0");

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

PYBIND11_WARNING_DISABLE_MSVC(4127)

PYBIND11_NAMESPACE_BEGIN(detail)

inline bool is_tensor_aligned(const void *data) {
    return (reinterpret_cast<std::size_t>(data) % EIGEN_DEFAULT_ALIGN_BYTES) == 0;
}

template <typename T>
constexpr int compute_array_flag_from_tensor() {
    static_assert((static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor))
                      || (static_cast<int>(T::Layout) == static_cast<int>(Eigen::ColMajor)),
                  "Layout must be row or column major");
    return (static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor)) ? array::c_style
                                                                               : array::f_style;
}

template <typename T>
struct eigen_tensor_helper {};

template <typename Scalar_, int NumIndices_, int Options_, typename IndexType>
struct eigen_tensor_helper<Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>> {
    using Type = Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>;
    using ValidType = void;

    static Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape(const Type &f) {
        return f.dimensions();
    }

    static constexpr bool
    is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> & /*shape*/) {
        return true;
    }

    template <typename T>
    struct helper {};

    template <size_t... Is>
    struct helper<index_sequence<Is...>> {
        static constexpr auto value = concat(const_name(((void) Is, "?"))...);
    };

    static constexpr auto dimensions_descriptor
        = helper<decltype(make_index_sequence<Type::NumIndices>())>::value;

    template <typename... Args>
    static Type *alloc(Args &&...args) {
        return new Type(std::forward<Args>(args)...);
    }

    static void free(Type *tensor) { delete tensor; }
};

template <typename Scalar_, typename std::ptrdiff_t... Indices, int Options_, typename IndexType>
struct eigen_tensor_helper<
    Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>> {
    using Type = Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>;
    using ValidType = void;

    static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices>
    get_shape(const Type & /*f*/) {
        return get_shape();
    }

    static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape() {
        return Eigen::DSizes<typename Type::Index, Type::NumIndices>(Indices...);
    }

    static bool
    is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> &shape) {
        return get_shape() == shape;
    }

    static constexpr auto dimensions_descriptor = concat(const_name<Indices>()...);

    template <typename... Args>
    static Type *alloc(Args &&...args) {
        Eigen::aligned_allocator<Type> allocator;
        return ::new (allocator.allocate(1)) Type(std::forward<Args>(args)...);
    }

    static void free(Type *tensor) {
        Eigen::aligned_allocator<Type> allocator;
        tensor->~Type();
        allocator.deallocate(tensor, 1);
    }
};

template <typename Type, bool ShowDetails, bool NeedsWriteable = false>
struct get_tensor_descriptor {
    static constexpr auto details
        = const_name<NeedsWriteable>(", flags.writeable", "")
          + const_name<static_cast<int>(Type::Layout) == static_cast<int>(Eigen::RowMajor)>(
              ", flags.c_contiguous", ", flags.f_contiguous");
    static constexpr auto value
        = const_name("numpy.ndarray[") + npy_format_descriptor<typename Type::Scalar>::name
          + const_name("[") + eigen_tensor_helper<remove_cv_t<Type>>::dimensions_descriptor
          + const_name("]") + const_name<ShowDetails>(details, const_name("")) + const_name("]");
};

// When EIGEN_AVOID_STL_ARRAY is defined, Eigen::DSizes<T, 0> does not have the begin() member
// function. Falling back to a simple loop works around this issue.
//
// We need to disable the type-limits warning for the inner loop when size = 0.

PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_GCC("-Wtype-limits")

template <typename T, int size>
std::vector<T> convert_dsizes_to_vector(const Eigen::DSizes<T, size> &arr) {
    std::vector<T> result(size);

    for (size_t i = 0; i < size; i++) {
        result[i] = arr[i];
    }

    return result;
}

template <typename T, int size>
Eigen::DSizes<T, size> get_shape_for_array(const array &arr) {
    Eigen::DSizes<T, size> result;
    const T *shape = arr.shape();
    for (size_t i = 0; i < size; i++) {
        result[i] = shape[i];
    }

    return result;
}

PYBIND11_WARNING_POP

template <typename Type>
struct type_caster<Type, typename eigen_tensor_helper<Type>::ValidType> {
    static_assert(!std::is_pointer<typename Type::Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
    using Helper = eigen_tensor_helper<Type>;
    static constexpr auto temp_name = get_tensor_descriptor<Type, false>::value;
    PYBIND11_TYPE_CASTER(Type, temp_name);

    bool load(handle src, bool convert) {
        if (!convert) {
            if (!isinstance<array>(src)) {
                return false;
            }
            array temp = array::ensure(src);
            if (!temp) {
                return false;
            }
            if (!temp.dtype().is(dtype::of<typename Type::Scalar>())) {
                return false;
            }
        }

        array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()> arr(
            reinterpret_borrow<object>(src));

        if (arr.ndim() != Type::NumIndices) {
            return false;
        }
        auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);

        if (!Helper::is_correct_shape(shape)) {
            return false;
        }

#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
        auto data_pointer = arr.data();
#else
        // Handle Eigen bug
        auto data_pointer = const_cast<typename Type::Scalar *>(arr.data());
#endif

        if (is_tensor_aligned(arr.data())) {
            value = Eigen::TensorMap<const Type, Eigen::Aligned>(data_pointer, shape);
        } else {
            value = Eigen::TensorMap<const Type>(data_pointer, shape);
        }

        return true;
    }

    static handle cast(Type &&src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::reference
            || policy == return_value_policy::reference_internal) {
            pybind11_fail("Cannot use a reference return value policy for an rvalue");
        }
        return cast_impl(&src, return_value_policy::move, parent);
    }

    static handle cast(const Type &&src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::reference
            || policy == return_value_policy::reference_internal) {
            pybind11_fail("Cannot use a reference return value policy for an rvalue");
        }
        return cast_impl(&src, return_value_policy::move, parent);
    }

    static handle cast(Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast_impl(&src, policy, parent);
    }

    static handle cast(const Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast(&src, policy, parent);
    }

    static handle cast(Type *src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic) {
            policy = return_value_policy::take_ownership;
        } else if (policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::reference;
        }
        return cast_impl(src, policy, parent);
    }

    static handle cast(const Type *src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic) {
            policy = return_value_policy::take_ownership;
        } else if (policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::reference;
        }
        return cast_impl(src, policy, parent);
    }

    template <typename C>
    static handle cast_impl(C *src, return_value_policy policy, handle parent) {
        object parent_object;
        bool writeable = false;
        switch (policy) {
            case return_value_policy::move:
                if (std::is_const<C>::value) {
                    pybind11_fail("Cannot move from a constant reference");
                }

                src = Helper::alloc(std::move(*src));

                parent_object
                    = capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
                writeable = true;
                break;

            case return_value_policy::take_ownership:
                if (std::is_const<C>::value) {
                    // This cast is ugly, and might be UB in some cases, but we don't have an
                    // alternative here as we must free that memory
                    Helper::free(const_cast<Type *>(src));
                    pybind11_fail("Cannot take ownership of a const reference");
                }

                parent_object
                    = capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
                writeable = true;
                break;

            case return_value_policy::copy:
                writeable = true;
                break;

            case return_value_policy::reference:
                parent_object = none();
                writeable = !std::is_const<C>::value;
                break;

            case return_value_policy::reference_internal:
                // Default should do the right thing
                if (!parent) {
                    pybind11_fail("Cannot use reference internal when there is no parent");
                }
                parent_object = reinterpret_borrow<object>(parent);
                writeable = !std::is_const<C>::value;
                break;

            default:
                pybind11_fail("pybind11 bug in eigen.h, please file a bug report");
        }

        auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
            convert_dsizes_to_vector(Helper::get_shape(*src)), src->data(), parent_object);

        if (!writeable) {
            array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
        }

        return result.release();
    }
};

template <typename StoragePointerType,
          bool needs_writeable,
          enable_if_t<!needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
    return reinterpret_cast<StoragePointerType>(arr.data());
#else
    // Handle Eigen bug
    return reinterpret_cast<StoragePointerType>(const_cast<void *>(arr.data()));
#endif
}

template <typename StoragePointerType,
          bool needs_writeable,
          enable_if_t<needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
    return reinterpret_cast<StoragePointerType>(arr.mutable_data());
}

template <typename T, typename = void>
struct get_storage_pointer_type;

template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::StoragePointerType>> {
    using SPT = typename MapType::StoragePointerType;
};

template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::PointerArgType>> {
    using SPT = typename MapType::PointerArgType;
};

template <typename Type, int Options>
struct type_caster<Eigen::TensorMap<Type, Options>,
                   typename eigen_tensor_helper<remove_cv_t<Type>>::ValidType> {
    static_assert(!std::is_pointer<typename Type::Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
    using MapType = Eigen::TensorMap<Type, Options>;
    using Helper = eigen_tensor_helper<remove_cv_t<Type>>;

    bool load(handle src, bool /*convert*/) {
        // Note that we have a lot more checks here as we want to make sure to avoid copies
        if (!isinstance<array>(src)) {
            return false;
        }
        auto arr = reinterpret_borrow<array>(src);
        if ((arr.flags() & compute_array_flag_from_tensor<Type>()) == 0) {
            return false;
        }

        if (!arr.dtype().is(dtype::of<typename Type::Scalar>())) {
            return false;
        }

        if (arr.ndim() != Type::NumIndices) {
            return false;
        }

        constexpr bool is_aligned = (Options & Eigen::Aligned) != 0;

        if (is_aligned && !is_tensor_aligned(arr.data())) {
            return false;
        }

        auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);

        if (!Helper::is_correct_shape(shape)) {
            return false;
        }

        if (needs_writeable && !arr.writeable()) {
            return false;
        }

        auto result = get_array_data_for_type<typename get_storage_pointer_type<MapType>::SPT,
                                              needs_writeable>(arr);

        value.reset(new MapType(std::move(result), std::move(shape)));

        return true;
    }

    static handle cast(MapType &&src, return_value_policy policy, handle parent) {
        return cast_impl(&src, policy, parent);
    }

    static handle cast(const MapType &&src, return_value_policy policy, handle parent) {
        return cast_impl(&src, policy, parent);
    }

    static handle cast(MapType &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast_impl(&src, policy, parent);
    }

    static handle cast(const MapType &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast(&src, policy, parent);
    }

    static handle cast(MapType *src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic) {
            policy = return_value_policy::take_ownership;
        } else if (policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::reference;
        }
        return cast_impl(src, policy, parent);
    }

    static handle cast(const MapType *src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic) {
            policy = return_value_policy::take_ownership;
        } else if (policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::reference;
        }
        return cast_impl(src, policy, parent);
    }

    template <typename C>
    static handle cast_impl(C *src, return_value_policy policy, handle parent) {
        object parent_object;
        constexpr bool writeable = !std::is_const<C>::value;
        switch (policy) {
            case return_value_policy::reference:
                parent_object = none();
                break;

            case return_value_policy::reference_internal:
                // Default should do the right thing
                if (!parent) {
                    pybind11_fail("Cannot use reference internal when there is no parent");
                }
                parent_object = reinterpret_borrow<object>(parent);
                break;

            case return_value_policy::take_ownership:
                delete src;
                // fallthrough
            default:
                // move, take_ownership don't make any sense for a ref/map:
                pybind11_fail("Invalid return_value_policy for Eigen Map type, must be either "
                              "reference or reference_internal");
        }

        auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
            convert_dsizes_to_vector(Helper::get_shape(*src)),
            src->data(),
            std::move(parent_object));

        if (!writeable) {
            array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
        }

        return result.release();
    }

#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
    static constexpr bool needs_writeable = !std::is_const<typename std::remove_pointer<
        typename get_storage_pointer_type<MapType>::SPT>::type>::value;
#else
    // Handle Eigen bug
    static constexpr bool needs_writeable = !std::is_const<Type>::value;
#endif

protected:
    // TODO: Move to std::optional once std::optional has more support
    std::unique_ptr<MapType> value;

public:
    static constexpr auto name = get_tensor_descriptor<Type, true, needs_writeable>::value;
    explicit operator MapType *() { return value.get(); }
    explicit operator MapType &() { return *value; }
    explicit operator MapType &&() && { return std::move(*value); }

    template <typename T_>
    using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>;
};

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
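Likewise, a minimal sketch (names are illustrative, not from this repository) of what the tensor caster above enables: returning an Eigen::Tensor from a bound function produces an n-dimensional numpy array on the Python side.

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/eigen/tensor.h> // the header shown above, as shipped by pybind11
#include <unsupported/Eigen/CXX11/Tensor>

namespace py = pybind11;

// Hypothetical example: builds a 2x3x4 tensor of ones. The caster above
// copies the data into a numpy.ndarray with matching shape and layout.
Eigen::Tensor<double, 3> ones_tensor() {
    Eigen::Tensor<double, 3> t(2, 3, 4);
    t.setConstant(1.0);
    return t;
}

PYBIND11_MODULE(example_tensor, m) {
    m.def("ones_tensor", &ones_tensor);
}
```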
readme.md

@@ -17,8 +17,14 @@ pip install .
 ```

 #### Trouble Shooting

-* `fatal error: eigen/matrix.h: No such file or directory`: This is a known issue for `torch==2.1.0` and `torch==2.1.1` (https://github.com/pytorch/pytorch/issues/112841). Please avoid using these two versions.
+**`fatal error: eigen/matrix.h: No such file or directory`**
+This is a known issue for `torch==2.1.0` and `torch==2.1.1` (https://github.com/pytorch/pytorch/issues/112841).
+To patch these two versions, clone this repository and copy `patch/eigen` into your pytorch include directory:
+```bash
+# for example, if you are using anaconda (assume base env)
+cp -r patch/eigen ~/anaconda3/lib/python3.9/site-packages/torch/include/pybind11/
+```

 ### Usage
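The anaconda path in the snippet above is only an example; the actual torch include directory varies by environment. Assuming `torch` is importable, one way to locate it is `python -c "from torch.utils import cpp_extension; print(cpp_extension.include_paths())"`, then copy `patch/eigen` into the `pybind11/` folder under the reported `torch/include` path.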