OpenDAS / torch-spline-conv

Commit 1e216a77
authored May 01, 2019 by rusty1s

    pytorch 1.1.0 update

parent 2862a818
Showing 8 changed files with 101 additions and 93 deletions:
.travis.yml                      +3   -3
README.md                        +2   -2
cpu/basis.cpp                    +68  -62
cpu/weighting.cpp                +4   -4
cuda/basis_kernel.cu             +18  -16
cuda/weighting_kernel.cu         +4   -4
setup.py                         +1   -1
torch_spline_conv/__init__.py    +1   -1
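Nearly all of the source changes below are one recurring migration: every `AT_DISPATCH_FLOATING_TYPES(tensor.type(), ...)` call becomes `AT_DISPATCH_FLOATING_TYPES(tensor.scalar_type(), ...)`, matching the dispatch API of PyTorch 1.1.0, where passing the old `Tensor.type()` is deprecated. As a minimal sketch of the pattern, assuming a contiguous input tensor (the `scale_by_two` function is hypothetical, not part of this repo):

```
#include <torch/extension.h>

// Hypothetical example of the dispatch idiom used throughout this commit:
// pass the ScalarType (scalar_type()), not the deprecated Type (type()).
at::Tensor scale_by_two(at::Tensor input) {
  auto out = at::empty_like(input);
  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "scale_by_two", [&] {
    // scalar_t is defined by the macro for each floating type in turn.
    auto in_data = input.data<scalar_t>();
    auto out_data = out.data<scalar_t>();
    for (int64_t i = 0; i < input.numel(); i++)
      out_data[i] = in_data[i] * 2;
  });
  return out;
}
```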
.travis.yml
```
@@ -17,9 +17,9 @@ before_install:
   - export CC="gcc-4.9"
   - export CXX="g++-4.9"
 install:
-  - if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp27-cp27mu-linux_x86_64.whl; fi
-  - if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-cp35m-linux_x86_64.whl; fi
-  - if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp27-cp27mu-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp35-cp35m-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl; fi
   - pip install pycodestyle
   - pip install flake8
   - pip install codecov
```
README.md
````
@@ -21,11 +21,11 @@ The operator works on all floating point data types and is implemented both for
 ## Installation
 
-Ensure that at least PyTorch 1.0.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
+Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
 
 ```
 $ python -c "import torch; print(torch.__version__)"
->>> 1.0.0
+>>> 1.1.0
 
 $ echo $PATH
 >>> /usr/local/cuda/bin:...
````
cpu/basis.cpp
```
@@ -32,7 +32,8 @@ template <typename scalar_t> inline scalar_t cubic(scalar_t v, int64_t k_mod) {
   auto basis = at::empty({E, S}, PSEUDO.options()); \
   auto weight_index = at::empty({E, S}, KERNEL_SIZE.options()); \
   \
-  AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_forward_##M", [&] { \
+  AT_DISPATCH_FLOATING_TYPES( \
+      PSEUDO.scalar_type(), "basis_forward_##M", [&] { \
     auto pseudo_data = PSEUDO.data<scalar_t>(); \
     auto kernel_size_data = KERNEL_SIZE.data<int64_t>(); \
     auto is_open_spline_data = IS_OPEN_SPLINE.data<uint8_t>(); \
@@ -52,10 +53,12 @@ template <typename scalar_t> inline scalar_t cubic(scalar_t v, int64_t k_mod) {
     auto k_mod = k % (M + 1); \
     k /= M + 1; \
     \
-    auto v = pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
+    auto v = \
+        pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
     v *= kernel_size_data[d] - M * is_open_spline_data[d]; \
     \
-    wi += (((int64_t)v + k_mod) % kernel_size_data[d]) * wi_offset; \
+    wi += \
+        (((int64_t)v + k_mod) % kernel_size_data[d]) * wi_offset; \
     wi_offset *= kernel_size_data[d]; \
     \
     v -= floor(v); \
@@ -121,7 +124,8 @@ inline scalar_t grad_cubic(scalar_t v, int64_t k_mod) {
   auto S = GRAD_BASIS.size(1); \
   auto grad_pseudo = at::empty({E, D}, PSEUDO.options()); \
   \
-  AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_backward_##M", [&] { \
+  AT_DISPATCH_FLOATING_TYPES( \
+      PSEUDO.scalar_type(), "basis_backward_##M", [&] { \
     auto grad_basis_data = GRAD_BASIS.data<scalar_t>(); \
     auto pseudo_data = PSEUDO.data<scalar_t>(); \
     auto kernel_size_data = KERNEL_SIZE.data<int64_t>(); \
@@ -135,7 +139,8 @@ inline scalar_t grad_cubic(scalar_t v, int64_t k_mod) {
     g = 0; \
     for (ptrdiff_t s = 0; s < S; s++) { \
       auto k_mod = (s / (int64_t)(pow(M + 1, d) + 0.5)) % (M + 1); \
-      auto v = pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
+      auto v = \
+          pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
      v *= kernel_size_data[d] - M * is_open_spline_data[d]; \
      v -= floor(v); \
      v = GRAD_FUNC<scalar_t>(v, k_mod); \
@@ -146,7 +151,8 @@ inline scalar_t grad_cubic(scalar_t v, int64_t k_mod) {
       k_mod = (s / (int64_t)(pow(M + 1, d_new) + 0.5)) % (M + 1); \
       v = pseudo_data[e * pseudo.stride(0) + \
                       d_new * pseudo.stride(1)]; \
-      v *= kernel_size_data[d_new] - M * is_open_spline_data[d_new]; \
+      v *= kernel_size_data[d_new] - \
+          M * is_open_spline_data[d_new]; \
       v -= floor(v); \
       v = FUNC<scalar_t>(v, k_mod); \
       tmp *= v; \
```
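Beyond the dispatch change, the hunks above only re-wrap long lines inside the macro; the arithmetic is untouched. For each spline-product index `s`, the loop walks the `D` pseudo-coordinate dimensions, peels off a base-`(M + 1)` digit `k_mod`, and accumulates a flat kernel-weight index in mixed radix with `kernel_size[d]` digits per dimension. A standalone re-expression of that computation, assuming spline degree `M`, one edge's pseudo coordinates in `[0, 1]`, and hypothetical plain-array inputs (this helper itself is not in the repo):

```
#include <cmath>
#include <cstdint>

// Illustrative re-expression of the weight-index computation in the
// basis-forward macro. Names mirror the diff; the helper is hypothetical.
int64_t weight_index_for(const double *pseudo, const int64_t *kernel_size,
                         const uint8_t *is_open_spline, int64_t D, int64_t M,
                         int64_t s) {
  int64_t wi = 0, wi_offset = 1;
  auto k = s;
  for (int64_t d = 0; d < D; d++) {
    auto k_mod = k % (M + 1); // which of the M+1 local basis functions
    k /= M + 1;
    // Scale the pseudo coordinate into knot space; open splines drop M knots.
    auto v = pseudo[d] * (kernel_size[d] - M * is_open_spline[d]);
    wi += (((int64_t)v + k_mod) % kernel_size[d]) * wi_offset;
    wi_offset *= kernel_size[d]; // mixed radix: kernel_size[d] digits per dim
  }
  return wi;
}
```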
cpu/weighting.cpp
```
@@ -6,7 +6,7 @@ at::Tensor weighting_fw(at::Tensor x, at::Tensor weight, at::Tensor basis,
   auto S = basis.size(1);
   auto out = at::empty({E, M_out}, x.options());
 
-  AT_DISPATCH_FLOATING_TYPES(out.type(), "weighting_fw", [&] {
+  AT_DISPATCH_FLOATING_TYPES(out.scalar_type(), "weighting_fw", [&] {
     auto x_data = x.data<scalar_t>();
     auto weight_data = weight.data<scalar_t>();
     auto basis_data = basis.data<scalar_t>();
@@ -43,7 +43,7 @@ at::Tensor weighting_bw_x(at::Tensor grad_out, at::Tensor weight,
   auto S = basis.size(1);
   auto grad_x = at::zeros({E, M_in}, grad_out.options());
 
-  AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_x", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_x", [&] {
     auto grad_out_data = grad_out.data<scalar_t>();
     auto weight_data = weight.data<scalar_t>();
     auto basis_data = basis.data<scalar_t>();
@@ -77,7 +77,7 @@ at::Tensor weighting_bw_w(at::Tensor grad_out, at::Tensor x, at::Tensor basis,
   auto S = basis.size(1);
   auto grad_weight = at::zeros({K, M_in, M_out}, grad_out.options());
 
-  AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_w", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_w", [&] {
     auto grad_out_data = grad_out.data<scalar_t>();
     auto x_data = x.data<scalar_t>();
     auto basis_data = basis.data<scalar_t>();
@@ -109,7 +109,7 @@ at::Tensor weighting_bw_b(at::Tensor grad_out, at::Tensor x, at::Tensor weight,
   auto S = weight_index.size(1);
   auto grad_basis = at::zeros({E, S}, grad_out.options());
 
-  AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_b", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_b", [&] {
     auto grad_out_data = grad_out.data<scalar_t>();
     auto x_data = x.data<scalar_t>();
     auto weight_data = weight.data<scalar_t>();
```
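All four hunks in this file are the same one-token change. For intuition: `AT_DISPATCH_FLOATING_TYPES` is essentially a switch over the scalar type that instantiates its lambda once per floating-point type, which is why the PyTorch 1.1.0 form takes the `at::ScalarType` enum from `scalar_type()` directly. A rough, simplified sketch of that idiom (the real macro lives in ATen and defines `scalar_t` textually; this stand-in passes the type through a tag instead):

```
#include <ATen/ATen.h>

// Simplified stand-in for the dispatch idiom; not the actual ATen macro.
template <typename F>
void dispatch_floating(at::ScalarType st, const char *name, F &&f) {
  switch (st) {
  case at::ScalarType::Double: f(double{}); break;
  case at::ScalarType::Float:  f(float{});  break;
  default: AT_ERROR(name, " not implemented for this scalar type");
  }
}

// Usage sketch (mirroring the weighting_fw hunk above): recover the
// concrete type from the tag, then read typed data.
// dispatch_floating(out.scalar_type(), "weighting_fw", [&](auto tag) {
//   using scalar_t = decltype(tag);
//   auto out_data = out.data<scalar_t>();
//   ...
// });
```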
cuda/basis_kernel.cu
```
@@ -39,7 +39,8 @@ template <typename scalar_t> struct BasisForward {
   auto basis = at::empty({E, S}, PSEUDO.options()); \
   auto weight_index = at::empty({E, S}, KERNEL_SIZE.options()); \
   \
-  AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_forward_##M", [&] { \
+  AT_DISPATCH_FLOATING_TYPES( \
+      PSEUDO.scalar_type(), "basis_forward_##M", [&] { \
     KERNEL_NAME<scalar_t><<<BLOCKS(basis.numel()), THREADS>>>( \
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), \
         at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), \
@@ -169,7 +170,8 @@ template <typename scalar_t> struct BasisBackward {
   auto D = PSEUDO.size(1); \
   auto grad_pseudo = at::empty({E, D}, PSEUDO.options()); \
   \
-  AT_DISPATCH_FLOATING_TYPES(GRAD_BASIS.type(), "basis_backward_##M", [&] { \
+  AT_DISPATCH_FLOATING_TYPES( \
+      GRAD_BASIS.scalar_type(), "basis_backward_##M", [&] { \
     KERNEL_NAME<scalar_t><<<BLOCKS(grad_pseudo.numel()), THREADS>>>( \
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_pseudo), \
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(GRAD_BASIS), \
```
cuda/weighting_kernel.cu
```
@@ -42,7 +42,7 @@ at::Tensor weighting_fw_cuda(at::Tensor x, at::Tensor weight, at::Tensor basis,
   cudaSetDevice(x.get_device());
   auto E = x.size(0), M_out = weight.size(2);
   auto out = at::empty({E, M_out}, x.options());
-  AT_DISPATCH_FLOATING_TYPES(out.type(), "weighting_fw", [&] {
+  AT_DISPATCH_FLOATING_TYPES(out.scalar_type(), "weighting_fw", [&] {
     weighting_fw_kernel<scalar_t><<<BLOCKS(out.numel()), THREADS>>>(
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x),
@@ -91,7 +91,7 @@ at::Tensor weighting_bw_x_cuda(at::Tensor grad_out, at::Tensor weight,
   auto E = grad_out.size(0), M_in = weight.size(1);
   auto grad_x = at::empty({E, M_in}, grad_out.options());
   weight = weight.transpose(1, 2).contiguous();
-  AT_DISPATCH_FLOATING_TYPES(grad_x.type(), "weighting_bw_x", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad_x.scalar_type(), "weighting_bw_x", [&] {
     weighting_bw_x_kernel<scalar_t><<<BLOCKS(grad_x.numel()), THREADS>>>(
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_x),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out),
@@ -136,7 +136,7 @@ at::Tensor weighting_bw_w_cuda(at::Tensor grad_out, at::Tensor x,
   cudaSetDevice(grad_out.get_device());
   auto M_in = x.size(1), M_out = grad_out.size(1);
   auto grad_weight = at::zeros({K, M_in, M_out}, grad_out.options());
-  AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_w", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_w", [&] {
     weighting_bw_w_kernel<scalar_t><<<BLOCKS(grad_out.numel()), THREADS>>>(
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_weight),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out),
@@ -181,7 +181,7 @@ at::Tensor weighting_bw_b_cuda(at::Tensor grad_out, at::Tensor x,
   cudaSetDevice(grad_out.get_device());
   auto E = x.size(0), S = weight_index.size(1);
   auto grad_basis = at::zeros({E, S}, grad_out.options());
-  AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_b", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_b", [&] {
     weighting_bw_b_kernel<scalar_t><<<BLOCKS(grad_out.numel()), THREADS>>>(
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_basis),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out),
```
setup.py
```
@@ -16,7 +16,7 @@ if CUDA_HOME is not None:
             ['cuda/weighting.cpp', 'cuda/weighting_kernel.cu']),
     ]
 
-__version__ = '1.0.6'
+__version__ = '1.1.0'
 url = 'https://github.com/rusty1s/pytorch_spline_conv'
 
 install_requires = []
```
torch_spline_conv/__init__.py
```
@@ -2,6 +2,6 @@ from .basis import SplineBasis
 from .weighting import SplineWeighting
 from .conv import SplineConv
 
-__version__ = '1.0.6'
+__version__ = '1.1.0'
 
 __all__ = ['SplineBasis', 'SplineWeighting', 'SplineConv', '__version__']
```