Commit 1e216a77 authored by rusty1s

pytorch 1.1.0 update

parent 2862a818
@@ -17,9 +17,9 @@ before_install:
- export CC="gcc-4.9"
- export CXX="g++-4.9"
install:
-- if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp27-cp27mu-linux_x86_64.whl; fi
-- if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-cp35m-linux_x86_64.whl; fi
-- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-linux_x86_64.whl; fi
+- if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp27-cp27mu-linux_x86_64.whl; fi
+- if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp35-cp35m-linux_x86_64.whl; fi
+- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl; fi
- pip install pycodestyle
- pip install flake8
- pip install codecov
......
@@ -21,11 +21,11 @@ The operator works on all floating point data types and is implemented both for
## Installation
-Ensure that at least PyTorch 1.0.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
+Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
```
$ python -c "import torch; print(torch.__version__)"
->>> 1.0.0
+>>> 1.1.0
$ echo $PATH
>>> /usr/local/cuda/bin:...
......
@@ -32,7 +32,8 @@ template <typename scalar_t> inline scalar_t cubic(scalar_t v, int64_t k_mod) {
auto basis = at::empty({E, S}, PSEUDO.options()); \
auto weight_index = at::empty({E, S}, KERNEL_SIZE.options()); \
\
-AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_forward_##M", [&] { \
+AT_DISPATCH_FLOATING_TYPES( \
+PSEUDO.scalar_type(), "basis_forward_##M", [&] { \
auto pseudo_data = PSEUDO.data<scalar_t>(); \
auto kernel_size_data = KERNEL_SIZE.data<int64_t>(); \
auto is_open_spline_data = IS_OPEN_SPLINE.data<uint8_t>(); \
@@ -52,10 +53,12 @@ template <typename scalar_t> inline scalar_t cubic(scalar_t v, int64_t k_mod) {
auto k_mod = k % (M + 1); \
k /= M + 1; \
\
-auto v = pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
+auto v = \
+pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
v *= kernel_size_data[d] - M * is_open_spline_data[d]; \
\
-wi += (((int64_t)v + k_mod) % kernel_size_data[d]) * wi_offset; \
+wi += \
+(((int64_t)v + k_mod) % kernel_size_data[d]) * wi_offset; \
wi_offset *= kernel_size_data[d]; \
\
v -= floor(v); \
@@ -121,7 +124,8 @@ inline scalar_t grad_cubic(scalar_t v, int64_t k_mod) {
auto S = GRAD_BASIS.size(1); \
auto grad_pseudo = at::empty({E, D}, PSEUDO.options()); \
\
-AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_backward_##M", [&] { \
+AT_DISPATCH_FLOATING_TYPES( \
+PSEUDO.scalar_type(), "basis_backward_##M", [&] { \
auto grad_basis_data = GRAD_BASIS.data<scalar_t>(); \
auto pseudo_data = PSEUDO.data<scalar_t>(); \
auto kernel_size_data = KERNEL_SIZE.data<int64_t>(); \
@@ -135,7 +139,8 @@ inline scalar_t grad_cubic(scalar_t v, int64_t k_mod) {
g = 0; \
for (ptrdiff_t s = 0; s < S; s++) { \
auto k_mod = (s / (int64_t)(pow(M + 1, d) + 0.5)) % (M + 1); \
-auto v = pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
+auto v = \
+pseudo_data[e * pseudo.stride(0) + d * pseudo.stride(1)]; \
v *= kernel_size_data[d] - M * is_open_spline_data[d]; \
v -= floor(v); \
v = GRAD_FUNC<scalar_t>(v, k_mod); \
@@ -146,7 +151,8 @@ inline scalar_t grad_cubic(scalar_t v, int64_t k_mod) {
k_mod = (s / (int64_t)(pow(M + 1, d_new) + 0.5)) % (M + 1); \
v = pseudo_data[e * pseudo.stride(0) + \
d_new * pseudo.stride(1)]; \
-v *= kernel_size_data[d_new] - M * is_open_spline_data[d_new]; \
+v *= kernel_size_data[d_new] - \
+M * is_open_spline_data[d_new]; \
v -= floor(v); \
v = FUNC<scalar_t>(v, k_mod); \
tmp *= v; \
......
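
For context on the CPU-side changes above: PyTorch 1.1 deprecates passing `Tensor::type()` to the dispatch macros in favor of `Tensor::scalar_type()`, which is why every `AT_DISPATCH_FLOATING_TYPES` call site is updated. A minimal sketch of the updated pattern (the `scale_by_two` function is hypothetical and not part of this repository):

```cpp
#include <ATen/ATen.h>

// Hypothetical example: dispatch over floating point dtypes with the
// PyTorch >= 1.1 API. scalar_type() replaces the deprecated type().
at::Tensor scale_by_two(at::Tensor t) {
  t = t.contiguous();  // linear indexing below assumes a contiguous tensor
  auto out = at::empty_like(t);
  AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "scale_by_two", [&] {
    auto in_data = t.data<scalar_t>();    // data<T>() matches the style used in this codebase
    auto out_data = out.data<scalar_t>();
    for (int64_t i = 0; i < t.numel(); i++) {
      out_data[i] = in_data[i] * static_cast<scalar_t>(2);
    }
  });
  return out;
}
```
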
@@ -6,7 +6,7 @@ at::Tensor weighting_fw(at::Tensor x, at::Tensor weight, at::Tensor basis,
auto S = basis.size(1);
auto out = at::empty({E, M_out}, x.options());
-AT_DISPATCH_FLOATING_TYPES(out.type(), "weighting_fw", [&] {
+AT_DISPATCH_FLOATING_TYPES(out.scalar_type(), "weighting_fw", [&] {
auto x_data = x.data<scalar_t>();
auto weight_data = weight.data<scalar_t>();
auto basis_data = basis.data<scalar_t>();
@@ -43,7 +43,7 @@ at::Tensor weighting_bw_x(at::Tensor grad_out, at::Tensor weight,
auto S = basis.size(1);
auto grad_x = at::zeros({E, M_in}, grad_out.options());
-AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_x", [&] {
+AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_x", [&] {
auto grad_out_data = grad_out.data<scalar_t>();
auto weight_data = weight.data<scalar_t>();
auto basis_data = basis.data<scalar_t>();
@@ -77,7 +77,7 @@ at::Tensor weighting_bw_w(at::Tensor grad_out, at::Tensor x, at::Tensor basis,
auto S = basis.size(1);
auto grad_weight = at::zeros({K, M_in, M_out}, grad_out.options());
-AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_w", [&] {
+AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_w", [&] {
auto grad_out_data = grad_out.data<scalar_t>();
auto x_data = x.data<scalar_t>();
auto basis_data = basis.data<scalar_t>();
@@ -109,7 +109,7 @@ at::Tensor weighting_bw_b(at::Tensor grad_out, at::Tensor x, at::Tensor weight,
auto S = weight_index.size(1);
auto grad_basis = at::zeros({E, S}, grad_out.options());
-AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_b", [&] {
+AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_b", [&] {
auto grad_out_data = grad_out.data<scalar_t>();
auto x_data = x.data<scalar_t>();
auto weight_data = weight.data<scalar_t>();
......
@@ -39,7 +39,8 @@ template <typename scalar_t> struct BasisForward {
auto basis = at::empty({E, S}, PSEUDO.options()); \
auto weight_index = at::empty({E, S}, KERNEL_SIZE.options()); \
\
-AT_DISPATCH_FLOATING_TYPES(PSEUDO.type(), "basis_forward_##M", [&] { \
+AT_DISPATCH_FLOATING_TYPES( \
+PSEUDO.scalar_type(), "basis_forward_##M", [&] { \
KERNEL_NAME<scalar_t><<<BLOCKS(basis.numel()), THREADS>>>( \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(basis), \
at::cuda::detail::getTensorInfo<int64_t, int64_t>(weight_index), \
@@ -169,7 +170,8 @@ template <typename scalar_t> struct BasisBackward {
auto D = PSEUDO.size(1); \
auto grad_pseudo = at::empty({E, D}, PSEUDO.options()); \
\
-AT_DISPATCH_FLOATING_TYPES(GRAD_BASIS.type(), "basis_backward_##M", [&] { \
+AT_DISPATCH_FLOATING_TYPES( \
+GRAD_BASIS.scalar_type(), "basis_backward_##M", [&] { \
KERNEL_NAME<scalar_t><<<BLOCKS(grad_pseudo.numel()), THREADS>>>( \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_pseudo), \
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(GRAD_BASIS), \
......
@@ -42,7 +42,7 @@ at::Tensor weighting_fw_cuda(at::Tensor x, at::Tensor weight, at::Tensor basis,
cudaSetDevice(x.get_device());
auto E = x.size(0), M_out = weight.size(2);
auto out = at::empty({E, M_out}, x.options());
-AT_DISPATCH_FLOATING_TYPES(out.type(), "weighting_fw", [&] {
+AT_DISPATCH_FLOATING_TYPES(out.scalar_type(), "weighting_fw", [&] {
weighting_fw_kernel<scalar_t><<<BLOCKS(out.numel()), THREADS>>>(
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(x),
@@ -91,7 +91,7 @@ at::Tensor weighting_bw_x_cuda(at::Tensor grad_out, at::Tensor weight,
auto E = grad_out.size(0), M_in = weight.size(1);
auto grad_x = at::empty({E, M_in}, grad_out.options());
weight = weight.transpose(1, 2).contiguous();
-AT_DISPATCH_FLOATING_TYPES(grad_x.type(), "weighting_bw_x", [&] {
+AT_DISPATCH_FLOATING_TYPES(grad_x.scalar_type(), "weighting_bw_x", [&] {
weighting_bw_x_kernel<scalar_t><<<BLOCKS(grad_x.numel()), THREADS>>>(
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_x),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out),
@@ -136,7 +136,7 @@ at::Tensor weighting_bw_w_cuda(at::Tensor grad_out, at::Tensor x,
cudaSetDevice(grad_out.get_device());
auto M_in = x.size(1), M_out = grad_out.size(1);
auto grad_weight = at::zeros({K, M_in, M_out}, grad_out.options());
-AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_w", [&] {
+AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_w", [&] {
weighting_bw_w_kernel<scalar_t><<<BLOCKS(grad_out.numel()), THREADS>>>(
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_weight),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out),
@@ -181,7 +181,7 @@ at::Tensor weighting_bw_b_cuda(at::Tensor grad_out, at::Tensor x,
cudaSetDevice(grad_out.get_device());
auto E = x.size(0), S = weight_index.size(1);
auto grad_basis = at::zeros({E, S}, grad_out.options());
-AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "weighting_bw_b", [&] {
+AT_DISPATCH_FLOATING_TYPES(grad_out.scalar_type(), "weighting_bw_b", [&] {
weighting_bw_b_kernel<scalar_t><<<BLOCKS(grad_out.numel()), THREADS>>>(
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_basis),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad_out),
......
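
The CUDA call sites above follow the same `scalar_type()` migration around their kernel launches. A rough sketch of that shape, with hypothetical names (`scale_kernel`, `scale_cuda`) and with `THREADS`/`BLOCKS` defined here only for illustration:

```cuda
#include <ATen/ATen.h>

#define THREADS 1024
#define BLOCKS(N) (((N) + THREADS - 1) / THREADS)

// Hypothetical elementwise kernel, used only to show the dispatch shape.
template <typename scalar_t>
__global__ void scale_kernel(scalar_t *out, const scalar_t *in, int64_t numel) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < numel) out[i] = in[i] * static_cast<scalar_t>(2);
}

at::Tensor scale_cuda(at::Tensor x) {
  cudaSetDevice(x.get_device());
  x = x.contiguous();
  auto out = at::empty_like(x);
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "scale_cuda", [&] {
    scale_kernel<scalar_t><<<BLOCKS(out.numel()), THREADS>>>(
        out.data<scalar_t>(), x.data<scalar_t>(), out.numel());
  });
  return out;
}
```
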
@@ -16,7 +16,7 @@ if CUDA_HOME is not None:
['cuda/weighting.cpp', 'cuda/weighting_kernel.cu']),
]
-__version__ = '1.0.6'
+__version__ = '1.1.0'
url = 'https://github.com/rusty1s/pytorch_spline_conv'
install_requires = []
......
@@ -2,6 +2,6 @@ from .basis import SplineBasis
from .weighting import SplineWeighting
from .conv import SplineConv
-__version__ = '1.0.6'
+__version__ = '1.1.0'
__all__ = ['SplineBasis', 'SplineWeighting', 'SplineConv', '__version__']