Commit ac26fc19 authored by rusty1s's avatar rusty1s
Browse files

prepare tracing

parent d3169766
...@@ -3,5 +3,6 @@ source=torch_spline_conv ...@@ -3,5 +3,6 @@ source=torch_spline_conv
[report] [report]
exclude_lines = exclude_lines =
pragma: no cover pragma: no cover
cuda torch.jit.script
backward raise
except
__pycache__/ __pycache__/
_ext/
build/ build/
dist/ dist/
.cache/ .cache/
......
Copyright (c) 2019 Matthias Fey <matthias.fey@tu-dortmund.de> Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
......
include README.md
include LICENSE include LICENSE
recursive-include cpu *
recursive-include cuda * recursive-exclude test *
recursive-include csrc *
...@@ -21,11 +21,30 @@ The operator works on all floating point data types and is implemented both for ...@@ -21,11 +21,30 @@ The operator works on all floating point data types and is implemented both for
## Installation ## Installation
Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*: ### Binaries
We provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://pytorch-geometric.com/whl).
To install the binaries for PyTorch 1.4.0, simply run
```
pip install torch-spline-conv==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-1.4.0.html
```
where `${CUDA}` should be replaced by either `cpu`, `cu92`, `cu100` or `cu101` depending on your PyTorch installation.
| | `cpu` | `cu92` | `cu100` | `cu101` |
|-------------|-------|--------|---------|---------|
| **Linux** | ✅ | ✅ | ✅ | ✅ |
| **Windows** | ✅ | ❌ | ❌ | ✅ |
| **macOS** | ✅ | | | |
### From source
Ensure that at least PyTorch 1.4.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
``` ```
$ python -c "import torch; print(torch.__version__)" $ python -c "import torch; print(torch.__version__)"
>>> 1.1.0 >>> 1.4.0
$ echo $PATH $ echo $PATH
>>> /usr/local/cuda/bin:... >>> /usr/local/cuda/bin:...
...@@ -40,15 +59,19 @@ Then run: ...@@ -40,15 +59,19 @@ Then run:
pip install torch-spline-conv pip install torch-spline-conv
``` ```
If you are running into any installation problems, please create an [issue](https://github.com/rusty1s/pytorch_spline_conv/issues). When running in a docker container without NVIDIA driver, PyTorch needs to evaluate the compute capabilities and may fail.
Be sure to import `torch` first before using this package to resolve symbols the dynamic linker must see. In this case, ensure that the compute capabilities are set via `TORCH_CUDA_ARCH_LIST`, *e.g.*:
```
export TORCH_CUDA_ARCH_LIST="6.0 6.1 7.2+PTX 7.5+PTX"
```
## Usage ## Usage
```python ```python
from torch_spline_conv import SplineConv from torch_spline_conv import spline_conv
out = SplineConv.apply(x, out = spline_conv(x,
edge_index, edge_index,
pseudo, pseudo,
weight, weight,
...@@ -93,7 +116,7 @@ The kernel function is defined over the weighted B-spline tensor product basis, ...@@ -93,7 +116,7 @@ The kernel function is defined over the weighted B-spline tensor product basis,
```python ```python
import torch import torch
from torch_spline_conv import SplineConv from torch_spline_conv import spline_conv
x = torch.rand((4, 2), dtype=torch.float) # 4 nodes with 2 features each x = torch.rand((4, 2), dtype=torch.float) # 4 nodes with 2 features each
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) # 6 edges edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) # 6 edges
...@@ -106,7 +129,7 @@ norm = True # Normalize output by node degree. ...@@ -106,7 +129,7 @@ norm = True # Normalize output by node degree.
root_weight = torch.rand((2, 4), dtype=torch.float) # separately weight root nodes root_weight = torch.rand((2, 4), dtype=torch.float) # separately weight root nodes
bias = None # do not apply an additional bias bias = None # do not apply an additional bias
out = SplineConv.apply(x, edge_index, pseudo, weight, kernel_size, out = spline_conv(x, edge_index, pseudo, weight, kernel_size,
is_open_spline, degree, norm, root_weight, bias) is_open_spline, degree, norm, root_weight, bias)
print(out.size()) print(out.size())
......
import os
import os.path as osp
import glob
from setuptools import setup, find_packages from setuptools import setup, find_packages
from sys import argv
import torch import torch
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME
TORCH_MAJOR = int(torch.__version__.split('.')[0]) WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TORCH_MINOR = int(torch.__version__.split('.')[1]) if os.getenv('FORCE_CUDA', '0') == '1':
WITH_CUDA = True
if os.getenv('FORCE_CPU', '0') == '1':
WITH_CUDA = False
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
def get_extensions():
Extension = CppExtension
define_macros = []
extra_compile_args = {'cxx': []}
if WITH_CUDA:
Extension = CUDAExtension
define_macros += [('WITH_CUDA', None)]
nvcc_flags = os.getenv('NVCC_FLAGS', '')
nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
extra_compile_args['nvcc'] = nvcc_flags
extensions_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'csrc')
main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
extensions = []
for main in main_files:
name = main.split(os.sep)[-1][:-4]
extra_compile_args = [] sources = [main]
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
extra_compile_args += ['-DVERSION_GE_1_3']
ext_modules = [ path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
CppExtension('torch_spline_conv.basis_cpu', ['cpu/basis.cpp'], if osp.exists(path):
extra_compile_args=extra_compile_args), sources += [path]
CppExtension('torch_spline_conv.weighting_cpu', ['cpu/weighting.cpp'],
extra_compile_args=extra_compile_args),
]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}
GPU = True path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
for arg in argv: if WITH_CUDA and osp.exists(path):
if arg == '--cpu': sources += [path]
GPU = False
argv.remove(arg)
if CUDA_HOME is not None and GPU: extension = Extension(
ext_modules += [ 'torch_spline_conv._' + name,
CUDAExtension('torch_spline_conv.basis_cuda', sources,
['cuda/basis.cpp', 'cuda/basis_kernel.cu'], include_dirs=[extensions_dir],
extra_compile_args=extra_compile_args), define_macros=define_macros,
CUDAExtension('torch_spline_conv.weighting_cuda', extra_compile_args=extra_compile_args,
['cuda/weighting.cpp', 'cuda/weighting_kernel.cu'], )
extra_compile_args=extra_compile_args), extensions += [extension]
]
return extensions
__version__ = '1.1.1'
url = 'https://github.com/rusty1s/pytorch_spline_conv'
install_requires = [] install_requires = []
setup_requires = ['pytest-runner'] setup_requires = ['pytest-runner']
...@@ -43,23 +62,26 @@ tests_require = ['pytest', 'pytest-cov'] ...@@ -43,23 +62,26 @@ tests_require = ['pytest', 'pytest-cov']
setup( setup(
name='torch_spline_conv', name='torch_spline_conv',
version=__version__, version='1.2.0',
description=('Implementation of the Spline-Based Convolution Operator of '
'SplineCNN in PyTorch'),
author='Matthias Fey', author='Matthias Fey',
author_email='matthias.fey@tu-dortmund.de', author_email='matthias.fey@tu-dortmund.de',
url=url, url='https://github.com/rusty1s/pytorch_spline_conv',
download_url='{}/archive/{}.tar.gz'.format(url, __version__), description=('Implementation of the Spline-Based Convolution Operator of '
'SplineCNN in PyTorch'),
keywords=[ keywords=[
'pytorch', 'pytorch',
'geometric-deep-learning', 'geometric-deep-learning',
'graph-neural-networks', 'graph-neural-networks',
'spline-cnn', 'spline-cnn',
], ],
license='MIT',
python_requires='>=3.6',
install_requires=install_requires, install_requires=install_requires,
setup_requires=setup_requires, setup_requires=setup_requires,
tests_require=tests_require, tests_require=tests_require,
ext_modules=ext_modules, ext_modules=get_extensions() if not BUILD_DOCS else [],
cmdclass=cmdclass, cmdclass={
'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)
},
packages=find_packages(), packages=find_packages(),
) )
...@@ -2,7 +2,7 @@ from itertools import product ...@@ -2,7 +2,7 @@ from itertools import product
import pytest import pytest
import torch import torch
from torch_spline_conv.basis import SplineBasis from torch_spline_conv import spline_basis
from .utils import dtypes, devices, tensor from .utils import dtypes, devices, tensor
...@@ -34,7 +34,7 @@ def test_spline_basis_forward(test, dtype, device): ...@@ -34,7 +34,7 @@ def test_spline_basis_forward(test, dtype, device):
is_open_spline = tensor(test['is_open_spline'], torch.uint8, device) is_open_spline = tensor(test['is_open_spline'], torch.uint8, device)
degree = 1 degree = 1
op = SplineBasis.apply basis, weight_index = spline_basis(pseudo, kernel_size, is_open_spline,
basis, weight_index = op(pseudo, kernel_size, is_open_spline, degree) degree)
assert basis.tolist() == test['basis'] assert basis.tolist() == test['basis']
assert weight_index.tolist() == test['weight_index'] assert weight_index.tolist() == test['weight_index']
...@@ -3,11 +3,12 @@ from itertools import product ...@@ -3,11 +3,12 @@ from itertools import product
import pytest import pytest
import torch import torch
from torch.autograd import gradcheck from torch.autograd import gradcheck
from torch_spline_conv import SplineConv from torch_spline_conv import spline_conv
from torch_spline_conv.basis import implemented_degrees as degrees
from .utils import dtypes, devices, tensor from .utils import dtypes, devices, tensor
degrees = [1, 2, 3]
tests = [{ tests = [{
'x': [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]], 'x': [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]],
'edge_index': [[0, 0, 0, 0], [1, 2, 3, 4]], 'edge_index': [[0, 0, 0, 0], [1, 2, 3, 4]],
...@@ -51,12 +52,12 @@ def test_spline_conv_forward(test, dtype, device): ...@@ -51,12 +52,12 @@ def test_spline_conv_forward(test, dtype, device):
root_weight = tensor(test['root_weight'], dtype, device) root_weight = tensor(test['root_weight'], dtype, device)
bias = tensor(test['bias'], dtype, device) bias = tensor(test['bias'], dtype, device)
out = SplineConv.apply(x, edge_index, pseudo, weight, kernel_size, out = spline_conv(x, edge_index, pseudo, weight, kernel_size,
is_open_spline, 1, True, root_weight, bias) is_open_spline, 1, True, root_weight, bias)
assert out.tolist() == test['expected'] assert out.tolist() == test['expected']
@pytest.mark.parametrize('degree,device', product(degrees.keys(), devices)) @pytest.mark.parametrize('degree,device', product(degrees, devices))
def test_spline_basis_backward(degree, device): def test_spline_basis_backward(degree, device):
x = torch.rand((3, 2), dtype=torch.double, device=device) x = torch.rand((3, 2), dtype=torch.double, device=device)
x.requires_grad_() x.requires_grad_()
...@@ -74,4 +75,4 @@ def test_spline_basis_backward(degree, device): ...@@ -74,4 +75,4 @@ def test_spline_basis_backward(degree, device):
data = (x, edge_index, pseudo, weight, kernel_size, is_open_spline, degree, data = (x, edge_index, pseudo, weight, kernel_size, is_open_spline, degree,
True, root_weight, bias) True, root_weight, bias)
assert gradcheck(SplineConv.apply, data, eps=1e-6, atol=1e-4) is True assert gradcheck(spline_conv, data, eps=1e-6, atol=1e-4) is True
...@@ -3,8 +3,7 @@ from itertools import product ...@@ -3,8 +3,7 @@ from itertools import product
import pytest import pytest
import torch import torch
from torch.autograd import gradcheck from torch.autograd import gradcheck
from torch_spline_conv.weighting import SplineWeighting from torch_spline_conv import spline_weighting, spline_basis
from torch_spline_conv.basis import SplineBasis
from .utils import dtypes, devices, tensor from .utils import dtypes, devices, tensor
...@@ -27,7 +26,7 @@ def test_spline_weighting_forward(test, dtype, device): ...@@ -27,7 +26,7 @@ def test_spline_weighting_forward(test, dtype, device):
basis = tensor(test['basis'], dtype, device) basis = tensor(test['basis'], dtype, device)
weight_index = tensor(test['weight_index'], torch.long, device) weight_index = tensor(test['weight_index'], torch.long, device)
out = SplineWeighting.apply(x, weight, basis, weight_index) out = spline_weighting(x, weight, basis, weight_index)
assert out.tolist() == test['expected'] assert out.tolist() == test['expected']
...@@ -38,8 +37,8 @@ def test_spline_weighting_backward(device): ...@@ -38,8 +37,8 @@ def test_spline_weighting_backward(device):
is_open_spline = tensor([1, 1], torch.uint8, device) is_open_spline = tensor([1, 1], torch.uint8, device)
degree = 1 degree = 1
op = SplineBasis.apply basis, weight_index = spline_basis(pseudo, kernel_size, is_open_spline,
basis, weight_index = op(pseudo, kernel_size, is_open_spline, degree) degree)
basis.requires_grad_() basis.requires_grad_()
x = torch.rand((4, 2), dtype=torch.double, device=device) x = torch.rand((4, 2), dtype=torch.double, device=device)
...@@ -48,4 +47,4 @@ def test_spline_weighting_backward(device): ...@@ -48,4 +47,4 @@ def test_spline_weighting_backward(device):
weight.requires_grad_() weight.requires_grad_()
data = (x, weight, basis, weight_index) data = (x, weight, basis, weight_index)
assert gradcheck(SplineWeighting.apply, data, eps=1e-6, atol=1e-4) is True assert gradcheck(spline_weighting, data, eps=1e-6, atol=1e-4) is True
...@@ -4,7 +4,7 @@ dtypes = [torch.float, torch.double] ...@@ -4,7 +4,7 @@ dtypes = [torch.float, torch.double]
devices = [torch.device('cpu')] devices = [torch.device('cpu')]
if torch.cuda.is_available(): if torch.cuda.is_available():
devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))] devices += [torch.device(f'cuda:{torch.cuda.current_device()}')]
def tensor(x, dtype, device): def tensor(x, dtype, device):
......
from .basis import SplineBasis import importlib
from .weighting import SplineWeighting import os.path as osp
from .conv import SplineConv
__version__ = '1.1.1' import torch
__all__ = ['SplineBasis', 'SplineWeighting', 'SplineConv', '__version__'] __version__ = '1.2.0'
expected_torch_version = (1, 4)
try:
for library in ['_version', '_basis', '_weighting']:
torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
library, [osp.dirname(__file__)]).origin)
except OSError as e:
major, minor = [int(x) for x in torch.__version__.split('.')[:2]]
t_major, t_minor = expected_torch_version
if major != t_major or (major == t_major and minor != t_minor):
raise RuntimeError(
f'Expected PyTorch version {t_major}.{t_minor} but found '
f'version {major}.{minor}.')
raise OSError(e)
if torch.version.cuda is not None: # pragma: no cover
cuda_version = torch.ops.torch_spline_conv.cuda_version()
if cuda_version == -1:
major = minor = 0
elif cuda_version < 10000:
major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
else:
major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]
if t_major != major or t_minor != minor:
raise RuntimeError(
f'Detected that PyTorch and torch_spline_conv were compiled with '
f'different CUDA versions. PyTorch has CUDA version '
f'{t_major}.{t_minor} and torch_spline_conv has CUDA version '
f'{major}.{minor}. Please reinstall the torch_spline_conv that '
f'matches your PyTorch install.')
from .basis import spline_basis # noqa
from .weighting import spline_weighting # noqa
from .conv import spline_conv # noqa
__all__ = [
'spline_basis',
'spline_weighting',
'spline_conv',
'__version__',
]
import torch from typing import Tuple
import torch_spline_conv.basis_cpu
if torch.cuda.is_available():
import torch_spline_conv.basis_cuda
implemented_degrees = {1: 'linear', 2: 'quadratic', 3: 'cubic'} import torch
def get_func(name, tensor): @torch.jit.script
if tensor.is_cuda: def spline_basis(pseudo: torch.Tensor, kernel_size: torch.Tensor,
return getattr(torch_spline_conv.basis_cuda, name) is_open_spline: torch.Tensor,
else: degree: int) -> Tuple[torch.Tensor, torch.Tensor]:
return getattr(torch_spline_conv.basis_cpu, name) return torch.ops.torch_spline_conv.spline_basis(pseudo, kernel_size,
is_open_spline, degree)
class SplineBasis(torch.autograd.Function): # class SplineBasis(torch.autograd.Function):
@staticmethod # @staticmethod
def forward(ctx, pseudo, kernel_size, is_open_spline, degree): # def forward(ctx, pseudo, kernel_size, is_open_spline, degree):
ctx.save_for_backward(pseudo) # ctx.save_for_backward(pseudo)
ctx.kernel_size = kernel_size # ctx.kernel_size = kernel_size
ctx.is_open_spline = is_open_spline # ctx.is_open_spline = is_open_spline
ctx.degree = degree # ctx.degree = degree
op = get_func('{}_fw'.format(implemented_degrees[degree]), pseudo) # op = get_func('{}_fw'.format(implemented_degrees[degree]), pseudo)
basis, weight_index = op(pseudo, kernel_size, is_open_spline) # basis, weight_index = op(pseudo, kernel_size, is_open_spline)
return basis, weight_index # return basis, weight_index
@staticmethod # @staticmethod
def backward(ctx, grad_basis, grad_weight_index): # def backward(ctx, grad_basis, grad_weight_index):
pseudo, = ctx.saved_tensors # pseudo, = ctx.saved_tensors
kernel_size, is_open_spline = ctx.kernel_size, ctx.is_open_spline # kernel_size, is_open_spline = ctx.kernel_size, ctx.is_open_spline
degree = ctx.degree # degree = ctx.degree
grad_pseudo = None # grad_pseudo = None
if ctx.needs_input_grad[0]: # if ctx.needs_input_grad[0]:
op = get_func('{}_bw'.format(implemented_degrees[degree]), pseudo) # grad_pseudo = op(grad_basis, pseudo, kernel_size, is_open_spline)
grad_pseudo = op(grad_basis, pseudo, kernel_size, is_open_spline)
return grad_pseudo, None, None, None # return grad_pseudo, None, None, None
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment