Commit 36ed7951 authored by rusty1s

new aten build

parent 7f7b2b0a
setup.py
@@ -8,9 +8,11 @@ from torch.utils.ffi import create_extension
 if osp.exists('build'):
     shutil.rmtree('build')
 
-headers = ['torch_spline_conv/src/cpu.h']
-sources = ['torch_spline_conv/src/cpu.c']
-include_dirs = ['torch_spline_conv/src']
+files = ['Basis']
+
+headers = ['aten/TH/TH{}.h'.format(f) for f in files]
+sources = ['aten/TH/TH{}.c'.format(f) for f in files]
+include_dirs = ['aten/TH']
 define_macros = []
 extra_objects = []
 with_cuda = False
@@ -18,11 +20,11 @@ with_cuda = False
 if torch.cuda.is_available():
     subprocess.call(['./build.sh', osp.dirname(torch.__file__)])
-    headers += ['torch_spline_conv/src/cuda.h']
-    sources += ['torch_spline_conv/src/cuda.c']
-    include_dirs += ['torch_spline_conv/kernel']
+    headers += ['aten/THCC/THCC{}.h'.format(f) for f in files]
+    sources += ['aten/THCC/THCC{}.c'.format(f) for f in files]
+    include_dirs += ['aten/THCC']
     define_macros += [('WITH_CUDA', None)]
-    extra_objects += ['torch_spline_conv/build/kernel.so']
+    extra_objects += ['torch_spline_conv/_ext/THC.so']
     with_cuda = True
 
 ffi = create_extension(
...
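For context: `create_extension` is the cffi-based extension builder from `torch.utils.ffi` that shipped with the PyTorch 0.x series (and was removed later). The call is truncated in this view; given the variables assembled above and the `from .._ext import ffi` import further down, it plausibly continues along these lines (the extension name and exact keyword list are assumptions, not part of the diff):

ffi = create_extension(
    'torch_spline_conv._ext.ffi',  # assumed name, matching `from .._ext import ffi`
    headers=headers,
    sources=sources,
    include_dirs=include_dirs,
    define_macros=define_macros,
    extra_objects=extra_objects,
    with_cuda=with_cuda,
    relative_to=__file__,
)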
build.sh
@@ -3,8 +3,8 @@
 echo "Compiling kernel..."
 
 if [ -z "$1" ]; then TORCH=$(python -c "import os; import torch; print(os.path.dirname(torch.__file__))"); else TORCH="$1"; fi
 
-SRC_DIR=torch_spline_conv/kernel
-BUILD_DIR=torch_spline_conv/build
+SRC_DIR=aten/THC
+BUILD_DIR=torch_spline_conv/_ext
 mkdir -p "$BUILD_DIR"
-$(which nvcc) -c -o "$BUILD_DIR/kernel.so" "$SRC_DIR/kernel.cu" -arch=sm_35 -Xcompiler -fPIC -shared "-I$TORCH/lib/include/TH" "-I$TORCH/lib/include/THC" "-I$SRC_DIR"
+$(which nvcc) -c -o "$BUILD_DIR/THC.so" "$SRC_DIR/THC.cu" -arch=sm_52 -Xcompiler -fPIC -shared "-I$TORCH/lib/include/TH" "-I$TORCH/lib/include" "-I$SRC_DIR"
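Two details worth noting: the switch from `-arch=sm_35` to `-arch=sm_52` raises the minimum supported GPU architecture from Kepler to Maxwell, and `nvcc -c` emits a relocatable object file (despite the `.so` suffix) that setup.py links into the cffi extension via `extra_objects` rather than loading it at runtime. The script can also be invoked by hand; the argument is optional and mirrors what setup.py passes in:

./build.sh "$(python -c 'import os, torch; print(os.path.dirname(torch.__file__))')"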
setup.cfg
@@ -5,4 +5,4 @@ description-file = README.md
 test=pytest
 
 [tool:pytest]
-addopts = --capture=no --cov
+addopts = --capture=no
tensor.py (new test helper, imported below as `.tensor`)
+tensors = ['FloatTensor', 'DoubleTensor']
basis tests
-from os import path as osp
 from itertools import product
 
 import pytest
-import json
 import torch
-from torch_spline_conv.functions.ffi import spline_basis_forward
+from torch_spline_conv.basis import basis_forward
 
-from .utils import tensors, Tensor
+from .tensor import tensors
 
-f = open(osp.join(osp.dirname(__file__), 'basis.json'), 'r')
-data = json.load(f)
-f.close()
+tests = [{
+    'pseudo': [0, 0.0625, 0.25, 0.75, 0.9375, 1],
+    'kernel_size': [5],
+    'is_open_spline': [1],
+    'basis': [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]],
+    'weight_index': [[0, 1], [0, 1], [1, 2], [3, 4], [3, 4], [4, 0]],
+}, {
+    'pseudo': [0, 0.0625, 0.25, 0.75, 0.9375, 1],
+    'kernel_size': [4],
+    'is_open_spline': [0],
+    'basis': [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]],
+    'weight_index': [[0, 1], [0, 1], [1, 2], [3, 0], [3, 0], [0, 1]],
+}, {
+    'pseudo': [[0.125, 0.5], [0.5, 0.5], [0.75, 0.125]],
+    'kernel_size': [5, 5],
+    'is_open_spline': [1, 1],
+    'basis': [[0.5, 0.5, 0, 0], [1, 0, 0, 0], [0.5, 0, 0.5, 0]],
+    'weight_index': [[10, 11, 15, 16], [12, 13, 17, 18], [3, 4, 8, 9]]
+}]
 
 
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_spline_basis_cpu(tensor, i):
-    degree = data[i].get('degree')
-    pseudo = Tensor(tensor, data[i]['pseudo'])
-    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
-    kernel_size = torch.LongTensor(data[i]['kernel_size'])
-    is_open_spline = torch.ByteTensor(data[i]['is_open_spline'])
-    K = kernel_size.prod()
-    expected_basis = Tensor(tensor, data[i]['expected_basis'])
-    expected_index = torch.LongTensor(data[i]['expected_index'])
-
-    basis, index = spline_basis_forward(degree, pseudo, kernel_size,
-                                        is_open_spline, K)
-    basis = [pytest.approx(b, 0.01) for b in basis.view(-1).tolist()]
-
-    assert basis == expected_basis.view(-1).tolist()
-    assert index.tolist() == expected_index.tolist()
+@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
+def test_basis_forward_cpu(tensor, i):
+    data = tests[i]
+
+    pseudo = getattr(torch, tensor)(data['pseudo'])
+    kernel_size = torch.LongTensor(data['kernel_size'])
+    is_open_spline = torch.ByteTensor(data['is_open_spline'])
+
+    basis, weight_index = basis_forward(1, pseudo, kernel_size, is_open_spline)
+    assert basis.tolist() == data['basis']
+    assert weight_index.tolist() == data['weight_index']
 
 
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_spline_basis_gpu(tensor, i):  # pragma: no cover
-    degree = data[i].get('degree')
-    pseudo = Tensor(tensor, data[i]['pseudo']).cuda()
-    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
-    kernel_size = torch.cuda.LongTensor(data[i]['kernel_size'])
-    is_open_spline = torch.cuda.ByteTensor(data[i]['is_open_spline'])
-    K = kernel_size.prod()
-    expected_basis = Tensor(tensor, data[i]['expected_basis'])
-    expected_index = torch.LongTensor(data[i]['expected_index'])
-
-    basis, index = spline_basis_forward(degree, pseudo, kernel_size,
-                                        is_open_spline, K)
-    basis, index = basis.cpu(), index.cpu()
-    basis = [pytest.approx(b, 0.01) for b in basis.view(-1).tolist()]
-
-    assert basis == expected_basis.view(-1).tolist()
-    assert index.tolist() == expected_index.tolist()
+def test_basis_forward_gpu():  # pragma: no cover
+    pseudo = torch.cuda.FloatTensor([0, 0.0625, 0.25, 0.75, 0.9375, 1])
+    kernel_size = torch.cuda.LongTensor([5])
+    is_open_spline = torch.cuda.ByteTensor([1])
+
+    basis, weight_index = basis_forward(1, pseudo, kernel_size, is_open_spline)
+    print(basis.cpu().tolist())
+    print(weight_index.cpu().tolist())
+    # 'basis': [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]],
+    # 'weight_index': [[0, 1], [0, 1], [1, 2], [3, 4], [3, 4], [4, 0]],

(The new file additionally keeps both previous tests, reproduced verbatim as a commented-out block at its end.)
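To make the fixtures above less magic, here is a small pure-PyTorch sketch (an illustration, not the commit's C/CUDA implementation) of how the degree-1 entries can be reproduced: each pseudo-coordinate is scaled onto the kernel grid, and its fractional part splits the weight between the two neighboring knots, wrapping around for closed splines.

import torch
from itertools import product


def linear_basis_reference(pseudo, kernel_size, is_open_spline):
    """Pure-Python sketch of the degree-1 basis tabulated above.

    pseudo: (N, d) coordinates in [0, 1]; kernel_size, is_open_spline: (d,).
    """
    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
    N, d = pseudo.size(0), pseudo.size(1)
    basis = torch.zeros(N, 2**d)
    index = torch.zeros(N, 2**d).long()
    for n in range(N):
        # enumerate the 2^d corners of the surrounding grid cell,
        # with dimension 0 varying fastest (matching the fixtures above)
        for s, offsets in enumerate(product([0, 1], repeat=d)):
            b, k, stride = 1.0, 0, 1
            for dim, o in zip(range(d), reversed(offsets)):
                K = int(kernel_size[dim])
                # open splines span K - 1 intervals; closed ones wrap over K
                v = float(pseudo[n, dim]) * (K - int(is_open_spline[dim]))
                frac = v - int(v)
                b *= frac if o else 1 - frac
                k += ((int(v) + o) % K) * stride
                stride *= K
            basis[n, s], index[n, s] = b, k
    return basis, index

For example, `pseudo = 0.0625` with `kernel_size = [5]` and an open spline scales to `v = 0.25`, which splits as `0.75/0.25` between knots 0 and 1, exactly the second row of the first fixture.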
spline_conv tests
(This commit comments out the entire module, since `spline_conv` itself is reduced to a stub below; the previous content, now preserved verbatim behind `#` markers, was:)

import pytest
import torch
from torch.autograd import Variable, gradcheck
from torch_spline_conv import spline_conv
from torch_spline_conv.functions.spline_weighting import SplineWeighting
from torch_spline_conv.functions.ffi import implemented_degrees

from .utils import tensors, Tensor


@pytest.mark.parametrize('tensor', tensors)
def test_spline_conv_cpu(tensor):
    x = Tensor(tensor, [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]])
    edge_index = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])
    pseudo = [[0.25, 0.125], [0.25, 0.375], [0.75, 0.625], [0.75, 0.875]]
    pseudo = Tensor(tensor, pseudo)
    weight = torch.arange(0.5, 0.5 * 25, step=0.5, out=x.new()).view(12, 2, 1)
    kernel_size = torch.LongTensor([3, 4])
    is_open_spline = torch.ByteTensor([1, 0])
    root_weight = torch.arange(12.5, 13.5, step=0.5, out=x.new()).view(2, 1)
    bias = Tensor(tensor, [1])

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)

    edgewise_output = [
        1 * 0.25 * (0.5 + 1.5 + 4.5 + 5.5) + 2 * 0.25 * (1 + 2 + 5 + 6),
        3 * 0.25 * (1.5 + 2.5 + 5.5 + 6.5) + 4 * 0.25 * (2 + 3 + 6 + 7),
        5 * 0.25 * (6.5 + 7.5 + 10.5 + 11.5) + 6 * 0.25 * (7 + 8 + 11 + 12),
        7 * 0.25 * (7.5 + 4.5 + 11.5 + 8.5) + 8 * 0.25 * (8 + 5 + 12 + 9),
    ]

    expected_output = [
        [1 + 12.5 * 9 + 13 * 10 + sum(edgewise_output) / 4],
        [1 + 12.5 * 1 + 13 * 2],
        [1 + 12.5 * 3 + 13 * 4],
        [1 + 12.5 * 5 + 13 * 6],
        [1 + 12.5 * 7 + 13 * 8],
    ]

    assert output.tolist() == expected_output

    x, weight, pseudo = Variable(x), Variable(weight), Variable(pseudo)
    root_weight, bias = Variable(root_weight), Variable(bias)

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)

    assert output.data.tolist() == expected_output


def test_spline_weighting_backward_cpu():
    for degree in implemented_degrees.keys():
        kernel_size = torch.LongTensor([5, 5, 5])
        is_open_spline = torch.ByteTensor([1, 0, 1])
        op = SplineWeighting(kernel_size, is_open_spline, degree)

        x = torch.DoubleTensor(16, 2).uniform_(-1, 1)
        x = Variable(x, requires_grad=True)
        pseudo = torch.DoubleTensor(16, 3).uniform_(0, 1)
        pseudo = Variable(pseudo, requires_grad=True)
        weight = torch.DoubleTensor(25, 2, 4).uniform_(-1, 1)
        weight = Variable(weight, requires_grad=True)

        assert gradcheck(op, (x, pseudo, weight), eps=1e-6, atol=1e-4) is True


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor', tensors)
def test_spline_conv_gpu(tensor):  # pragma: no cover
    x = Tensor(tensor, [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]])
    edge_index = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])
    pseudo = [[0.25, 0.125], [0.25, 0.375], [0.75, 0.625], [0.75, 0.875]]
    pseudo = Tensor(tensor, pseudo)
    weight = torch.arange(0.5, 0.5 * 25, step=0.5, out=x.new()).view(12, 2, 1)
    kernel_size = torch.LongTensor([3, 4])
    is_open_spline = torch.ByteTensor([1, 0])
    root_weight = torch.arange(12.5, 13.5, step=0.5, out=x.new()).view(2, 1)
    bias = Tensor(tensor, [1])

    expected_output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                                  is_open_spline, 1, root_weight, bias)

    x, edge_index, pseudo = x.cuda(), edge_index.cuda(), pseudo.cuda()
    weight, kernel_size = weight.cuda(), kernel_size.cuda()
    is_open_spline, root_weight = is_open_spline.cuda(), root_weight.cuda()
    bias = bias.cuda()

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)
    assert output.cpu().tolist() == expected_output.tolist()

    x, weight, pseudo = Variable(x), Variable(weight), Variable(pseudo)
    root_weight, bias = Variable(root_weight), Variable(bias)
    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)
    assert output.data.cpu().tolist() == expected_output.tolist()


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_spline_weighting_backward_gpu():  # pragma: no cover
    for degree in implemented_degrees.keys():
        kernel_size = torch.cuda.LongTensor([5, 5, 5])
        is_open_spline = torch.cuda.ByteTensor([1, 0, 1])
        op = SplineWeighting(kernel_size, is_open_spline, degree)

        x = torch.cuda.DoubleTensor(16, 2).uniform_(-1, 1)
        x = Variable(x, requires_grad=True)
        pseudo = torch.cuda.DoubleTensor(16, 3).uniform_(0, 1)
        pseudo = Variable(pseudo, requires_grad=False)  # TODO
        weight = torch.cuda.DoubleTensor(25, 2, 4).uniform_(-1, 1)
        weight = Variable(weight, requires_grad=True)

        assert gradcheck(op, (x, pseudo, weight), eps=1e-6, atol=1e-4) is True
torch_spline_conv/__init__.py
-from .functions.spline_conv import spline_conv
+from .spline_conv import spline_conv
 
 __version__ = '0.1.0'
...
torch_spline_conv/basis.py (new file)
from .utils.ffi import basis_forward as ffi_basis_forward


def basis_forward(degree, pseudo, kernel_size, is_open_spline):
    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
    num_nodes, S = pseudo.size(0), (degree + 1)**kernel_size.size(0)

    basis = pseudo.new(num_nodes, S)
    weight_index = kernel_size.new(num_nodes, S)
    ffi_basis_forward(degree, basis, weight_index, pseudo, kernel_size,
                      is_open_spline)

    return basis, weight_index
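Shape-wise, `basis_forward` allocates `S = (degree + 1) ** d` columns for `d` pseudo-coordinate dimensions: one basis value and one flattened weight index per corner of the kernel-grid cell surrounding each point. A quick sketch of the expected shapes (it needs the compiled `_ext` module to actually run; the sizes mirror the gradcheck tests above):

import torch
from torch_spline_conv.basis import basis_forward

pseudo = torch.FloatTensor(16, 3).uniform_(0, 1)  # 16 points, 3-dim pseudo-coordinates
kernel_size = torch.LongTensor([5, 5, 5])
is_open_spline = torch.ByteTensor([1, 0, 1])

basis, weight_index = basis_forward(1, pseudo, kernel_size, is_open_spline)
# degree 1 and d = 3 gives S = (1 + 1) ** 3 = 8
assert basis.size() == (16, 8) and weight_index.size() == (16, 8)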
torch_spline_conv/spline_conv.py (new file)
def spline_conv(x,
                edge_index,
                pseudo,
                weight,
                kernel_size,
                is_open_spline,
                degree=1,
                root_weight=None,
                bias=None):
    pass
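In this commit `spline_conv` is only a stub, which is why the spline_conv test module above is commented out. Judging from those tests, the operator is expected to combine `basis_forward` with per-edge weighting, mean aggregation over incoming edges, a root weight for the target node's own features, and a bias. A hypothetical pure-Python rendering of that expectation (my naming and looping, not the commit's implementation, and its numbers depend on the weight-index layout the kernels use):

from .basis import basis_forward


def spline_conv_reference(x, edge_index, pseudo, weight, kernel_size,
                          is_open_spline, degree=1, root_weight=None,
                          bias=None):
    row, col = edge_index[0], edge_index[1]  # target / source node per edge
    basis, weight_index = basis_forward(degree, pseudo, kernel_size,
                                        is_open_spline)

    out = x.new(x.size(0), weight.size(2)).fill_(0)
    deg = x.new(x.size(0)).fill_(0)
    for e in range(edge_index.size(1)):
        # message: sum over the S interpolation corners of
        # basis[e, s] * x[source] @ weight[weight_index[e, s]]
        msg = sum(basis[e, s] * (x[col[e]].unsqueeze(0) @ weight[weight_index[e, s]])
                  for s in range(basis.size(1)))
        out[row[e]] += msg.squeeze(0)
        deg[row[e]] += 1

    out /= deg.clamp(min=1).unsqueeze(-1)  # mean over incoming edges
    if root_weight is not None:
        out += x @ root_weight  # transform of each node's own features
    if bias is not None:
        out += bias
    return out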
torch_spline_conv/utils/ffi.py (new file)
from .._ext import ffi

implemented_degrees = {1: 'linear', 2: 'quadratic', 3: 'cubic'}


def get_func(name, is_cuda, tensor=None):
    prefix = 'THCC' if is_cuda else 'TH'
    prefix += 'Tensor' if tensor is None else type(tensor).__name__
    return getattr(ffi, '{}_{}'.format(prefix, name))


def get_degree_str(degree):
    degree = implemented_degrees.get(degree)
    assert degree is not None, (
        'No implementation found for specified B-spline degree')
    return degree


def basis_forward(degree, basis, weight_index, pseudo, kernel_size,
                  is_open_spline):
    name = '{}BasisForward'.format(get_degree_str(degree))
    func = get_func(name, basis.is_cuda, basis)
    func(basis, weight_index, pseudo, kernel_size, is_open_spline)
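`get_func` simply assembles the symbol name exported by the generated cffi module: the prefix `TH` for the CPU build or `THCC` for the CUDA build (matching the `aten/THCC` sources in setup.py), followed by the concrete tensor type name and the operation. For example:

import torch
from torch_spline_conv.utils.ffi import get_func

# resolves ffi.THFloatTensor_linearBasisForward on the CPU path ...
func = get_func('linearBasisForward', is_cuda=False, tensor=torch.FloatTensor())
# ... and ffi.THCCFloatTensor_linearBasisForward when is_cuda=True.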