Commit 36ed7951 authored by rusty1s

new aten build

parent 7f7b2b0a
setup.py
@@ -8,9 +8,11 @@ from torch.utils.ffi import create_extension
 if osp.exists('build'):
     shutil.rmtree('build')

-headers = ['torch_spline_conv/src/cpu.h']
-sources = ['torch_spline_conv/src/cpu.c']
-include_dirs = ['torch_spline_conv/src']
+files = ['Basis']
+headers = ['aten/TH/TH{}.h'.format(f) for f in files]
+sources = ['aten/TH/TH{}.c'.format(f) for f in files]
+include_dirs = ['aten/TH']
 define_macros = []
 extra_objects = []
 with_cuda = False
@@ -18,11 +20,11 @@ with_cuda = False
 if torch.cuda.is_available():
     subprocess.call(['./build.sh', osp.dirname(torch.__file__)])

-    headers += ['torch_spline_conv/src/cuda.h']
-    sources += ['torch_spline_conv/src/cuda.c']
-    include_dirs += ['torch_spline_conv/kernel']
+    headers += ['aten/THCC/THCC{}.h'.format(f) for f in files]
+    sources += ['aten/THCC/THCC{}.c'.format(f) for f in files]
+    include_dirs += ['aten/THCC']
     define_macros += [('WITH_CUDA', None)]
-    extra_objects += ['torch_spline_conv/build/kernel.so']
+    extra_objects += ['torch_spline_conv/_ext/THC.so']
     with_cuda = True
ffi = create_extension(
......
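The remainder of the `create_extension(...)` call is elided in this diff. For reference, a plausible completion following the standard `torch.utils.ffi` pattern; the extension name `torch_spline_conv._ext.ffi` is an assumption inferred from the `from .._ext import ffi` import further down, and does not appear in the diff itself:

ffi = create_extension(
    'torch_spline_conv._ext.ffi',  # assumed module name, see note above
    package=False,
    headers=headers,
    sources=sources,
    include_dirs=include_dirs,
    define_macros=define_macros,
    extra_objects=extra_objects,
    relative_to=__file__,
    with_cuda=with_cuda,
)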
build.sh
@@ -3,8 +3,8 @@
echo "Compiling kernel..."
if [ -z "$1" ]; then TORCH=$(python -c "import os; import torch; print(os.path.dirname(torch.__file__))"); else TORCH="$1"; fi
SRC_DIR=torch_spline_conv/kernel
BUILD_DIR=torch_spline_conv/build
SRC_DIR=aten/THC
BUILD_DIR=torch_spline_conv/_ext
mkdir -p "$BUILD_DIR"
$(which nvcc) -c -o "$BUILD_DIR/kernel.so" "$SRC_DIR/kernel.cu" -arch=sm_35 -Xcompiler -fPIC -shared "-I$TORCH/lib/include/TH" "-I$TORCH/lib/include/THC" "-I$SRC_DIR"
$(which nvcc) -c -o "$BUILD_DIR/THC.so" "$SRC_DIR/THC.cu" -arch=sm_52 -Xcompiler -fPIC -shared "-I$TORCH/lib/include/TH" "-I$TORCH/lib/include" "-I$SRC_DIR"
setup.cfg
@@ -5,4 +5,4 @@ description-file = README.md
 test=pytest

 [tool:pytest]
-addopts = --capture=no --cov
+addopts = --capture=no
test/tensor.py (new file)
tensors = ['FloatTensor', 'DoubleTensor']

test/test_basis.py
-from os import path as osp
 from itertools import product

 import pytest
-import json
 import torch
-from torch_spline_conv.functions.ffi import spline_basis_forward
+from torch_spline_conv.basis import basis_forward

-from .utils import tensors, Tensor
+from .tensor import tensors

-f = open(osp.join(osp.dirname(__file__), 'basis.json'), 'r')
-data = json.load(f)
-f.close()
+tests = [{
+    'pseudo': [0, 0.0625, 0.25, 0.75, 0.9375, 1],
+    'kernel_size': [5],
+    'is_open_spline': [1],
+    'basis': [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]],
+    'weight_index': [[0, 1], [0, 1], [1, 2], [3, 4], [3, 4], [4, 0]],
+}, {
+    'pseudo': [0, 0.0625, 0.25, 0.75, 0.9375, 1],
+    'kernel_size': [4],
+    'is_open_spline': [0],
+    'basis': [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]],
+    'weight_index': [[0, 1], [0, 1], [1, 2], [3, 0], [3, 0], [0, 1]],
+}, {
+    'pseudo': [[0.125, 0.5], [0.5, 0.5], [0.75, 0.125]],
+    'kernel_size': [5, 5],
+    'is_open_spline': [1, 1],
+    'basis': [[0.5, 0.5, 0, 0], [1, 0, 0, 0], [0.5, 0, 0.5, 0]],
+    'weight_index': [[10, 11, 15, 16], [12, 13, 17, 18], [3, 4, 8, 9]]
+}]
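Where these expected values come from: for a degree-1 (linear) B-spline, each pseudo coordinate is scaled into knot space and the fractional part gives the two non-zero basis values. A small reference computation (inferred from the expected values above, not the library code) that reproduces them:

import math

def linear_basis_1d(p, k, is_open):
    # k - 1 intervals for an open spline, k for a closed one (indices wrap).
    v = p * (k - 1) if is_open else p * k
    lo = int(math.floor(v))
    frac = v - lo
    return [1 - frac, frac], [lo % k, (lo + 1) % k]

print(linear_basis_1d(0.0625, 5, True))   # ([0.75, 0.25], [0, 1])
print(linear_basis_1d(0.75, 4, False))    # ([1.0, 0.0], [3, 0])

For multi-dimensional pseudo coordinates, the per-dimension values multiply, which yields the (degree + 1)^d entries per node seen in the third test case.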
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_spline_basis_cpu(tensor, i):
-    degree = data[i].get('degree')
-    pseudo = Tensor(tensor, data[i]['pseudo'])
-    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
-    kernel_size = torch.LongTensor(data[i]['kernel_size'])
-    is_open_spline = torch.ByteTensor(data[i]['is_open_spline'])
-    K = kernel_size.prod()
-    expected_basis = Tensor(tensor, data[i]['expected_basis'])
-    expected_index = torch.LongTensor(data[i]['expected_index'])
+@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
+def test_basis_forward_cpu(tensor, i):
+    data = tests[i]

-    basis, index = spline_basis_forward(degree, pseudo, kernel_size,
-                                        is_open_spline, K)
-    basis = [pytest.approx(b, 0.01) for b in basis.view(-1).tolist()]
+    pseudo = getattr(torch, tensor)(data['pseudo'])
+    kernel_size = torch.LongTensor(data['kernel_size'])
+    is_open_spline = torch.ByteTensor(data['is_open_spline'])

-    assert basis == expected_basis.view(-1).tolist()
-    assert index.tolist() == expected_index.tolist()
+    basis, weight_index = basis_forward(1, pseudo, kernel_size, is_open_spline)
+    assert basis.tolist() == data['basis']
+    assert weight_index.tolist() == data['weight_index']

 @pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
-def test_spline_basis_gpu(tensor, i):  # pragma: no cover
-    degree = data[i].get('degree')
-    pseudo = Tensor(tensor, data[i]['pseudo']).cuda()
-    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
-    kernel_size = torch.cuda.LongTensor(data[i]['kernel_size'])
-    is_open_spline = torch.cuda.ByteTensor(data[i]['is_open_spline'])
-    K = kernel_size.prod()
-    expected_basis = Tensor(tensor, data[i]['expected_basis'])
-    expected_index = torch.LongTensor(data[i]['expected_index'])
-
-    basis, index = spline_basis_forward(degree, pseudo, kernel_size,
-                                        is_open_spline, K)
-    basis, index = basis.cpu(), index.cpu()
-    basis = [pytest.approx(b, 0.01) for b in basis.view(-1).tolist()]
-
-    assert basis == expected_basis.view(-1).tolist()
-    assert index.tolist() == expected_index.tolist()
+def test_basis_forward_gpu():  # pragma: no cover
+    pseudo = torch.cuda.FloatTensor([0, 0.0625, 0.25, 0.75, 0.9375, 1])
+    kernel_size = torch.cuda.LongTensor([5])
+    is_open_spline = torch.cuda.ByteTensor([1])

+    basis, weight_index = basis_forward(1, pseudo, kernel_size, is_open_spline)
+    print(basis.cpu().tolist())
+    print(weight_index.cpu().tolist())
# 'basis': [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]],
# 'weight_index': [[0, 1], [0, 1], [1, 2], [3, 4], [3, 4], [4, 0]],
# (The removed json-driven tests, test_spline_basis_cpu and
# test_spline_basis_gpu, are re-added here verbatim as comments.)
test/test_conv.py
import pytest
import torch
from torch.autograd import Variable, gradcheck
from torch_spline_conv import spline_conv
from torch_spline_conv.functions.spline_weighting import SplineWeighting
from torch_spline_conv.functions.ffi import implemented_degrees

from .utils import tensors, Tensor


@pytest.mark.parametrize('tensor', tensors)
def test_spline_conv_cpu(tensor):
    x = Tensor(tensor, [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]])
    edge_index = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])
    pseudo = [[0.25, 0.125], [0.25, 0.375], [0.75, 0.625], [0.75, 0.875]]
    pseudo = Tensor(tensor, pseudo)
    weight = torch.arange(0.5, 0.5 * 25, step=0.5, out=x.new()).view(12, 2, 1)
    kernel_size = torch.LongTensor([3, 4])
    is_open_spline = torch.ByteTensor([1, 0])
    root_weight = torch.arange(12.5, 13.5, step=0.5, out=x.new()).view(2, 1)
    bias = Tensor(tensor, [1])

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)

    edgewise_output = [
        1 * 0.25 * (0.5 + 1.5 + 4.5 + 5.5) + 2 * 0.25 * (1 + 2 + 5 + 6),
        3 * 0.25 * (1.5 + 2.5 + 5.5 + 6.5) + 4 * 0.25 * (2 + 3 + 6 + 7),
        5 * 0.25 * (6.5 + 7.5 + 10.5 + 11.5) + 6 * 0.25 * (7 + 8 + 11 + 12),
        7 * 0.25 * (7.5 + 4.5 + 11.5 + 8.5) + 8 * 0.25 * (8 + 5 + 12 + 9),
    ]
    expected_output = [
        [1 + 12.5 * 9 + 13 * 10 + sum(edgewise_output) / 4],
        [1 + 12.5 * 1 + 13 * 2],
        [1 + 12.5 * 3 + 13 * 4],
        [1 + 12.5 * 5 + 13 * 6],
        [1 + 12.5 * 7 + 13 * 8],
    ]

    assert output.tolist() == expected_output

    x, weight, pseudo = Variable(x), Variable(weight), Variable(pseudo)
    root_weight, bias = Variable(root_weight), Variable(bias)

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)

    assert output.data.tolist() == expected_output


def test_spline_weighting_backward_cpu():
    for degree in implemented_degrees.keys():
        kernel_size = torch.LongTensor([5, 5, 5])
        is_open_spline = torch.ByteTensor([1, 0, 1])
        op = SplineWeighting(kernel_size, is_open_spline, degree)

        x = torch.DoubleTensor(16, 2).uniform_(-1, 1)
        x = Variable(x, requires_grad=True)
        pseudo = torch.DoubleTensor(16, 3).uniform_(0, 1)
        pseudo = Variable(pseudo, requires_grad=True)
        weight = torch.DoubleTensor(25, 2, 4).uniform_(-1, 1)
        weight = Variable(weight, requires_grad=True)

        assert gradcheck(op, (x, pseudo, weight), eps=1e-6, atol=1e-4) is True


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor', tensors)
def test_spline_conv_gpu(tensor):  # pragma: no cover
    x = Tensor(tensor, [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]])
    edge_index = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])
    pseudo = [[0.25, 0.125], [0.25, 0.375], [0.75, 0.625], [0.75, 0.875]]
    pseudo = Tensor(tensor, pseudo)
    weight = torch.arange(0.5, 0.5 * 25, step=0.5, out=x.new()).view(12, 2, 1)
    kernel_size = torch.LongTensor([3, 4])
    is_open_spline = torch.ByteTensor([1, 0])
    root_weight = torch.arange(12.5, 13.5, step=0.5, out=x.new()).view(2, 1)
    bias = Tensor(tensor, [1])

    expected_output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                                  is_open_spline, 1, root_weight, bias)

    x, edge_index, pseudo = x.cuda(), edge_index.cuda(), pseudo.cuda()
    weight, kernel_size = weight.cuda(), kernel_size.cuda()
    is_open_spline, root_weight = is_open_spline.cuda(), root_weight.cuda()
    bias = bias.cuda()

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)

    assert output.cpu().tolist() == expected_output.tolist()

    x, weight, pseudo = Variable(x), Variable(weight), Variable(pseudo)
    root_weight, bias = Variable(root_weight), Variable(bias)

    output = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                         is_open_spline, 1, root_weight, bias)

    assert output.data.cpu().tolist() == expected_output.tolist()


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_spline_weighting_backward_gpu():  # pragma: no cover
    for degree in implemented_degrees.keys():
        kernel_size = torch.cuda.LongTensor([5, 5, 5])
        is_open_spline = torch.cuda.ByteTensor([1, 0, 1])
        op = SplineWeighting(kernel_size, is_open_spline, degree)

        x = torch.cuda.DoubleTensor(16, 2).uniform_(-1, 1)
        x = Variable(x, requires_grad=True)
        pseudo = torch.cuda.DoubleTensor(16, 3).uniform_(0, 1)
        pseudo = Variable(pseudo, requires_grad=False)  # TODO
        weight = torch.cuda.DoubleTensor(25, 2, 4).uniform_(-1, 1)
        weight = Variable(weight, requires_grad=True)

        assert gradcheck(op, (x, pseudo, weight), eps=1e-6, atol=1e-4) is True
# (The new version of this file re-adds everything above verbatim, with every
# line commented out.)
torch_spline_conv/__init__.py
-from .functions.spline_conv import spline_conv
+from .spline_conv import spline_conv

 __version__ = '0.1.0'
......
torch_spline_conv/basis.py (new file)
from .utils.ffi import basis_forward as ffi_basis_forward


def basis_forward(degree, pseudo, kernel_size, is_open_spline):
    pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
    num_nodes, S = pseudo.size(0), (degree + 1)**kernel_size.size(0)

    basis = pseudo.new(num_nodes, S)
    weight_index = kernel_size.new(num_nodes, S)
    ffi_basis_forward(degree, basis, weight_index, pseudo, kernel_size,
                      is_open_spline)

    return basis, weight_index
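Example usage, mirroring the first CPU test case above (the expected values are taken directly from the `tests` table in test_basis.py):

import torch
from torch_spline_conv.basis import basis_forward

pseudo = torch.FloatTensor([0, 0.0625, 0.25, 0.75, 0.9375, 1])
basis, weight_index = basis_forward(1, pseudo, torch.LongTensor([5]),
                                    torch.ByteTensor([1]))
# basis:        [[1, 0], [0.75, 0.25], [1, 0], [1, 0], [0.25, 0.75], [1, 0]]
# weight_index: [[0, 1], [0, 1], [1, 2], [3, 4], [3, 4], [4, 0]]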
torch_spline_conv/spline_conv.py (new file)
def spline_conv(x,
                edge_index,
                pseudo,
                weight,
                kernel_size,
                is_open_spline,
                degree=1,
                root_weight=None,
                bias=None):
    pass
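The body is still a stub in this commit. For orientation, a dense Python reference sketch of the semantics that test_spline_conv_cpu pins down (per-edge B-spline-weighted messages from source to target nodes, mean aggregation over incoming edges, a root-weight transform, and a bias); the names and structure here are illustrative, not the eventual aten-backed implementation:

import torch
from torch_spline_conv.basis import basis_forward

def spline_conv_reference(x, edge_index, pseudo, weight, kernel_size,
                          is_open_spline, degree=1, root_weight=None,
                          bias=None):
    row, col = edge_index[0], edge_index[1]
    basis, weight_index = basis_forward(degree, pseudo, kernel_size,
                                        is_open_spline)

    out = x.new(x.size(0), weight.size(2)).fill_(0)
    num = x.new(x.size(0), 1).fill_(0)
    for e in range(row.size(0)):
        # Mix the (degree + 1)^d active kernel weights of this edge by their
        # B-spline basis products, then transform the source node features.
        w = sum(basis[e, s] * weight[weight_index[e, s]]
                for s in range(basis.size(1)))
        out[row[e]] += torch.matmul(x[col[e]], w)
        num[row[e]] += 1

    out = out / num.clamp(min=1)  # mean over incoming edges
    if root_weight is not None:
        out = out + torch.matmul(x, root_weight)  # root node contribution
    if bias is not None:
        out = out + bias
    return out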
torch_spline_conv/utils/ffi.py (new file)
from .._ext import ffi

implemented_degrees = {1: 'linear', 2: 'quadratic', 3: 'cubic'}


def get_func(name, is_cuda, tensor=None):
    prefix = 'THCC' if is_cuda else 'TH'
    prefix += 'Tensor' if tensor is None else type(tensor).__name__
    return getattr(ffi, '{}_{}'.format(prefix, name))


def get_degree_str(degree):
    degree = implemented_degrees.get(degree)
    assert degree is not None, (
        'No implementation found for specified B-spline degree')
    return degree


def basis_forward(degree, basis, weight_index, pseudo, kernel_size,
                  is_open_spline):
    name = '{}BasisForward'.format(get_degree_str(degree))
    func = get_func(name, basis.is_cuda, basis)
    func(basis, weight_index, pseudo, kernel_size, is_open_spline)
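For orientation, the symbol names get_func resolves to, assuming the generated cffi wrapper exports one function per tensor type and spline degree (derived from the string construction in this file, not verified against the C sources):

# degree=1, basis is a torch.FloatTensor       -> ffi.THFloatTensor_linearBasisForward
# degree=3, basis is a torch.cuda.FloatTensor  -> ffi.THCCFloatTensor_cubicBasisForward
# tensor=None falls back to the generic form   -> ffi.THTensor_<name> / ffi.THCCTensor_<name>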