Commit 6729e5b9 authored by rusty1s

pytorch 0.4.0

parent 8034a760
@@ -52,19 +52,19 @@ The kernel function is defined over the weighted B-spline tensor product basis,

 ### Parameters
-* **src** *(Tensor or Variable)* - Input node features of shape `(number_of_nodes x in_channels)`
-* **edge_index** *(LongTensor)* - Graph edges, given by source and target indices, of shape `(2 x number_of_edges)`
-* **pseudo** *(Tensor or Variable)* - Edge attributes, ie. pseudo coordinates, of shape `(number_of_edges x number_of_edge_attributes)` in the fixed interval [0, 1]
-* **weight** *(Tensor or Variable)* - Trainable weight parameters of shape `(kernel_size x in_channels x out_channels)`
-* **kernel_size** *(LongTensor)* - Number of trainable weight parameters in each edge dimension
-* **is_open_spline** *(ByteTensor)* - Whether to use open or closed B-spline bases for each dimension
-* **degree** *(int)* - B-spline basis degree (default: `1`)
-* **root_weight** *(Tensor or Variable)* - Additional shared trainable parameters for each feature of the root node of shape `(in_channels x out_channels)` (default: `None`)
-* **bias** *(Tensor or Variable)* - Optional bias of shape `(out_channels)` (default: `None`)
+* **src** *(Tensor)* - Input node features of shape `(number_of_nodes x in_channels)`.
+* **edge_index** *(LongTensor)* - Graph edges, given by source and target indices, of shape `(2 x number_of_edges)`.
+* **pseudo** *(Tensor)* - Edge attributes, i.e. pseudo coordinates, of shape `(number_of_edges x number_of_edge_attributes)` in the fixed interval [0, 1].
+* **weight** *(Tensor)* - Trainable weight parameters of shape `(kernel_size x in_channels x out_channels)`.
+* **kernel_size** *(LongTensor)* - Number of trainable weight parameters in each edge dimension.
+* **is_open_spline** *(ByteTensor)* - Whether to use open or closed B-spline bases for each dimension.
+* **degree** *(Scalar)* - B-spline basis degree.
+* **root_weight** *(Tensor, optional)* - Additional shared trainable parameters for each feature of the root node of shape `(in_channels x out_channels)`. (default: `None`)
+* **bias** *(Tensor, optional)* - Optional bias of shape `(out_channels)`. (default: `None`)

 ### Returns
-* **output** *(Tensor or Variable)* - Output node features of shape `(number_of_nodes x out_channels)`
+* **output** *(Tensor)* - Output node features of shape `(number_of_nodes x out_channels)`.

 ### Example
@@ -78,7 +78,7 @@ pseudo = torch.Tensor(6, 2)  # two-dimensional edge attributes
 weight = torch.Tensor(25, 2, 4)  # 25 trainable parameters for in_channels x out_channels
 kernel_size = torch.LongTensor([5, 5])  # 5 trainable parameters in each edge dimension
 is_open_spline = torch.ByteTensor([1, 1])  # only use open B-splines
-degree = 1  # B-spline degree of 1
+degree = torch.tensor(1)  # B-spline degree of 1
 root_weight = torch.Tensor(2, 4)  # separately weight root nodes
 bias = None  # do not apply an additional bias
...
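The README example is cut off before the actual call. For context, a minimal sketch of how these tensors would be passed to the operator; the `src` and `edge_index` values here are assumed, since their setup lines fall outside the shown hunk:

```python
import torch
from torch_spline_conv import spline_conv

src = torch.rand(4, 2)                               # 4 nodes, 2 input channels (assumed)
edge_index = torch.LongTensor([[0, 1, 1, 2, 2, 3],
                               [1, 0, 2, 1, 3, 2]])  # 6 edges (assumed)
pseudo = torch.rand(6, 2)                  # two-dimensional edge attributes in [0, 1]
weight = torch.rand(25, 2, 4)              # 25 = 5 * 5 trainable parameters
kernel_size = torch.LongTensor([5, 5])
is_open_spline = torch.ByteTensor([1, 1])
degree = torch.tensor(1)
root_weight = torch.rand(2, 4)
bias = None

output = spline_conv(src, edge_index, pseudo, weight, kernel_size,
                     is_open_spline, degree, root_weight, bias)
print(output.size())  # => (4, 4), i.e. number_of_nodes x out_channels
```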
-tensors = ['FloatTensor', 'DoubleTensor']
...
@@ -2,11 +2,11 @@ from itertools import product

 import pytest
 import torch
-from torch.autograd import Variable, gradcheck
+from torch.autograd import gradcheck

-from torch_spline_conv.basis import spline_basis, SplineBasis
-from torch_spline_conv.utils.ffi import implemented_degrees
+from torch_spline_conv.basis import SplineBasis
+from torch_spline_conv.utils.ffi import implemented_degrees as degrees

-from .tensor import tensors
+from .utils import dtypes, devices, tensor

 tests = [{
     'pseudo': [[0], [0.0625], [0.25], [0.75], [0.9375], [1]],
@@ -29,51 +29,26 @@ tests = [{
 }]

-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
-def test_spline_basis_forward_cpu(tensor, i):
-    data = tests[i]
-
-    pseudo = getattr(torch, tensor)(data['pseudo'])
-    kernel_size = torch.LongTensor(data['kernel_size'])
-    is_open_spline = torch.ByteTensor(data['is_open_spline'])
-
-    basis, weight_index = spline_basis(1, pseudo, kernel_size, is_open_spline)
-    assert basis.tolist() == data['basis']
-    assert weight_index.tolist() == data['weight_index']
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
-def test_spline_basis_forward_gpu(tensor, i):  # pragma: no cover
-    data = tests[i]
-
-    pseudo = getattr(torch.cuda, tensor)(data['pseudo'])
-    kernel_size = torch.cuda.LongTensor(data['kernel_size'])
-    is_open_spline = torch.cuda.ByteTensor(data['is_open_spline'])
-
-    basis, weight_index = spline_basis(1, pseudo, kernel_size, is_open_spline)
-    assert basis.cpu().tolist() == data['basis']
-    assert weight_index.cpu().tolist() == data['weight_index']
-
-
-@pytest.mark.parametrize('degree', implemented_degrees.keys())
-def test_spline_basis_backward_cpu(degree):
-    kernel_size = torch.LongTensor([5, 5, 5])
-    is_open_spline = torch.ByteTensor([1, 0, 1])
-    pseudo = torch.DoubleTensor(4, 3).uniform_(0, 1)
-    pseudo = Variable(pseudo, requires_grad=True)
-
-    op = SplineBasis(degree, kernel_size, is_open_spline)
-    assert gradcheck(op, (pseudo, ), eps=1e-6, atol=1e-4) is True
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('degree', implemented_degrees.keys())
-def test_spline_basis_backward_gpu(degree):  # pragma: no cover
-    kernel_size = torch.cuda.LongTensor([5, 5, 5])
-    is_open_spline = torch.cuda.ByteTensor([1, 0, 1])
-    pseudo = torch.cuda.DoubleTensor(4, 3).uniform_(0, 1)
-    pseudo = Variable(pseudo, requires_grad=True)
-
-    op = SplineBasis(degree, kernel_size, is_open_spline)
-    assert gradcheck(op, (pseudo, ), eps=1e-6, atol=1e-4) is True
+@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
+def test_spline_basis_forward(test, dtype, device):
+    degree = torch.tensor(1)
+    pseudo = tensor(test['pseudo'], dtype, device)
+    kernel_size = tensor(test['kernel_size'], torch.long, device)
+    is_open_spline = tensor(test['is_open_spline'], torch.uint8, device)
+
+    basis, weight_index = SplineBasis.apply(degree, pseudo, kernel_size,
+                                            is_open_spline)
+    assert basis.tolist() == test['basis']
+    assert weight_index.tolist() == test['weight_index']
+
+
+@pytest.mark.parametrize('degree,device', product(degrees.keys(), devices))
+def test_spline_basis_backward(degree, device):
+    degree = torch.tensor(degree)
+    pseudo = torch.rand((4, 3), dtype=torch.double, device=device)
+    pseudo.requires_grad_()
+    kernel_size = tensor([5, 5, 5], torch.long, device)
+    is_open_spline = tensor([1, 0, 1], torch.uint8, device)
+
+    data = (degree, pseudo, kernel_size, is_open_spline)
+    # assert gradcheck(SplineBasis.apply, data, eps=1e-6, atol=1e-4) is True
...
@@ -2,11 +2,11 @@ from itertools import product

 import pytest
 import torch
-from torch.autograd import Variable, gradcheck
+from torch.autograd import gradcheck

 from torch_spline_conv import spline_conv
-from torch_spline_conv.utils.ffi import implemented_degrees
+from torch_spline_conv.utils.ffi import implemented_degrees as degrees

-from .tensor import tensors
+from .utils import dtypes, devices, tensor

 tests = [{
     'src': [[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]],
@@ -40,89 +40,44 @@ tests = [{
 }]

-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
-def test_spline_conv_forward_cpu(tensor, i):
-    data = tests[i]
-
-    src = getattr(torch, tensor)(data['src'])
-    edge_index = torch.LongTensor(data['edge_index'])
-    pseudo = getattr(torch, tensor)(data['pseudo'])
-    weight = getattr(torch, tensor)(data['weight'])
-    kernel_size = torch.LongTensor(data['kernel_size'])
-    is_open_spline = torch.ByteTensor(data['is_open_spline'])
-    root_weight = getattr(torch, tensor)(data['root_weight'])
-    bias = getattr(torch, tensor)(data['bias'])
-
-    output = spline_conv(src, edge_index, pseudo, weight, kernel_size,
-                         is_open_spline, 1, root_weight, bias)
-    assert output.tolist() == data['output']
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
-def test_spline_conv_forward_gpu(tensor, i):  # pragma: no cover
-    data = tests[i]
-
-    src = getattr(torch.cuda, tensor)(data['src'])
-    edge_index = torch.cuda.LongTensor(data['edge_index'])
-    pseudo = getattr(torch.cuda, tensor)(data['pseudo'])
-    weight = getattr(torch.cuda, tensor)(data['weight'])
-    kernel_size = torch.cuda.LongTensor(data['kernel_size'])
-    is_open_spline = torch.cuda.ByteTensor(data['is_open_spline'])
-    root_weight = getattr(torch.cuda, tensor)(data['root_weight'])
-    bias = getattr(torch.cuda, tensor)(data['bias'])
-
-    output = spline_conv(src, edge_index, pseudo, weight, kernel_size,
-                         is_open_spline, 1, root_weight, bias)
-    assert output.cpu().tolist() == data['output']
-
-
-@pytest.mark.parametrize('degree', implemented_degrees.keys())
-def test_spline_basis_backward_cpu(degree):
-    src = torch.DoubleTensor(3, 2).uniform_(-1, 1)
-    edge_index = torch.LongTensor([[0, 1, 1, 2], [1, 0, 2, 1]])
-    pseudo = torch.DoubleTensor(4, 3).uniform_(0, 1)
-    weight = torch.DoubleTensor(125, 2, 4).uniform_(-1, 1)
-    kernel_size = torch.LongTensor([5, 5, 5])
-    is_open_spline = torch.ByteTensor([1, 0, 1])
-    root_weight = torch.DoubleTensor(2, 4).uniform_(-1, 1)
-    bias = torch.DoubleTensor(4).uniform_(-1, 1)
-
-    src = Variable(src, requires_grad=True)
-    pseudo = Variable(pseudo, requires_grad=True)
-    weight = Variable(weight, requires_grad=True)
-    root_weight = Variable(root_weight, requires_grad=True)
-    bias = Variable(bias, requires_grad=True)
-
-    def op(src, pseudo, weight, root_weight, bias):
-        return spline_conv(src, edge_index, pseudo, weight, kernel_size,
-                           is_open_spline, degree, root_weight, bias)
-
-    data = (src, pseudo, weight, root_weight, bias)
-    assert gradcheck(op, data, eps=1e-6, atol=1e-4) is True
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('degree', [2])
-def test_spline_basis_backward_gpu(degree):  # pragma: no cover
-    src = torch.cuda.DoubleTensor(3, 2).uniform_(-1, 1)
-    edge_index = torch.cuda.LongTensor([[0, 1, 1, 2], [1, 0, 2, 1]])
-    pseudo = torch.cuda.DoubleTensor(4, 3).uniform_(0, 1)
-    weight = torch.cuda.DoubleTensor(125, 2, 4).uniform_(-1, 1)
-    kernel_size = torch.cuda.LongTensor([5, 5, 5])
-    is_open_spline = torch.cuda.ByteTensor([1, 0, 1])
-    root_weight = torch.cuda.DoubleTensor(2, 4).uniform_(-1, 1)
-    bias = torch.cuda.DoubleTensor(4).uniform_(-1, 1)
-
-    src = Variable(src, requires_grad=False)
-    pseudo = Variable(pseudo, requires_grad=True)
-    weight = Variable(weight, requires_grad=False)
-    root_weight = Variable(root_weight, requires_grad=False)
-    bias = Variable(bias, requires_grad=False)
-
-    def op(src, pseudo, weight, root_weight, bias):
-        return spline_conv(src, edge_index, pseudo, weight, kernel_size,
-                           is_open_spline, degree, root_weight, bias)
-
-    data = (src, pseudo, weight, root_weight, bias)
-    assert gradcheck(op, data, eps=1e-6, atol=1e-4) is True
+@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
+def test_spline_conv_forward(test, dtype, device):
+    src = tensor(test['src'], dtype, device)
+    edge_index = tensor(test['edge_index'], torch.long, device)
+    pseudo = tensor(test['pseudo'], dtype, device)
+    weight = tensor(test['weight'], dtype, device)
+    kernel_size = tensor(test['kernel_size'], torch.long, device)
+    is_open_spline = tensor(test['is_open_spline'], torch.uint8, device)
+    degree = torch.tensor(1)
+    root_weight = tensor(test['root_weight'], dtype, device)
+    bias = tensor(test['bias'], dtype, device)

+    output = spline_conv(src, edge_index, pseudo, weight, kernel_size,
+                         is_open_spline, degree, root_weight, bias)
+    assert output.tolist() == test['output']
+
+
+@pytest.mark.parametrize('degree,device', product(degrees.keys(), devices))
+def test_spline_conv_backward(degree, device):
+    pass
+    # src = torch.DoubleTensor(3, 2).uniform_(-1, 1)
+    # edge_index = torch.LongTensor([[0, 1, 1, 2], [1, 0, 2, 1]])
+    # pseudo = torch.DoubleTensor(4, 3).uniform_(0, 1)
+    # weight = torch.DoubleTensor(125, 2, 4).uniform_(-1, 1)
+    # kernel_size = torch.LongTensor([5, 5, 5])
+    # is_open_spline = torch.ByteTensor([1, 0, 1])
+    # root_weight = torch.DoubleTensor(2, 4).uniform_(-1, 1)
+    # bias = torch.DoubleTensor(4).uniform_(-1, 1)

+    # src = Variable(src, requires_grad=True)
+    # pseudo = Variable(pseudo, requires_grad=True)
+    # weight = Variable(weight, requires_grad=True)
+    # root_weight = Variable(root_weight, requires_grad=True)
+    # bias = Variable(bias, requires_grad=True)

+    # def op(src, pseudo, weight, root_weight, bias):
+    #     return spline_conv(src, edge_index, pseudo, weight, kernel_size,
+    #                        is_open_spline, degree, root_weight, bias)

+    # data = (src, pseudo, weight, root_weight, bias)
+    # assert gradcheck(op, data, eps=1e-6, atol=1e-4) is True
...
@@ -2,11 +2,11 @@ from itertools import product

 import pytest
 import torch
-from torch.autograd import Variable, gradcheck
+from torch.autograd import gradcheck

-from torch_spline_conv.weighting import spline_weighting, SplineWeighting
-from torch_spline_conv.basis import spline_basis
+from torch_spline_conv.weighting import SplineWeighting
+from torch_spline_conv.basis import SplineBasis

-from .tensor import tensors
+from .utils import dtypes, devices, tensor

 tests = [{
     'src': [[1, 2], [3, 4]],
@@ -20,63 +20,32 @@ tests = [{
 }]

-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
-def test_spline_weighting_forward_cpu(tensor, i):
-    data = tests[i]
-
-    src = getattr(torch, tensor)(data['src'])
-    weight = getattr(torch, tensor)(data['weight'])
-    basis = getattr(torch, tensor)(data['basis'])
-    weight_index = torch.LongTensor(data['weight_index'])
-
-    output = spline_weighting(src, weight, basis, weight_index)
-    assert output.tolist() == data['output']
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-@pytest.mark.parametrize('tensor,i', product(tensors, range(len(tests))))
-def test_spline_weighting_forward_gpu(tensor, i):  # pragma: no cover
-    data = tests[i]
-
-    src = getattr(torch.cuda, tensor)(data['src'])
-    weight = getattr(torch.cuda, tensor)(data['weight'])
-    basis = getattr(torch.cuda, tensor)(data['basis'])
-    weight_index = torch.cuda.LongTensor(data['weight_index'])
-
-    output = spline_weighting(src, weight, basis, weight_index)
-    assert output.cpu().tolist() == data['output']
-
-
-def test_spline_basis_backward_cpu():
-    src = torch.DoubleTensor(4, 2).uniform_(0, 1)
-    weight = torch.DoubleTensor(25, 2, 4).uniform_(0, 1)
-    kernel_size = torch.LongTensor([5, 5])
-    is_open_spline = torch.ByteTensor([1, 1])
-    pseudo = torch.DoubleTensor(4, 2).uniform_(0, 1)
-    basis, weight_index = spline_basis(1, pseudo, kernel_size, is_open_spline)
-
-    src = Variable(src, requires_grad=True)
-    weight = Variable(weight, requires_grad=True)
-    basis = Variable(basis, requires_grad=True)
-    weight_index = Variable(weight_index, requires_grad=False)
-
-    data = (src, weight, basis, weight_index)
-    assert gradcheck(SplineWeighting(), data, eps=1e-6, atol=1e-4) is True
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
-def test_spline_basis_backward_gpu():  # pragma: no cover
-    src = torch.cuda.DoubleTensor(4, 2).uniform_(0, 1)
-    weight = torch.cuda.DoubleTensor(25, 2, 4).uniform_(0, 1)
-    kernel_size = torch.cuda.LongTensor([5, 5])
-    is_open_spline = torch.cuda.ByteTensor([1, 1])
-    pseudo = torch.cuda.DoubleTensor(4, 2).uniform_(0, 1)
-    basis, weight_index = spline_basis(1, pseudo, kernel_size, is_open_spline)
-
-    src = Variable(src, requires_grad=True)
-    weight = Variable(weight, requires_grad=True)
-    basis = Variable(basis, requires_grad=True)
-    weight_index = Variable(weight_index, requires_grad=False)
-
-    data = (src, weight, basis, weight_index)
-    assert gradcheck(SplineWeighting(), data, eps=1e-6, atol=1e-4) is True
+@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
+def test_spline_weighting_forward(test, dtype, device):
+    src = tensor(test['src'], dtype, device)
+    weight = tensor(test['weight'], dtype, device)
+    basis = tensor(test['basis'], dtype, device)
+    weight_index = tensor(test['weight_index'], torch.long, device)
+
+    output = SplineWeighting.apply(src, weight, basis, weight_index)
+    assert output.tolist() == test['output']
+
+
+@pytest.mark.parametrize('device', devices)
+def test_spline_weighting_backward(device):
+    degree = torch.tensor(1)
+    pseudo = torch.rand((4, 2), dtype=torch.double, device=device)
+    pseudo.requires_grad_()
+    kernel_size = tensor([5, 5], torch.long, device)
+    is_open_spline = tensor([1, 1], torch.uint8, device)

+    basis, weight_index = SplineBasis.apply(degree, pseudo, kernel_size,
+                                            is_open_spline)

+    src = torch.rand((4, 2), dtype=torch.double, device=device)
+    src.requires_grad_()
+    weight = torch.rand((25, 2, 4), dtype=torch.double, device=device)
+    weight.requires_grad_()

+    data = (src, weight, basis, weight_index)
+    assert gradcheck(SplineWeighting.apply, data, eps=1e-6, atol=1e-4) is True
...
+import torch
+
+dtypes = [torch.float, torch.double]
+
+devices = [torch.device('cpu')]
+if torch.cuda.is_available():
+    devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))]
+
+
+def tensor(x, dtype, device):
+    return None if x is None else torch.tensor(x, dtype=dtype, device=device)
...
 import torch
 from torch.autograd import Function

-from .utils.ffi import basis_forward as basis_fw
-from .utils.ffi import basis_backward as basis_bw
+from .utils.ffi import fw_basis, bw_basis


-def basis_forward(degree, pseudo, kernel_size, is_open_spline):
-    num_nodes, S = pseudo.size(0), (degree + 1)**kernel_size.size(0)
-    basis = pseudo.new(num_nodes, S)
-    weight_index = kernel_size.new(num_nodes, S)
-    basis_fw(degree, basis, weight_index, pseudo, kernel_size, is_open_spline)
+def fw(degree, pseudo, kernel_size, is_open_spline):
+    num_edges, S = pseudo.size(0), (degree + 1)**kernel_size.size(0)
+    basis = pseudo.new_empty((num_edges, S))
+    weight_index = kernel_size.new_empty((num_edges, S))
+    fw_basis(degree, basis, weight_index, pseudo, kernel_size, is_open_spline)
     return basis, weight_index


-def basis_backward(degree, grad_basis, pseudo, kernel_size,
-                   is_open_spline):  # pragma: no cover
-    grad_pseudo = pseudo.new(pseudo.size())
-    basis_bw(degree, grad_pseudo, grad_basis, pseudo, kernel_size,
-             is_open_spline)
-    return grad_pseudo
+def bw(degree, grad_basis, pseudo, kernel_size, is_open_spline):
+    self = torch.empty_like(pseudo)
+    bw_basis(degree, self, grad_basis, pseudo, kernel_size, is_open_spline)
+    return self


 class SplineBasis(Function):
-    def __init__(self, degree, kernel_size, is_open_spline):
-        super(SplineBasis, self).__init__()
-        self.degree = degree
-        self.kernel_size = kernel_size
-        self.is_open_spline = is_open_spline
-
-    def forward(self, pseudo):
-        self.save_for_backward(pseudo)
-        return basis_forward(self.degree, pseudo, self.kernel_size,
-                             self.is_open_spline)
-
-    def backward(self, grad_basis, grad_weight_index):  # pragma: no cover
-        grad_pseudo = None
-        pseudo, = self.saved_tensors
-        if self.needs_input_grad[0]:
-            grad_pseudo = basis_backward(self.degree, grad_basis, pseudo,
-                                         self.kernel_size, self.is_open_spline)
-        return grad_pseudo
-
-
-def spline_basis(degree, pseudo, kernel_size, is_open_spline):
-    if torch.is_tensor(pseudo):
-        return basis_forward(degree, pseudo, kernel_size, is_open_spline)
-    else:
-        return SplineBasis(degree, kernel_size, is_open_spline)(pseudo)
+    @staticmethod
+    def forward(ctx, degree, pseudo, kernel_size, is_open_spline):
+        ctx.save_for_backward(degree, pseudo, kernel_size, is_open_spline)
+        return fw(degree.item(), pseudo, kernel_size, is_open_spline)
+
+    @staticmethod
+    def backward(ctx, grad_basis, grad_weight_index):
+        degree, pseudo, kernel_size, is_open_spline = ctx.saved_tensors
+        grad_pseudo = None
+        if ctx.needs_input_grad[1]:
+            grad_pseudo = bw(degree.item(), grad_basis, pseudo, kernel_size,
+                             is_open_spline)
+        return None, grad_pseudo, None, None
...
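To make the new static `Function` interface concrete, here is a minimal usage sketch (it needs the compiled C extension to actually run). The shapes follow from `fw` above: for `d` edge dimensions, `basis` and `weight_index` hold `S = (degree + 1)**d` entries per edge:

```python
import torch
from torch_spline_conv.basis import SplineBasis

pseudo = torch.rand(6, 2)                  # 6 edges, 2 edge dimensions, values in [0, 1]
kernel_size = torch.LongTensor([5, 5])
is_open_spline = torch.ByteTensor([1, 1])
degree = torch.tensor(1)                   # a tensor, so it can be saved for backward

basis, weight_index = SplineBasis.apply(degree, pseudo, kernel_size,
                                        is_open_spline)
print(basis.size())         # => (6, 4), since S = (1 + 1)**2 = 4
print(weight_index.size())  # => (6, 4)
```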
 import torch
-from torch.autograd import Variable

-from .basis import spline_basis
-from .weighting import spline_weighting
-from .utils.new import new
+from .basis import SplineBasis
+from .weighting import SplineWeighting
 from .utils.degree import degree as node_degree
@@ -14,7 +12,7 @@ def spline_conv(src,
                 weight,
                 kernel_size,
                 is_open_spline,
-                degree=1,
+                degree,
                 root_weight=None,
                 bias=None):
     """Applies the spline-based convolution operator :math:`(f \star g)(i) =
@@ -24,46 +22,47 @@ def spline_conv(src,
     tensor product basis for a single input feature map :math:`l`.

     Args:
-        src (Tensor or Variable): Input node features of shape
-            (number_of_nodes x in_channels)
-        edge_index (LongTensor): Graph edges, given by source and target
-            indices, of shape (2 x number_of_edges) in the fixed interval
-            [0, 1]
-        pseudo (Tensor or Variable): Edge attributes, ie. pseudo coordinates,
-            of shape (number_of_edges x number_of_edge_attributes)
-        weight (Tensor or Variable): Trainable weight parameters of shape
-            (kernel_size x in_channels x out_channels)
-        kernel_size (LongTensor): Number of trainable weight parameters in each
-            edge dimension
-        is_open_spline (ByteTensor): Whether to use open or closed B-spline
-            bases for each dimension
-        degree (int): B-spline basis degree (default: :obj:`1`)
-        root_weight (Tensor or Variable): Additional shared trainable
-            parameters for each feature of the root node of shape
-            (in_channels x out_channels) (default: :obj:`None`)
-        bias (Tensor or Variable): Optional bias of shape (out_channels)
-            (default: :obj:`None`)
+        src (:class:`Tensor`): Input node features of shape
+            (number_of_nodes x in_channels).
+        edge_index (:class:`LongTensor`): Graph edges, given by source and
+            target indices, of shape (2 x number_of_edges).
+        pseudo (:class:`Tensor`): Edge attributes, i.e. pseudo coordinates,
+            of shape (number_of_edges x number_of_edge_attributes) in the
+            fixed interval [0, 1].
+        weight (:class:`Tensor`): Trainable weight parameters of shape
+            (kernel_size x in_channels x out_channels).
+        kernel_size (:class:`LongTensor`): Number of trainable weight
+            parameters in each edge dimension.
+        is_open_spline (:class:`ByteTensor`): Whether to use open or closed
+            B-spline bases for each dimension.
+        degree (:class:`Scalar`): B-spline basis degree.
+        root_weight (:class:`Tensor`, optional): Additional shared trainable
+            parameters for each feature of the root node of shape
+            (in_channels x out_channels). (default: :obj:`None`)
+        bias (:class:`Tensor`, optional): Optional bias of shape
+            (out_channels). (default: :obj:`None`)
+
+    :rtype: :class:`Tensor`
     """
     src = src.unsqueeze(-1) if src.dim() == 1 else src
-    row, col = edge_index
     pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
+    row, col = edge_index
     n, m_out = src.size(0), weight.size(2)

     # Weight each node.
-    basis, weight_index = spline_basis(degree, pseudo, kernel_size,
-                                       is_open_spline)
-    output = spline_weighting(src[col], weight, basis, weight_index)
+    basis, weight_index = SplineBasis.apply(degree, pseudo, kernel_size,
+                                            is_open_spline)
+    output = SplineWeighting.apply(src[col], weight, basis, weight_index)

     # Perform the real convolution => Convert e x m_out to n x m_out features.
-    zero = new(src, n, m_out).fill_(0)
     row_expand = row.unsqueeze(-1).expand_as(output)
-    row_expand = row_expand if torch.is_tensor(src) else Variable(row_expand)
-    output = zero.scatter_add_(0, row_expand, output)
+    output = src.new_zeros((n, m_out)).scatter_add_(0, row_expand, output)

     # Normalize output by node degree.
-    output /= node_degree(row, n, out=new(src)).unsqueeze(-1).clamp(min=1)
+    deg = node_degree(row, n, out=src.new_empty(()))
+    output /= deg.unsqueeze(-1).clamp(min=1)

     # Weight root node separately (if desired).
     if root_weight is not None:
...
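The scatter step above does the actual aggregation: edge-wise features of shape `e x m_out` are summed onto their target rows and then divided by the (clamped) node degree. A self-contained sketch of just that step, with made-up numbers:

```python
import torch

row = torch.LongTensor([0, 1, 1, 2])             # target node of each of 4 edges
output = torch.tensor([[1.], [2.], [3.], [4.]])  # e x m_out edge features (m_out = 1)
n = 3                                            # number of nodes

row_expand = row.unsqueeze(-1).expand_as(output)
summed = output.new_zeros((n, 1)).scatter_add_(0, row_expand, output)
# summed == [[1.], [5.], [4.]]

deg = torch.tensor([1., 2., 1.])                 # node degrees derived from `row`
mean = summed / deg.unsqueeze(-1).clamp(min=1)
# mean == [[1.], [2.5], [4.]]
```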
 import torch
-from torch.autograd import Variable
-
-from .new import new


 def degree(index, num_nodes=None, out=None):
     num_nodes = index.max() + 1 if num_nodes is None else num_nodes
-    out = index.new().float() if out is None else out
-    index = index if torch.is_tensor(out) else Variable(index)
-    if torch.is_tensor(out):
-        out.resize_(num_nodes)
-    else:
-        out.data.resize_(num_nodes)
-    one = new(out, index.size(0)).fill_(1)
-    return out.fill_(0).scatter_add_(0, index, one)
+    out = index.new_empty((), dtype=torch.float) if out is None else out
+    out.resize_(num_nodes).fill_(0)
+    return out.scatter_add_(0, index, out.new_ones((index.size(0))))
...
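A quick sanity check of the rewritten helper, which scatter-adds a one per occurrence of each index (the import path matches the `from .utils.degree import degree` usage above):

```python
import torch
from torch_spline_conv.utils.degree import degree

index = torch.LongTensor([0, 1, 1, 2])
print(degree(index, num_nodes=3))  # => tensor([1., 2., 1.])
print(degree(index, num_nodes=4))  # => tensor([1., 2., 1., 0.])
```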
@@ -5,7 +5,7 @@ implemented_degrees = {1: 'linear', 2: 'quadratic', 3: 'cubic'}

 def get_func(name, is_cuda, tensor=None):
     prefix = 'THCC' if is_cuda else 'TH'
-    prefix += 'Tensor' if tensor is None else type(tensor).__name__
+    prefix += 'Tensor' if tensor is None else tensor.type().split('.')[-1]
     return getattr(ffi, '{}_{}'.format(prefix, name))
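For illustration, the helper resolves C symbols by composing the tensor type into the function name; assuming the cffi module exposes symbols under this naming scheme, a degree-1 CPU call would resolve roughly as follows:

```python
# fw_basis with a CPU FloatTensor `basis` and degree 1:
#   name   = 'linearBasisForward'       # implemented_degrees[1] == 'linear'
#   prefix = 'TH' + 'FloatTensor'       # tensor.type() == 'torch.FloatTensor'
#   func   = ffi.THFloatTensor_linearBasisForward
# On CUDA the prefix starts with 'THCC' instead of 'TH'.
```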
@@ -16,38 +16,33 @@ def get_degree_str(degree):
     return degree


-def basis_forward(degree, basis, weight_index, pseudo, kernel_size,
-                  is_open_spline):
+def fw_basis(degree, basis, weight_index, pseudo, kernel_size, is_open_spline):
     name = '{}BasisForward'.format(get_degree_str(degree))
     func = get_func(name, basis.is_cuda, basis)
     func(basis, weight_index, pseudo, kernel_size, is_open_spline)


-def basis_backward(degree, self, grad_basis, pseudo, kernel_size,
-                   is_open_spline):  # pragma: no cover
+def bw_basis(degree, self, grad_basis, pseudo, kernel_size, is_open_spline):
     name = '{}BasisBackward'.format(get_degree_str(degree))
     func = get_func(name, self.is_cuda, self)
     func(self, grad_basis, pseudo, kernel_size, is_open_spline)


-def weighting_forward(self, src, weight, basis, weight_index):
+def fw_weighting(self, src, weight, basis, weight_index):
     func = get_func('weightingForward', self.is_cuda, self)
     func(self, src, weight, basis, weight_index)


-def weighting_backward_src(self, grad_output, weight, basis,
-                           weight_index):  # pragma: no cover
+def bw_weighting_src(self, grad_output, weight, basis, weight_index):
     func = get_func('weightingBackwardSrc', self.is_cuda, self)
     func(self, grad_output, weight, basis, weight_index)


-def weighting_backward_weight(self, grad_output, src, basis,
-                              weight_index):  # pragma: no cover
+def bw_weighting_weight(self, grad_output, src, basis, weight_index):
     func = get_func('weightingBackwardWeight', self.is_cuda, self)
     func(self, grad_output, src, basis, weight_index)


-def weighting_backward_basis(self, grad_output, src, weight,
-                             weight_index):  # pragma: no cover
+def bw_weighting_basis(self, grad_output, src, weight, weight_index):
     func = get_func('weightingBackwardBasis', self.is_cuda, self)
     func(self, grad_output, src, weight, weight_index)
...
-import torch
-from torch.autograd import Variable
-
-
-def new(x, *size):
-    return x.new(*size) if torch.is_tensor(x) else Variable(x.data.new(*size))
...
-import torch
 from torch.autograd import Function

-from .utils.ffi import weighting_forward as weighting_fw
-from .utils.ffi import weighting_backward_src as weighting_bw_src
-from .utils.ffi import weighting_backward_weight as weighting_bw_weight
-from .utils.ffi import weighting_backward_basis as weighting_bw_basis
+from .utils.ffi import fw_weighting, bw_weighting_src
+from .utils.ffi import bw_weighting_weight, bw_weighting_basis


-def weighting_forward(src, weight, basis, weight_index):
-    output = src.new(src.size(0), weight.size(2))
-    weighting_fw(output, src, weight, basis, weight_index)
+def fw(src, weight, basis, weight_index):
+    output = src.new_empty((src.size(0), weight.size(2)))
+    fw_weighting(output, src, weight, basis, weight_index)
     return output


-def weighting_backward_src(grad_output, weight, basis,
-                           weight_index):  # pragma: no cover
-    grad_src = grad_output.new(grad_output.size(0), weight.size(1))
-    weighting_bw_src(grad_src, grad_output, weight, basis, weight_index)
+def bw_src(grad_output, weight, basis, weight_index):
+    grad_src = grad_output.new_empty((grad_output.size(0), weight.size(1)))
+    bw_weighting_src(grad_src, grad_output, weight, basis, weight_index)
     return grad_src


-def weighting_backward_weight(grad_output, src, basis, weight_index,
-                              K):  # pragma: no cover
-    grad_weight = src.new(K, src.size(1), grad_output.size(1))
-    weighting_bw_weight(grad_weight, grad_output, src, basis, weight_index)
+def bw_weight(grad_output, src, basis, weight_index, K):
+    grad_weight = src.new_empty((K, src.size(1), grad_output.size(1)))
+    bw_weighting_weight(grad_weight, grad_output, src, basis, weight_index)
     return grad_weight


-def weighting_backward_basis(grad_output, src, weight,
-                             weight_index):  # pragma: no cover
-    grad_basis = src.new(weight_index.size())
-    weighting_bw_basis(grad_basis, grad_output, src, weight, weight_index)
+def bw_basis(grad_output, src, weight, weight_index):
+    grad_basis = src.new_empty(weight_index.size())
+    bw_weighting_basis(grad_basis, grad_output, src, weight, weight_index)
     return grad_basis


 class SplineWeighting(Function):
-    def forward(self, src, weight, basis, weight_index):
-        self.save_for_backward(src, weight, basis, weight_index)
-        return weighting_forward(src, weight, basis, weight_index)
-
-    def backward(self, grad_output):  # pragma: no cover
-        grad_src = grad_weight = grad_basis = None
-        src, weight, basis, weight_index = self.saved_tensors
-
-        if self.needs_input_grad[0]:
-            grad_src = weighting_backward_src(grad_output, weight, basis,
-                                              weight_index)
-
-        if self.needs_input_grad[1]:
-            K = weight.size(0)
-            grad_weight = weighting_backward_weight(grad_output, src, basis,
-                                                    weight_index, K)
-
-        if self.needs_input_grad[2]:
-            grad_basis = weighting_backward_basis(grad_output, src, weight,
-                                                  weight_index)
-
-        return grad_src, grad_weight, grad_basis, None
-
-
-def spline_weighting(src, weight, basis, weight_index):
-    if torch.is_tensor(src):
-        return weighting_forward(src, weight, basis, weight_index)
-    else:
-        return SplineWeighting()(src, weight, basis, weight_index)
+    @staticmethod
+    def forward(ctx, src, weight, basis, weight_index):
+        ctx.save_for_backward(src, weight, basis, weight_index)
+        return fw(src, weight, basis, weight_index)
+
+    @staticmethod
+    def backward(ctx, grad_output):  # pragma: no cover
+        grad_src = grad_weight = grad_basis = None
+        src, weight, basis, weight_index = ctx.saved_tensors
+
+        if ctx.needs_input_grad[0]:
+            grad_src = bw_src(grad_output, weight, basis, weight_index)
+
+        if ctx.needs_input_grad[1]:
+            K = weight.size(0)
+            grad_weight = bw_weight(grad_output, src, basis, weight_index, K)
+
+        if ctx.needs_input_grad[2]:
+            grad_basis = bw_basis(grad_output, src, weight, weight_index)
+
+        return grad_src, grad_weight, grad_basis, None
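Shape-wise, `SplineWeighting.apply` maps per-edge inputs to per-edge outputs, as implied by `fw` and the backward helpers. A minimal sketch with arbitrary values (again, running it requires the compiled extension):

```python
import torch
from torch_spline_conv.weighting import SplineWeighting

E, M_in, M_out, K, S = 6, 2, 4, 25, 4  # edges, channels, kernel size, basis dim

src = torch.rand(E, M_in)              # features of the source node of each edge
weight = torch.rand(K, M_in, M_out)
basis = torch.rand(E, S)               # B-spline basis products per edge
weight_index = torch.randint(0, K, (E, S), dtype=torch.long)

output = SplineWeighting.apply(src, weight, basis, weight_index)
print(output.size())                   # => (6, 4), i.e. E x M_out
```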