Commit 9c208e8e authored by Jan Eric Lenssen

tests and small fixes

parent 7761cb1d
'''
import unittest
import torch
from torch.autograd import Variable, gradcheck
from numpy.testing import assert_equal
from .spline import spline
@@ -52,3 +54,4 @@ class EdgewiseSplineWeightingGPUTest(unittest.TestCase):
op = EdgewiseSplineWeightingGPU(amount, index)
test = gradcheck(op, (input, weight), eps=1e-6, atol=1e-4)
self.assertTrue(test)
'''
\ No newline at end of file
@@ -144,20 +144,16 @@ const ${Dtype}* amount, const long* index, int num_threads) {
// Calculate B-spline basis tensor product gradient
adj_g += g * f * w;
}
- atomicAdd(&(grad_amount[e_idx,k_idx]), adj_g);
+ atomicAdd(&(grad_amount[e_idx*${k_max} + k_idx]), adj_g);
}
}
}
'''
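
The atomicAdd change above fixes a real indexing bug, not just style: in CUDA C, `grad_amount[e_idx,k_idx]` is not 2-D indexing; the comma operator evaluates and discards `e_idx`, so every edge accumulated into the same row. The new line flattens the 2-D coordinate by hand. A minimal PyTorch sketch of the row-major equivalence (toy sizes, illustrative names):

import torch

# Entry (e_idx, k_idx) of an (num_edges, k_max) buffer lives at
# linear offset e_idx * k_max + k_idx in row-major memory.
num_edges, k_max = 4, 8
buf = torch.arange(num_edges * k_max)
e_idx, k_idx = 2, 5
assert buf[e_idx * k_max + k_idx] == buf.view(num_edges, k_max)[e_idx, k_idx]
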
- def get_weighting_forward_kernel(M_in, M_out, k_max, bt_to_adj=False):
+ def get_weighting_forward_kernel(M_in, M_out, k_max):
cuda_tensor = torch.FloatTensor([1]).cuda()
- if bt_to_adj:
- kernel = _edgewise_spline_weighting_forward_kernel
- else:
- kernel = _edgewise_spline_weighting_forward_kernel
+ kernel = _edgewise_spline_weighting_forward_kernel
with torch.cuda.device_of(cuda_tensor):
f_fw = load_kernel(
'edgewise_spline_weighting_forward_kernel',
@@ -169,12 +165,11 @@ def get_weighting_forward_kernel(M_in, M_out, k_max, bt_to_adj=False):
return f_fw
- def get_weighting_backward_kernel(M_in, M_out, k_max, K, bt_to_adj=False):
+ def get_weighting_backward_kernel(M_in, M_out, k_max, K, bp_to_adj=False):
cuda_tensor = torch.FloatTensor([1]).cuda()
- if bt_to_adj:
+ if bp_to_adj:
kernel = _edgewise_spline_weighting_backward_kernel_bp2adj
else:
kernel = _edgewise_spline_weighting_backward_kernel
with torch.cuda.device_of(cuda_tensor):
f_bw = load_kernel(
@@ -199,7 +194,7 @@ const long* kernel_size, const long* is_open_spline, int num_threads) {
const int e_idx = idx / ${k_max};
int k_idx = idx % ${k_max};
- int K = 1.0;
+ int K = ${K};
int k_idx_mod;
int bot;
int top;
@@ -209,6 +204,7 @@ const long* kernel_size, const long* is_open_spline, int num_threads) {
long i = 0;
for (int d_idx = 0; d_idx < ${dim}; d_idx++) {
+ K /= kernel_size[d_idx];
k_idx_mod = k_idx % 2;
k_idx >>= 1;
@@ -224,7 +220,6 @@ const long* kernel_size, const long* is_open_spline, int num_threads) {
top = (bot + 1) % kernel_size[d_idx];
bot %= kernel_size[d_idx];
i += ((1 - k_idx_mod) * bot + k_idx_mod * top) * K;
- K *= kernel_size[d_idx];
}
amount[idx] = a;
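
With the division moved to the top of the loop (and the trailing multiply dropped), `K` starts at the full kernel size `${K}` and is divided down, so dimension `d_idx` contributes with stride `${K} / (kernel_size[0] * ... * kernel_size[d_idx])`. A hedged Python sketch of the corner-index decoding this loop performs (function and argument names are illustrative, not from the source):

def corner_index(k_idx, bots, kernel_size, K):
    # k_idx holds one bit per dimension choosing the bottom or top knot
    # of the enclosing grid cell; i accumulates the flat kernel index
    # with the divide-down strides used in the kernel above.
    i = 0
    for d in range(len(kernel_size)):
        K //= kernel_size[d]
        k_idx_mod = k_idx % 2
        k_idx >>= 1
        top = (bots[d] + 1) % kernel_size[d]
        bot = bots[d] % kernel_size[d]
        i += ((1 - k_idx_mod) * bot + k_idx_mod * top) * K
    return i
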
@@ -360,13 +355,13 @@ int num_threads) {
grad_out += grad_amount[a_idx]*amount[a_idx]/residual;
}
- grad_adj[e_idx* ${dim} + d_idx] = grad_out;
+ grad_adj[e_idx*${dim} + d_idx] = grad_out;
}
}
'''
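
The `${Dtype}`, `${K}`, `${k_max}` and `${dim}` fields in the kernel sources above use Python `string.Template` syntax; presumably `load_kernel` substitutes concrete values before the source is compiled (an assumption about its internals, sketched below):

from string import Template

# Hypothetical illustration: sizes are baked into the CUDA source at
# kernel-load time, so the compiler sees plain integer constants.
src = 'const int e_idx = idx / ${k_max}; int K = ${K};'
print(Template(src).substitute(k_max=4, K=12))
# const int e_idx = idx / 4; int K = 12;
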
- def get_basis_kernel(k_max, K, dim, degree, bt_to_adj=False):
+ def get_basis_kernel(k_max, K, dim, degree):
if degree == 3:
_spline_kernel = _spline_kernel_cubic
elif degree == 2:
@@ -388,9 +383,9 @@ def get_basis_kernel(k_max, K, dim, degree, bt_to_adj=False):
def get_basis_backward_kernel(k_max, K, dim, degree):
if degree == 3:
- _spline_kernel = _spline_kernel_cubic
+ raise NotImplementedError
elif degree == 2:
- _spline_kernel = _spline_kernel_quadratic
+ raise NotImplementedError
else:
_spline_kernel = _spline_kernel_linear_backward
@@ -423,15 +418,20 @@ class SplineConvGPU(Function):
def forward(self, input, weight, adj_values):
assert input.is_cuda and weight.is_cuda
self.K, self.M_in, self.M_out = weight.size()
# Compute B-spline basis tensor products
+ adj_values = adj_values.unsqueeze(1) if len(adj_values.size()) < 2 \
+ else adj_values
+ if self.bp_to_adj:
+ self.save_for_backward(input, weight, adj_values)
+ #adj_values = torch.clamp(adj_values,min=0.0,max=1.0)
+ else:
+ self.save_for_backward(input, weight)
num_edges, dim = adj_values.size()
k_max = 2 ** dim
amount = adj_values.new(num_edges, k_max)
index = adj_values.new(num_edges, k_max).long()
num_threads = amount.numel()
@@ -445,7 +445,8 @@ class SplineConvGPU(Function):
amount.data_ptr(),
index.data_ptr(),
self.kernel_size.data_ptr(),
- self.is_open_spline.data_ptr(), num_threads
+ self.is_open_spline.data_ptr(),
+ num_threads
],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
@@ -462,14 +463,12 @@ class SplineConvGPU(Function):
weight.data_ptr(),
output.data_ptr(),
amount.data_ptr(),
- index.data_ptr(), num_threads
+ index.data_ptr(),
+ num_threads
],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
- if self.bp_to_adj:
- self.save_for_backward(input, weight, adj_values)
- else:
- self.save_for_backward(input, weight)
self.amount = amount
self.index = index
@@ -477,16 +476,19 @@ class SplineConvGPU(Function):
return output
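
For orientation, a pure-PyTorch sketch of what the weighting kernel launched above computes (assumed semantics, not the shipped implementation): each edge's output is the basis-weighted sum, over its k_max kernel-grid corners, of the source features transformed by the corner's weight matrix.

import torch

def edgewise_spline_weighting_reference(input, weight, amount, index):
    # input:  (num_edges, M_in)   source node features per edge
    # weight: (K, M_in, M_out)    one weight matrix per kernel index
    # amount: (num_edges, k_max)  B-spline basis tensor products
    # index:  (num_edges, k_max)  flat kernel indices of the corners
    out = 0
    for k in range(amount.size(1)):
        w_k = weight[index[:, k]]                 # (num_edges, M_in, M_out)
        f_k = torch.bmm(input.unsqueeze(1), w_k)  # (num_edges, 1, M_out)
        out = out + amount[:, k].unsqueeze(1) * f_k.squeeze(1)
    return out
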
def backward(self, grad_output):
+ print('grad_output:', grad_output.min(), grad_output.max())
grad_input = grad_output.new(grad_output.size(0), self.M_in).fill_(0)
grad_weight = grad_output.new(self.K, self.M_in, self.M_out).fill_(0)
num_threads = grad_output.numel()
if self.bp_to_adj:
input, weight, adj_values = self.saved_tensors
+ #adj_values = torch.clamp(adj_values,min=0.0,max=1.0)
amount = self.amount
index = self.index
+ grad_amount = grad_output.new(amount.size(0),
+ amount.size(1)).fill_(0)
with torch.cuda.device_of(grad_output):
self.f_weighting_bw(
block=(cuda_num_threads, 1, 1),
@@ -505,6 +507,7 @@ class SplineConvGPU(Function):
grad_adj = grad_amount.new(grad_amount.size(0),
self.kernel_size.size(0)).fill_(0)
+ num_threads = grad_adj.numel()
with torch.cuda.device_of(grad_amount):
@@ -522,12 +525,19 @@ class SplineConvGPU(Function):
],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
- return grad_input, grad_weight, None
+ #print('grad_input:',grad_input.min(), grad_input.max())
+ #print('grad_weight:',grad_weight[:,:,-1].min(), grad_weight[:,:,-1].max())
+ #print('grad_amount:',grad_amount.min(), grad_amount.max())
+ #print('grad_adj:',grad_adj.min(), grad_adj.max())
+ return grad_input, grad_weight, grad_adj
else:
input, weight = self.saved_tensors
amount = self.amount
index = self.index
+ grad_amount = grad_output.new(amount.size(0),
+ amount.size(1)).fill_(0)
with torch.cuda.device_of(grad_output):
self.f_weighting_bw(
block=(cuda_num_threads, 1, 1),
@@ -536,6 +546,7 @@ class SplineConvGPU(Function):
grad_output.data_ptr(),
grad_input.data_ptr(),
grad_weight.data_ptr(),
+ grad_amount.data_ptr(),
input.data_ptr(),
weight.data_ptr(),
amount.data_ptr(),
@@ -543,4 +554,5 @@ class SplineConvGPU(Function):
],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
return grad_input, grad_weight, None
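
The two code paths return different third values (`grad_adj` vs. `None`) because the old-style `torch.autograd.Function` contract requires `backward` to return exactly one gradient per `forward` argument, with `None` for arguments that receive no gradient. A minimal sketch of the contract (hypothetical example class):

import torch
from torch.autograd import Function

class MaskedScale(Function):
    # Old-style Function, as above: no @staticmethod, state on self.
    def forward(self, x, mask):
        self.save_for_backward(mask)
        return x * mask

    def backward(self, grad_output):
        mask, = self.saved_tensors
        # One return per forward argument: grad for x, None for mask.
        return grad_output * mask, None
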
@@ -2,10 +2,12 @@ from __future__ import division
import unittest
import torch
- from torch.autograd import Variable
+ from torch.autograd import Variable, gradcheck
from numpy.testing import assert_almost_equal
from .spline_conv import spline_conv
+ from .spline_conv_gpu import get_basis_kernel, get_basis_backward_kernel, \
+ get_weighting_forward_kernel, get_weighting_backward_kernel, SplineConvGPU
class SplineConvTest(unittest.TestCase):
@@ -14,7 +16,9 @@ class SplineConvTest(unittest.TestCase):
edges = torch.LongTensor([[0, 0, 0, 0], [1, 2, 3, 4]])
values = [[0.25, 0.125], [0.25, 0.375], [0.75, 0.625], [0.75, 0.875]]
values = torch.FloatTensor(values)
- adj = torch.sparse.FloatTensor(edges, values, torch.Size([5, 5, 2]))
+ adj = {'indices': edges.cuda(), 'values': Variable(values.cuda()),
+ 'size': torch.Size([5, 5, 2])}
kernel_size = torch.cuda.LongTensor([3, 4])
is_open_spline = torch.cuda.LongTensor([1, 0])
@@ -22,11 +26,26 @@ class SplineConvTest(unittest.TestCase):
input = torch.FloatTensor([[9, 10], [1, 2], [3, 4], [5, 6], [7, 8]])
weight = torch.arange(0.5, 0.5 * 27, step=0.5).view(13, 2, 1)
- adj, input, weight = adj.cuda(), input.cuda(), weight.cuda()
+ input, weight = input.cuda(), weight.cuda()
+ input, weight = Variable(input), Variable(weight)
+ K = 12
+ in_features = 2
+ out_features = 1
+ degree = 1
+ dim = 2
+ k_max = (degree + 1) ** dim
+ fw_k = get_weighting_forward_kernel(in_features, out_features, k_max)
+ bw_k = get_weighting_backward_kernel(in_features, out_features, k_max,
+ K, True)
+ basis_fw_k = get_basis_kernel(k_max, K, dim, degree)
+ basis_bw_k = get_basis_backward_kernel(k_max, K, dim, degree)
output = spline_conv(
- adj, input, weight, kernel_size, is_open_spline, K=12, degree=1)
+ adj, input, weight, kernel_size, is_open_spline, K, fw_k, bw_k,
+ basis_fw_k, basis_bw_k, bp_to_adj=True)
expected_output = [
[(12.5 * 9 + 13 * 10 + 266) / 4],
@@ -35,5 +54,37 @@ class SplineConvTest(unittest.TestCase):
[12.5 * 5 + 13 * 6],
[12.5 * 7 + 13 * 8],
]
assert_almost_equal(output.cpu().data.numpy(), expected_output, 1)
+ @unittest.skipIf(not torch.cuda.is_available(), 'no GPU')
+ def test_backward(self):
+ kernel_size = torch.cuda.LongTensor([3, 4])
+ is_open_spline = torch.cuda.LongTensor([1, 0])
+ input = torch.randn(4, 2).double().cuda()
+ weight = torch.randn(12, 2, 1).double().cuda()
+ values = torch.randn(4, 2).double().cuda()
+ input = Variable(input, requires_grad=True)
+ weight = Variable(weight, requires_grad=True)
+ values = Variable(values, requires_grad=True)
+ K = 12
+ in_features = 2
+ out_features = 1
+ degree = 1
+ dim = 2
+ k_max = (degree + 1) ** dim
+ fw_k = get_weighting_forward_kernel(in_features, out_features, k_max)
+ bw_k = get_weighting_backward_kernel(in_features, out_features, k_max,
+ K, bp_to_adj=True)
+ basis_fw_k = get_basis_kernel(k_max, K, dim, degree)
+ basis_bw_k = get_basis_backward_kernel(k_max, K, dim, degree)
+ op = SplineConvGPU(kernel_size, is_open_spline, K, degree,
+ basis_fw_k, basis_bw_k, fw_k, bw_k, bp_to_adj=True)
+ test = gradcheck(op, (input, weight, values), eps=1e-6, atol=1e-4)
+ print(test)
+ self.assertTrue(test)
\ No newline at end of file
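
The test builds `.double()` tensors deliberately: `gradcheck` compares analytic gradients against finite differences with `eps=1e-6`, which is below float32 resolution, so single precision would fail spuriously. The same pattern in miniature:

import torch
from torch.autograd import Variable, gradcheck

# Finite-difference gradient checks need float64; eps-sized
# perturbations drown in float32 rounding error.
x = Variable(torch.randn(3).double(), requires_grad=True)
assert gradcheck(lambda t: (t * t).sum(), (x,), eps=1e-6, atol=1e-4)
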
'''
import unittest
import torch
@@ -62,3 +63,4 @@ class SplineQuadraticGPUTest(unittest.TestCase):
assert_almost_equal(a1.cpu().numpy(), a2, 4)
assert_equal(i1.cpu().numpy(), i2)
'''
\ No newline at end of file
'''
import unittest
import torch
@@ -44,3 +45,5 @@ class SplineLinearGPUTest(unittest.TestCase):
assert_almost_equal(a1.cpu().numpy(), a2, 2)
assert_equal(i1.cpu().numpy(), i2)
'''
\ No newline at end of file
'''
import unittest
import torch
@@ -49,3 +50,4 @@ class SplineQuadraticGPUTest(unittest.TestCase):
assert_almost_equal(a1.cpu().numpy(), a2, 2)
assert_equal(i1.cpu().numpy(), i2)
'''
\ No newline at end of file