Commit d6564de5 authored by rusty1s

running tests

parent cadfd303
@@ -32,6 +32,11 @@ spspmm_cuda(at::Tensor indexA, at::Tensor valueA, at::Tensor indexB,
            at::Tensor valueB, int m, int k, int n) {
  init_cusparse();

  // Make all inputs densely packed so that their raw data pointers can be
  // handed to cuSPARSE directly.
  indexA = indexA.contiguous();
  valueA = valueA.contiguous();
  indexB = indexB.contiguous();
  valueB = valueB.contiguous();

  auto nnzA = valueA.size(0);
  auto nnzB = valueB.size(0);
...
@@ -13,8 +13,8 @@ cmdclass = {}
if torch.cuda.is_available():
    ext_modules += [
-        CUDAExtension('matmul_cuda',
-                      ['cuda/matmul.cpp', 'cuda/matmul_kernel.cu'])
+        CUDAExtension('spspmm_cuda',
+                      ['cuda/spspmm.cpp', 'cuda/spspmm_kernel.cu'])
    ]
    cmdclass['build_ext'] = BuildExtension
...
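For context, the CUDAExtension/BuildExtension pair above comes from torch.utils.cpp_extension and compiles the listed .cpp/.cu sources into an importable spspmm_cuda module. A minimal sketch of how these pieces fit into a full setup() call; the surrounding arguments are assumptions, not the repo's actual setup.py:

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='torch_sparse',  # assumed; only the ext_modules lines appear above
    ext_modules=[
        CUDAExtension('spspmm_cuda',
                      ['cuda/spspmm.cpp', 'cuda/spspmm_kernel.cu']),
    ],
    # BuildExtension handles the mixed C++/CUDA compilation flags.
    cmdclass={'build_ext': BuildExtension},
)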
# from itertools import product
#
# import pytest
# import torch
# from torch_sparse import sparse_coo_tensor, spspmm, to_value
#
# from .utils import dtypes, devices, tensor
#
# tests = [{
#     'name': 'Test coalesced input',
#     'indexA': [[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]],
#     'valueA': [1, 2, 3, 4, 5],
#     'sizeA': [3, 3],
#     'indexB': [[0, 2], [1, 0]],
#     'valueB': [2, 4],
#     'sizeB': [3, 2],
# }, {
#     'name': 'Test uncoalesced input',
#     'indexA': [[2, 2, 1, 0, 2, 0], [1, 1, 0, 2, 0, 1]],
#     'valueA': [3, 2, 3, 2, 4, 1],
#     'sizeA': [3, 3],
#     'indexB': [[2, 0, 2], [0, 1, 0]],
#     'valueB': [2, 2, 2],
#     'sizeB': [3, 2],
# }]
#
#
# @pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
# def test_spspmm(test, dtype, device):
#     indexA = torch.tensor(test['indexA'], device=device)
#     valueA = tensor(test['valueA'], dtype, device, requires_grad=True)
#     sizeA = torch.Size(test['sizeA'])
#     A = sparse_coo_tensor(indexA, valueA, sizeA)
#     denseA = A.detach().to_dense().requires_grad_()
#
#     indexB = torch.tensor(test['indexB'], device=device)
#     valueB = tensor(test['valueB'], dtype, device, requires_grad=True)
#     sizeB = torch.Size(test['sizeB'])
#     B = sparse_coo_tensor(indexB, valueB, sizeB)
#     denseB = B.detach().to_dense().requires_grad_()
#
#     C = spspmm(A, B)
#     denseC = torch.matmul(denseA, denseB)
#     assert C.detach().to_dense().tolist() == denseC.tolist()
#
#     to_value(C).sum().backward()
#     denseC.sum().backward()
#     assert valueA.grad.tolist() == denseA.grad[indexA[0], indexA[1]].tolist()
import torch
from torch_sparse import spmm


def test_spmm():
    row = torch.tensor([0, 0, 1, 2, 2])
    col = torch.tensor([0, 2, 1, 0, 1])
    index = torch.stack([row, col], dim=0)
    value = torch.tensor([1, 2, 4, 1, 3])
    matrix = torch.tensor([[1, 4], [2, 5], [3, 6]])

    out = spmm(index, value, 3, matrix)
    assert out.tolist() == [[7, 16], [8, 20], [7, 19]]
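As a sanity check on the asserted output, here is the same product in dense form; this sketch is not part of the commit:

import torch

# Dense form of the sparse matrix given by (row, col, value) above.
A = torch.tensor([[1, 0, 2],
                  [0, 4, 0],
                  [1, 3, 0]])
matrix = torch.tensor([[1, 4], [2, 5], [3, 6]])
assert torch.matmul(A, matrix).tolist() == [[7, 16], [8, 20], [7, 19]]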
from itertools import product

import pytest
import torch
from torch_sparse import spspmm

from .utils import dtypes, devices, tensor


@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_spspmm(dtype, device):
    indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
    valueA = tensor([1, 2, 3, 4, 5], dtype, device)
    sizeA = torch.Size([3, 3])

    indexB = torch.tensor([[0, 2], [1, 0]], device=device)
    valueB = tensor([2, 4], dtype, device)
    sizeB = torch.Size([3, 2])

    indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)
    assert indexC.tolist() == [[0, 1, 2], [0, 1, 1]]
    assert valueC.tolist() == [8, 6, 8]

    A = torch.sparse_coo_tensor(indexA, valueA, sizeA, device=device)
    A = A.to_dense().requires_grad_()
    B = torch.sparse_coo_tensor(indexB, valueB, sizeB, device=device)
    B = B.to_dense().requires_grad_()
    torch.matmul(A, B).sum().backward()

    valueA = valueA.requires_grad_()
    valueB = valueB.requires_grad_()
    indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)
    valueC.sum().backward()

    assert valueA.grad.tolist() == A.grad[indexA[0], indexA[1]].tolist()
    assert valueB.grad.tolist() == B.grad[indexB[0], indexB[1]].tolist()
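A dense cross-check of the asserted sparse result; again a sketch, not part of the commit:

import torch

# Dense forms of A and B as defined in the test above.
A = torch.tensor([[0, 1, 2],
                  [3, 0, 0],
                  [4, 5, 0]])
B = torch.tensor([[0, 2],
                  [0, 0],
                  [4, 0]])
# The product has nonzeros at (0, 0), (1, 1) and (2, 1) with values 8, 6, 8,
# matching indexC and valueC above.
assert torch.matmul(A, B).tolist() == [[8, 0], [0, 6], [0, 8]]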
from .coalesce import coalesce
from .transpose import transpose
-from .matmul import spspmm
+from .spmm import spmm
+from .spspmm import spspmm

__version__ = '0.2.0'
@@ -8,5 +9,6 @@ __all__ = [
    '__version__',
    'coalesce',
    'transpose',
+    'spmm',
    'spspmm',
]
@@ -3,6 +3,8 @@ import torch_scatter


def coalesce(index, value, m, n, op='add', fill_value=0):
    """Row-wise reorders and removes duplicate entries in a sparse matrix."""
    row, col = index

    unique, inv = torch.unique(row * n + col, sorted=True, return_inverse=True)
...
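The visible line is the core trick: each (row, col) pair is linearized to the scalar row * n + col, so duplicate entries collapse onto equal keys and a single torch.unique call finds them. A hedged sketch of the idea, with scatter_add standing in for the op='add' reduction (the module's remaining lines are truncated above):

import torch
from torch_scatter import scatter_add

row = torch.tensor([0, 0, 1])
col = torch.tensor([1, 1, 2])
value = torch.tensor([3.0, 4.0, 5.0])
n = 3

# Both (0, 1) entries map to the same linear position 0 * 3 + 1 = 1.
unique, inv = torch.unique(row * n + col, sorted=True, return_inverse=True)
out = scatter_add(value, inv, dim=0, dim_size=unique.size(0))
assert out.tolist() == [7.0, 5.0]  # 3 + 4 summed, 5 kept

# Recover the coalesced (row, col) index from the linear positions.
index = torch.stack([unique // n, unique % n], dim=0)
assert index.tolist() == [[0, 1], [1, 2]]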
from torch_scatter import scatter_add


def spmm(index, value, m, matrix):
    """Matrix product of a sparse matrix with a dense matrix."""
    row, col = index

    # Treat a dense vector as a one-column matrix.
    matrix = matrix if matrix.dim() > 1 else matrix.unsqueeze(-1)

    # Gather the rows of `matrix` selected by the column indices, scale each
    # gathered row by its sparse value, and sum the results into the output
    # rows selected by the row indices.
    out = matrix[col]
    out = out * value.unsqueeze(-1)
    out = scatter_add(out, row, dim=0, dim_size=m)

    return out
import torch
from torch import from_numpy
import scipy.sparse

from torch_sparse import transpose

if torch.cuda.is_available():
-    import matmul_cuda
+    import spspmm_cuda


class SpSpMM(torch.autograd.Function):
-    """Sparse matrix product of two sparse tensors with autograd support."""
+    """Sparse matrix product of two sparse matrices with autograd support."""

    @staticmethod
    def forward(ctx, indexA, valueA, indexB, valueB, m, k, n):
@@ -24,14 +25,16 @@ class SpSpMM(torch.autograd.Function):
        grad_valueA = grad_valueB = None

        if ctx.needs_input_grad[1]:
-            indexB, valueB = transpose(indexB, valueB, k, n)
-            _, grad_valueA = mm(indexC, grad_valueC, indexB, valueB, m, n, k)
-            # TODO: Filter values.
+            indexB_T, valueB_T = transpose(indexB, valueB, k, n)
+            grad_indexA, grad_valueA = mm(indexC, grad_valueC, indexB_T,
+                                          valueB_T, m, n, k)
+            grad_valueA = lift(grad_indexA, grad_valueA, indexA, k)

-        if ctx.needs_input_grad[4]:
-            indexA, valueA = transpose(indexA, valueA, m, k)
-            _, grad_valueB = mm(indexA, valueA, indexC, grad_valueC, k, m, n)
-            # TODO: Filter values.
+        if ctx.needs_input_grad[3]:
+            indexA_T, valueA_T = transpose(indexA, valueA, m, k)
+            grad_indexB, grad_valueB = mm(indexA_T, valueA_T, indexC,
+                                          grad_valueC, k, m, n)
+            grad_valueB = lift(grad_indexB, grad_valueB, indexB, n)

        return None, grad_valueA, None, grad_valueB, None, None, None
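The rewritten backward also fixes the gradient check for valueB (input position 3, not 4, in forward's argument list) and replaces the "Filter values." TODOs with lift, which restricts each gradient to the corresponding input's sparsity pattern. The dense identities being implemented, checked with plain tensors (a sketch, not part of the commit):

import torch

# For C = A @ B with loss L, autograd gives
#   dL/dA = dL/dC @ B^T   and   dL/dB = A^T @ dL/dC;
# the sparse backward above computes exactly these products and then keeps
# only the entries lying on A's (resp. B's) sparsity pattern.
A = torch.tensor([[0., 1., 2.], [3., 0., 0.], [4., 5., 0.]], requires_grad=True)
B = torch.tensor([[0., 2.], [0., 0.], [4., 0.]], requires_grad=True)
(A @ B).sum().backward()

grad_C = torch.ones(3, 2)  # gradient of .sum() with respect to C
assert torch.equal(A.grad, grad_C @ B.detach().t())
assert torch.equal(B.grad, A.detach().t() @ grad_C)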
@@ -43,7 +46,7 @@ def mm(indexA, valueA, indexB, valueB, m, k, n):
    assert valueA.dtype == valueB.dtype

    if indexA.is_cuda:
-        return matmul_cuda.spspmm(indexA, valueA, indexB, valueB, m, k, n)
+        return spspmm_cuda.spspmm(indexA, valueA, indexB, valueB, m, k, n)

    A = to_scipy(indexA, valueA, m, k)
    B = to_scipy(indexB, valueB, k, n)
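The rest of mm is truncated here; presumably it multiplies the two scipy matrices and converts the result back through from_scipy. A hedged sketch of that CPU fallback under those assumptions (mm_cpu_sketch is a hypothetical name, not the module's code):

import scipy.sparse
import torch
from torch import from_numpy

def mm_cpu_sketch(indexA, valueA, indexB, valueB, m, k, n):
    A = scipy.sparse.coo_matrix(
        (valueA.numpy(), (indexA[0].numpy(), indexA[1].numpy())), (m, k))
    B = scipy.sparse.coo_matrix(
        (valueB.numpy(), (indexB[0].numpy(), indexB[1].numpy())), (k, n))
    C = (A.tocsr() @ B.tocsr()).tocoo()  # sparse-sparse product in scipy
    row, col = from_numpy(C.row), from_numpy(C.col)
    index = torch.stack([row, col], dim=0).to(torch.long)
    return index, from_numpy(C.data)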
@@ -58,6 +61,17 @@ def to_scipy(index, value, m, n):


def from_scipy(A):
-    row, col, value = A.row, A.col, A.data
+    row, col, value = from_numpy(A.row), from_numpy(A.col), from_numpy(A.data)
    index = torch.stack([row, col], dim=0).to(torch.long)
    return index, value


def lift(indexA, valueA, indexB, n):
    # Linearize (row, col) pairs into scalar positions row * n + col.
    indexA = indexA[0] * n + indexA[1]
    indexB = indexB[0] * n + indexB[1]

    # Scatter A's values into a dense buffer, then gather them back at B's
    # positions: exactly the values of A that fall onto B's sparsity pattern
    # survive, and missing entries become zero. The buffer has to cover both
    # index sets, otherwise the scatter below can write out of bounds.
    value = valueA.new_zeros(max(indexA.max().item(), indexB.max().item()) + 1)
    value[indexA] = valueA
    value = value[indexB]

    return value
@@ -3,6 +3,8 @@ from torch_sparse import coalesce


def transpose(index, value, m, n):
    """Transpose of a sparse matrix."""
    row, col = index

    index = torch.stack([col, row], dim=0)
...
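A small usage example, assuming the truncated body coalesces the swapped index back into row-major order, as the coalesce import suggests:

import torch
from torch_sparse import transpose

# 2 x 3 matrix with entries (0, 0)=1, (0, 2)=2, (1, 1)=3.
index = torch.tensor([[0, 0, 1], [0, 2, 1]])
value = torch.tensor([1.0, 2.0, 3.0])

index_T, value_T = transpose(index, value, 2, 3)
# Transposed entries in row-major order: (0, 0)=1, (1, 1)=3, (2, 0)=2.
assert index_T.tolist() == [[0, 1, 2], [0, 1, 0]]
assert value_T.tolist() == [1.0, 3.0, 2.0]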