Commit 76872e45 authored by rusty1s's avatar rusty1s
Browse files

few fixes

parent ffec0a56
...@@ -2,28 +2,52 @@ from itertools import product ...@@ -2,28 +2,52 @@ from itertools import product
import pytest import pytest
import torch import torch
from torch.autograd import gradcheck
from torch_sparse import spspmm from torch_sparse import spspmm
from torch_sparse.matmul import SpSpMM
from .utils import dtypes, devices, tensor from .utils import dtypes, devices, tensor
# gradcheck/backward comparisons need double precision; deliberately shadow
# the `dtypes` list imported from .utils for this module.
dtypes = [torch.double]


@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_coalesced_spspmm(dtype, device):
    """Sparse-sparse matmul matches a dense reference, forward and backward.

    Builds two coalesced sparse matrices A (3x3) and B (3x2), multiplies
    them with ``spspmm`` and checks the densified product against
    ``torch.matmul`` on dense copies.  The backward pass is exercised as a
    smoke test on both the sparse and the dense path.
    """
    indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
    valueA = tensor([1, 2, 3, 4, 5], dtype, device, requires_grad=True)
    sizeA = torch.Size([3, 3])
    A = (indexA, valueA, sizeA)
    # Detach before densifying so A_dense is a leaf tensor; calling
    # .requires_grad_() on a non-leaf (built from grad-requiring valueA)
    # raises, and .grad would be None on a non-leaf anyway.
    A_dense = torch.sparse_coo_tensor(indexA, valueA.detach(), sizeA).to_dense()
    A_dense.requires_grad_()

    indexB = torch.tensor([[0, 2], [1, 0]], device=device)
    valueB = tensor([2, 4], dtype, device, requires_grad=True)
    sizeB = torch.Size([3, 2])
    B = (indexB, valueB, sizeB)
    B_dense = torch.sparse_coo_tensor(indexB, valueB.detach(), sizeB).to_dense()
    B_dense.requires_grad_()

    index, value, size = spspmm(*A, *B)

    # Forward: densified sparse product must equal the dense product
    # ([[8, 0], [0, 6], [0, 8]] for these fixtures).
    out = torch.sparse_coo_tensor(index, value, size)
    expected = torch.matmul(A_dense, B_dense)
    assert out.to_dense().tolist() == expected.tolist()

    # Backward smoke test: both paths must backpropagate without error.
    # TODO(review): compare valueA.grad / valueB.grad against the dense
    # gradients (A_dense.grad masked to the sparse pattern) once the
    # backward of SpSpMM is finalized; gradcheck is left disabled until then.
    value.sum().backward()
    expected.sum().backward()
    assert valueA.grad is not None
    assert A_dense.grad is not None
...@@ -7,5 +7,6 @@ if torch.cuda.is_available(): # pragma: no cover ...@@ -7,5 +7,6 @@ if torch.cuda.is_available(): # pragma: no cover
devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))] devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))]
def tensor(x, dtype, device, requires_grad=False):
    """Construct a tensor from ``x`` on ``device``, passing ``None`` through.

    ``requires_grad`` defaults to ``False`` so existing callers are
    unaffected; set it to ``True`` for gradient-checked fixtures.
    """
    if x is None:
        return None
    return torch.tensor(x, dtype=dtype, device=device,
                        requires_grad=requires_grad)
# Public API of torch_sparse, kept in alphabetical order of the
# submodules they come from.
from .matmul import spspmm
from .transpose import transpose

__all__ = [
    'spspmm',
    'transpose',
]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment