Commit 56ec830f authored by rusty1s's avatar rusty1s
Browse files

spspmm half and version up

parent 3e87af1c
cmake_minimum_required(VERSION 3.0) cmake_minimum_required(VERSION 3.0)
project(torchsparse) project(torchsparse)
set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD 14)
set(TORCHSPARSE_VERSION 0.6.10) set(TORCHSPARSE_VERSION 0.6.11)
option(WITH_CUDA "Enable CUDA support" OFF) option(WITH_CUDA "Enable CUDA support" OFF)
......
package: package:
name: pytorch-sparse name: pytorch-sparse
version: 0.6.10 version: 0.6.11
source: source:
path: ../.. path: ../..
......
...@@ -102,7 +102,7 @@ tests_require = ['pytest', 'pytest-runner', 'pytest-cov'] ...@@ -102,7 +102,7 @@ tests_require = ['pytest', 'pytest-runner', 'pytest-cov']
setup( setup(
name='torch_sparse', name='torch_sparse',
version='0.6.10', version='0.6.11',
author='Matthias Fey', author='Matthias Fey',
author_email='matthias.fey@tu-dortmund.de', author_email='matthias.fey@tu-dortmund.de',
url='https://github.com/rusty1s/pytorch_sparse', url='https://github.com/rusty1s/pytorch_sparse',
......
...@@ -47,9 +47,6 @@ def test_spmm(dtype, device, reduce): ...@@ -47,9 +47,6 @@ def test_spmm(dtype, device, reduce):
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_spspmm(dtype, device): def test_spspmm(dtype, device):
if dtype == torch.half:
return # TODO
src = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtype, src = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtype,
device=device) device=device)
......
...@@ -9,9 +9,6 @@ from .utils import grad_dtypes, devices, tensor ...@@ -9,9 +9,6 @@ from .utils import grad_dtypes, devices, tensor
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_spspmm(dtype, device): def test_spspmm(dtype, device):
if dtype == torch.half:
return # TODO
indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device) indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
valueA = tensor([1, 2, 3, 4, 5], dtype, device) valueA = tensor([1, 2, 3, 4, 5], dtype, device)
indexB = torch.tensor([[0, 2], [1, 0]], device=device) indexB = torch.tensor([[0, 2], [1, 0]], device=device)
...@@ -24,9 +21,6 @@ def test_spspmm(dtype, device): ...@@ -24,9 +21,6 @@ def test_spspmm(dtype, device):
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_sparse_tensor_spspmm(dtype, device): def test_sparse_tensor_spspmm(dtype, device):
if dtype == torch.half:
return # TODO
x = SparseTensor( x = SparseTensor(
row=torch.tensor( row=torch.tensor(
[0, 1, 1, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9], [0, 1, 1, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9],
...@@ -44,8 +38,8 @@ def test_sparse_tensor_spspmm(dtype, device): ...@@ -44,8 +38,8 @@ def test_sparse_tensor_spspmm(dtype, device):
expected = torch.eye(10, dtype=dtype, device=device) expected = torch.eye(10, dtype=dtype, device=device)
out = x @ x.to_dense().t() out = x @ x.to_dense().t()
assert torch.allclose(out, expected, atol=1e-7) assert torch.allclose(out, expected, atol=1e-2)
out = x @ x.t() out = x @ x.t()
out = out.to_dense() out = out.to_dense()
assert torch.allclose(out, expected, atol=1e-7) assert torch.allclose(out, expected, atol=1e-2)
...@@ -3,7 +3,7 @@ import os.path as osp ...@@ -3,7 +3,7 @@ import os.path as osp
import torch import torch
__version__ = '0.6.10' __version__ = '0.6.11'
suffix = 'cuda' if torch.cuda.is_available() else 'cpu' suffix = 'cuda' if torch.cuda.is_available() else 'cpu'
......
...@@ -78,9 +78,16 @@ def spspmm_sum(src: SparseTensor, other: SparseTensor) -> SparseTensor: ...@@ -78,9 +78,16 @@ def spspmm_sum(src: SparseTensor, other: SparseTensor) -> SparseTensor:
assert src.sparse_size(1) == other.sparse_size(0) assert src.sparse_size(1) == other.sparse_size(0)
rowptrA, colA, valueA = src.csr() rowptrA, colA, valueA = src.csr()
rowptrB, colB, valueB = other.csr() rowptrB, colB, valueB = other.csr()
value = valueA
if valueA is not None and valueA.dtype == torch.half:
valueA = valueA.to(torch.float)
if valueB is not None and valueB.dtype == torch.half:
valueB = valueB.to(torch.float)
M, K = src.sparse_size(0), other.sparse_size(1) M, K = src.sparse_size(0), other.sparse_size(1)
rowptrC, colC, valueC = torch.ops.torch_sparse.spspmm_sum( rowptrC, colC, valueC = torch.ops.torch_sparse.spspmm_sum(
rowptrA, colA, valueA, rowptrB, colB, valueB, K) rowptrA, colA, valueA, rowptrB, colB, valueB, K)
if valueC is not None and value is not None:
valueC = valueC.to(value.dtype)
return SparseTensor(row=None, rowptr=rowptrC, col=colC, value=valueC, return SparseTensor(row=None, rowptr=rowptrC, col=colC, value=valueC,
sparse_sizes=(M, K), is_sorted=True) sparse_sizes=(M, K), is_sorted=True)
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment