Unverified Commit 7dba1991 authored by Israt Nisa's avatar Israt Nisa Committed by GitHub
Browse files

Sparse ops: Support elementwise ops between sparse and diagonal matrix (#4617)



* initial commit

* address comments

* lint check
Co-authored-by: Israt Nisa <nisisrat@amazon.com>
parent ace76327
"""dgl elementwise operators for sparse matrix module."""
from typing import Union
import torch
from .sp_matrix import SparseMatrix
from .diag_matrix import DiagMatrix
__all__ = ['add', 'sub', 'mul', 'div', 'rdiv', 'power', 'rpower']
__all__ = ["add", "sub", "mul", "div", "rdiv", "power", "rpower"]
def add(
    A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix]
) -> SparseMatrix:
    """Elementwise addition.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix
    B : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix

    Returns
    -------
    SparseMatrix
        The sum, always returned in sparse (COO) form.

    Examples
    --------
    Case 1: Add two sparse matrices of same sparsity structure

    >>> rowA = torch.tensor([1, 0, 2])
    >>> colA = torch.tensor([0, 3, 2])
    >>> valA = torch.tensor([10, 20, 30])
    >>> A = SparseMatrix(rowA, colA, valA, shape=(3, 4))
    >>> A + A
    SparseMatrix(indices=tensor([[0, 1, 2],
            [3, 0, 2]]),
    values=tensor([40, 20, 60]),
    shape=(3, 4), nnz=3)
    >>> w = torch.arange(1, len(rowA)+1)
    >>> A + A(w)
    SparseMatrix(indices=tensor([[0, 1, 2],
            [3, 0, 2]]),
    values=tensor([21, 12, 33]),
    shape=(3, 4), nnz=3)

    Case 2: Add two sparse matrices of different sparsity structure

    >>> rowB = torch.tensor([1, 2, 0, 2, 1])
    >>> colB = torch.tensor([0, 2, 1, 3, 3])
    >>> valB = torch.tensor([1, 2, 3, 4, 5])
    >>> B = SparseMatrix(rowB, colB, valB, shape=(3, 4))
    >>> A + B
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2, 2],
            [1, 3, 0, 3, 2, 3]]),
    values=tensor([ 3, 20, 11,  5, 32,  4]),
    shape=(3, 4), nnz=6)

    Case 3: Add sparse matrix and diagonal matrix

    >>> D = diag(torch.arange(2, 5), shape=A.shape)
    >>> A + D
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
            [0, 3, 0, 1, 2]]),
    values=tensor([ 2, 20, 10,  3, 34]),
    shape=(3, 4), nnz=5)
    """
    # Diagonal operands are handled by converting them to COO sparse form.
    # Converting A as well (not only B) makes the direct call add(D, A) work,
    # matching the DiagMatrix type accepted by the signature.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        assert A.shape == B.shape, (
            "The shape of sparse matrix A {} and"
            " B {} are expected to match".format(A.shape, B.shape)
        )
        # torch handles union of the two sparsity patterns for us.
        C = A.adj + B.adj
        return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
    raise RuntimeError(
        "Elementwise addition between {} and {} is not "
        "supported.".format(type(A), type(B))
    )
def sub(
    A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix]
) -> SparseMatrix:
    """Elementwise subtraction.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix
    B : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix

    Returns
    -------
    SparseMatrix
        The difference, always returned in sparse (COO) form.

    Examples
    --------
    Case 1: Subtract two sparse matrices

    >>> rowA = torch.tensor([1, 0, 2])
    >>> colA = torch.tensor([0, 3, 2])
    >>> valA = torch.tensor([10, 20, 30])
    >>> A = SparseMatrix(rowA, colA, valA, shape=(3, 4))
    >>> rowB = torch.tensor([1, 2, 0, 2, 1])
    >>> colB = torch.tensor([0, 2, 1, 3, 3])
    >>> valB = torch.tensor([1, 2, 3, 4, 5])
    >>> B = SparseMatrix(rowB, colB, valB, shape=(3, 4))
    >>> A - B
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2, 2],
            [1, 3, 0, 3, 2, 3]]),
    values=tensor([-3, 20,  9, -5, 28, -4]),
    shape=(3, 4), nnz=6)

    Case 2: Subtract sparse matrix and diagonal matrix

    >>> D = diag(torch.arange(2, 5), shape=A.shape)
    >>> A - D
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
            [0, 3, 0, 1, 2]]),
    values=tensor([-2, 20, 10, -3, 26]),
    shape=(3, 4), nnz=5)
    """
    # Diagonal operands are handled by converting them to COO sparse form.
    # Converting A as well (not only B) makes the direct call sub(D, A) work.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        assert A.shape == B.shape, (
            "The shape of sparse matrix A {} and"
            " B {} are expected to match.".format(A.shape, B.shape)
        )
        C = A.adj - B.adj
        return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
    raise RuntimeError(
        "Elementwise subtraction between {} and {} is not "
        "supported.".format(type(A), type(B))
    )
def mul(
    A: Union[SparseMatrix, DiagMatrix, float],
    B: Union[SparseMatrix, DiagMatrix, float],
) -> SparseMatrix:
    """Elementwise multiplication.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix or scalar
        Sparse matrix or diagonal matrix or scalar value
    B : SparseMatrix or DiagMatrix or scalar
        Sparse matrix or diagonal matrix or scalar value

    Returns
    -------
    SparseMatrix
        The product, always returned in sparse (COO) form.

    Examples
    --------
    Case 1: Elementwise multiplication between two sparse matrices

    >>> rowA = torch.tensor([1, 0, 2])
    >>> colA = torch.tensor([0, 3, 2])
    >>> valA = torch.tensor([10, 20, 30])
    >>> A = SparseMatrix(rowA, colA, valA, shape=(3, 4))
    >>> rowB = torch.tensor([1, 2, 0, 2, 1])
    >>> colB = torch.tensor([0, 2, 1, 3, 3])
    >>> valB = torch.tensor([1, 2, 3, 4, 5])
    >>> B = SparseMatrix(rowB, colB, valB, shape=(3, 4))
    >>> A * B
    SparseMatrix(indices=tensor([[1, 2],
            [0, 2]]),
    values=tensor([10, 60]),
    shape=(3, 4), nnz=2)

    Case 2: Elementwise multiplication between sparse matrix and scalar value

    >>> v_scalar = 2.5
    >>> A * v_scalar
    SparseMatrix(indices=tensor([[0, 1, 2],
            [3, 0, 2]]),
    values=tensor([50., 25., 75.]),
    shape=(3, 4), nnz=3)

    Case 3: Elementwise multiplication between sparse and diagonal matrix

    >>> D = diag(torch.arange(2, 5), shape=A.shape)
    >>> A * D
    SparseMatrix(indices=tensor([[2],
            [2]]),
    values=tensor([120]),
    shape=(3, 4), nnz=1)
    """
    # Diagonal operands are handled by converting them to COO sparse form.
    # Converting A as well (not only B) makes mul(D, A) with a diagonal A work,
    # since the fallthrough below relies on torch's `*` semantics.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        assert A.shape == B.shape, (
            "The shape of sparse matrix A {} and"
            " B {} are expected to match.".format(A.shape, B.shape)
        )
    # Unwrap to the underlying torch sparse tensor; a scalar passes through
    # unchanged, so `C = A * B` covers sparse*sparse and sparse*scalar alike.
    A = A.adj if isinstance(A, SparseMatrix) else A
    B = B.adj if isinstance(B, SparseMatrix) else B
    C = A * B
    return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
def div(
    A: Union[SparseMatrix, DiagMatrix],
    B: Union[SparseMatrix, DiagMatrix, float],
) -> SparseMatrix:
    """Elementwise division.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix
    B : SparseMatrix or DiagMatrix or scalar
        Sparse matrix or diagonal matrix or scalar value.

    Returns
    -------
    SparseMatrix
        The quotient, always returned in sparse (COO) form.

    Examples
    --------
    Case 1: Elementwise division between two matrices of the same sparsity

    >>> A / B
    SparseMatrix(indices=tensor([[ 0,  1,  1,  2,  7],
            [49,  0,  7,  2,  1]]),
    values=tensor([20.0000,  5.0000, 16.6667,  7.5000,  8.0000]),
    shape=(10, 50), nnz=5)

    Case 2: Elementwise division between sparse matrix and scalar value

    >>> v_scalar = 2.5
    >>> A / v_scalar
    SparseMatrix(indices=tensor([[ 0,  1,  1,  2,  7],
            [49,  0,  7,  2,  1]]),
    values=tensor([ 8.,  4., 20., 12., 16.]),
    shape=(10, 50), nnz=5)
    """
    # Diagonal operands are handled by converting them to COO sparse form.
    # Converting A as well (not only B) keeps the `A.adj` fallthrough valid
    # when a DiagMatrix is passed directly as the dividend.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        # Division is only defined for operands with an identical sparsity
        # pattern; elsewhere the implicit zeros of B would divide by zero.
        if torch.equal(A.indices("COO"), B.indices("COO")):
            return SparseMatrix(A.row, A.col, A.val / B.val, A.shape)
        raise ValueError(
            "Division between matrices of different sparsity is not supported"
        )
    # Scalar divisor: torch supports dividing a sparse tensor by a scalar.
    C = A.adj / B
    return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
def rdiv(A: float, B: Union[SparseMatrix, DiagMatrix]):
    """Elementwise division of a scalar by a matrix (reflected operand order).

    This operation is not supported and always raises.

    Parameters
    ----------
    A : scalar
        scalar value
    B : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix

    Raises
    ------
    RuntimeError
        Always; scalar / sparse would densify the result.
    """
    raise RuntimeError(
        "Elementwise division between {} and {} is not "
        "supported.".format(type(A), type(B))
    )
def power(A: SparseMatrix, B: float) -> SparseMatrix:
    """Elementwise power operation.

    Parameters
    ----------
    A : SparseMatrix
        Sparse matrix
    B : scalar
        Scalar exponent

    Returns
    -------
    SparseMatrix
        Matrix with each stored value raised to the power ``B``; the
        sparsity pattern and shape of ``A`` are preserved.

    Examples
    --------
    >>> pow(A, 2.5)
    SparseMatrix(indices=tensor([[ 0,  1,  1,  2,  7],
            [49,  0,  7,  2,  1]]),
    values=tensor([ 1788.8544,   316.2278, 17677.6699,  4929.5029, 10119.2881]),
    shape=(10, 50), nnz=5)
    """
    if isinstance(B, SparseMatrix):
        raise RuntimeError(
            "Power operation between two sparse matrices is not supported"
        )
    # Pass A.shape explicitly so the result keeps A's full dimensions even
    # when trailing rows/columns hold no nonzeros.
    return SparseMatrix(A.row, A.col, torch.pow(A.val, B), A.shape)
def rpower(A: float, B: SparseMatrix) -> SparseMatrix:
    """Elementwise power with a scalar base (reflected operand order).

    This operation is not supported and always raises.

    Parameters
    ----------
    A : scalar
        scalar value
    B : SparseMatrix
        Sparse matrix.

    Raises
    ------
    RuntimeError
        Always; scalar ** sparse would densify the result.
    """
    raise RuntimeError(
        "Power operation between {} and {} is not "
        "supported.".format(type(A), type(B))
    )
SparseMatrix.__add__ = add
SparseMatrix.__radd__ = add
......
import numpy as np
import pytest
import dgl
import dgl.backend as F
import torch
import numpy
import operator
from dgl.mock_sparse import SparseMatrix
from dgl.mock_sparse import SparseMatrix, diag

# Reusable parametrizations over index and value dtypes shared by the tests.
parametrize_idtype = pytest.mark.parametrize(
    "idtype", [torch.int32, torch.int64]
)
parametrize_dtype = pytest.mark.parametrize(
    "dtype", [torch.float32, torch.float64]
)
def all_close_sparse(A, B):
    """Assert that two coalesced torch sparse tensors are equivalent:
    identical shape, identical index layout, element-wise close values."""
    assert A.shape == B.shape
    for lhs, rhs in ((A.indices(), B.indices()), (A.values(), B.values())):
        assert torch.allclose(lhs, rhs)
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul, operator.truediv])
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_sparse_op_sparse(idtype, dtype, op):
rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7])
......@@ -36,12 +43,43 @@ def test_sparse_op_sparse(idtype, dtype, op):
all_close_sparse(op(A.adj, B.adj), op(A, B).adj)
else:
# sparse div is not supported in PyTorch
assert np.allclose(op(A, A1).val, op(A.val, A1.val), rtol=1e-4, atol=1e-4)
assert np.allclose(
op(A, A1).val, op(A.val, A1.val), rtol=1e-4, atol=1e-4
)
_test()
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize(
    "op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_sparse_op_diag(idtype, dtype, op):
    """Elementwise ops between a SparseMatrix and a DiagMatrix must match
    the same op applied to their underlying torch sparse tensors."""
    rowA = torch.tensor([1, 0, 2, 7, 1])
    colA = torch.tensor([0, 49, 2, 1, 7])
    valA = torch.rand(len(rowA))
    A = SparseMatrix(rowA, colA, valA, shape=(10, 50))
    # Diagonal values 2..11 — presumably length min(A.shape) = 10; confirm
    # against DiagMatrix's contract.
    D = diag(torch.arange(2, 12), shape=A.shape)
    D_sp = D.as_sparse()

    def _test():
        if op is not operator.truediv:
            # Reference result: op applied directly to torch sparse adjacency.
            all_close_sparse(op(A.adj, D_sp.adj), op(A, D).adj)
        else:
            # NOTE (Israt): Matrices must have same sparsity pattern for div,
            # so divide a second diagonal (converted to sparse) by D rather
            # than using A, whose pattern differs from D's.
            D2 = diag(torch.arange(12, 22), shape=A.shape)
            A_sp2 = D2.as_sparse()
            assert np.allclose(
                op(A_sp2, D).val, op(A_sp2.val, D_sp.val), rtol=1e-4, atol=1e-4
            )

    _test()
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize('v_scalar', [2, 2.5])
@pytest.mark.parametrize("v_scalar", [2, 2.5])
def test_sparse_op_scalar(idtype, dtype, v_scalar):
row = torch.randint(1, 500, (100,))
col = torch.randint(1, 500, (100,))
......@@ -51,9 +89,10 @@ def test_sparse_op_scalar(idtype, dtype, v_scalar):
all_close_sparse(A.adj / v_scalar, (A / v_scalar).adj)
all_close_sparse(pow(A.adj, v_scalar), pow(A, v_scalar).adj)
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize('v_scalar', [2, 2.5])
@pytest.mark.parametrize("v_scalar", [2, 2.5])
def test_scalar_op_sparse(idtype, dtype, v_scalar):
row = torch.randint(1, 500, (100,))
col = torch.randint(1, 500, (100,))
......@@ -61,6 +100,7 @@ def test_scalar_op_sparse(idtype, dtype, v_scalar):
A = SparseMatrix(row, col, val)
all_close_sparse(v_scalar * A.adj, (v_scalar * A).adj)
def test_expose_op():
rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7])
......@@ -69,9 +109,3 @@ def test_expose_op():
dgl.mock_sparse.sub(A, A)
dgl.mock_sparse.mul(A, A)
dgl.mock_sparse.div(A, A)
if __name__ == '__main__':
test_sparse_op_sparse()
test_sparse_op_scalar()
test_scalar_op_sparse()
test_expose_op()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment