Unverified commit 7dba1991, authored by Israt Nisa, committed by GitHub
Browse files

Sparse ops: Support elementwise ops between sparse and diagonal matrix (#4617)



* initial commit

* address comments

* lint check
Co-authored-by: default avatarIsrat Nisa <nisisrat@amazon.com>
parent ace76327
"""dgl elementwise operators for sparse matrix module.""" """dgl elementwise operators for sparse matrix module."""
from typing import Union
import torch import torch
from .sp_matrix import SparseMatrix from .sp_matrix import SparseMatrix
from .diag_matrix import DiagMatrix
__all__ = ["add", "sub", "mul", "div", "rdiv", "power", "rpower"]
__all__ = ['add', 'sub', 'mul', 'div', 'rdiv', 'power', 'rpower']
def add(
    A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix]
) -> SparseMatrix:
    """Elementwise addition.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix
    B : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix

    Returns
    -------
    SparseMatrix
        Elementwise sum of ``A`` and ``B``

    Raises
    ------
    RuntimeError
        If either operand is not a sparse or diagonal matrix.

    Examples
    --------

    Case 1: Add two sparse matrices of same sparsity structure

    >>> rowA = torch.tensor([1, 0, 2])
    >>> colA = torch.tensor([0, 3, 2])
    >>> valA = torch.tensor([10, 20, 30])
    >>> A = SparseMatrix(rowA, colA, valA, shape=(3, 4))
    >>> A + A
    SparseMatrix(indices=tensor([[0, 1, 2],
            [3, 0, 2]]),
    values=tensor([40, 20, 60]),
    shape=(3, 4), nnz=3)

    Case 2: Add two sparse matrices of different sparsity structure

    >>> rowB = torch.tensor([1, 2, 0, 2, 1])
    >>> colB = torch.tensor([0, 2, 1, 3, 3])
    >>> valB = torch.tensor([1, 2, 3, 4, 5])
    >>> B = SparseMatrix(rowB, colB, valB, shape=(3, 4))
    >>> A + B
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2, 2],
            [1, 3, 0, 3, 2, 3]]),
    values=tensor([ 3, 20, 11, 5, 32, 4]),
    shape=(3, 4), nnz=6)

    Case 3: Add sparse matrix and diagonal matrix

    >>> D = diag(torch.arange(2, 5), shape=A.shape)
    >>> A + D
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
            [0, 3, 0, 1, 2]]),
    values=tensor([ 2, 20, 10, 3, 34]),
    shape=(3, 4), nnz=5)
    """
    # Normalize diagonal operands to sparse so one sparse+sparse code path
    # handles every documented combination. Converting only B (as the
    # original did) made add(DiagMatrix, SparseMatrix) fall through to the
    # RuntimeError below despite the documented signature.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        assert A.shape == B.shape, (
            "The shape of sparse matrix A {} and"
            " B {} are expected to match".format(A.shape, B.shape)
        )
        # torch sparse addition merges the two sparsity patterns for us.
        C = A.adj + B.adj
        return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
    raise RuntimeError(
        "Elementwise addition between {} and {} is not "
        "supported.".format(type(A), type(B))
    )
def sub(
    A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix]
) -> SparseMatrix:
    """Elementwise subtraction.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix
    B : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix

    Returns
    -------
    SparseMatrix
        Elementwise difference ``A - B``

    Raises
    ------
    RuntimeError
        If either operand is not a sparse or diagonal matrix.

    Examples
    --------

    Case 1: Subtract two sparse matrices

    >>> rowA = torch.tensor([1, 0, 2])
    >>> colA = torch.tensor([0, 3, 2])
    >>> valA = torch.tensor([10, 20, 30])
    >>> A = SparseMatrix(rowA, colA, valA, shape=(3, 4))
    >>> rowB = torch.tensor([1, 2, 0, 2, 1])
    >>> colB = torch.tensor([0, 2, 1, 3, 3])
    >>> valB = torch.tensor([1, 2, 3, 4, 5])
    >>> B = SparseMatrix(rowB, colB, valB, shape=(3, 4))
    >>> A - B
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2, 2],
            [1, 3, 0, 3, 2, 3]]),
    values=tensor([-3, 20, 9, -5, 28, -4]),
    shape=(3, 4), nnz=6)

    Case 2: Subtract sparse matrix and diagonal matrix

    >>> D = diag(torch.arange(2, 5), shape=A.shape)
    >>> A - D
    SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
            [0, 3, 0, 1, 2]]),
    values=tensor([-2, 20, 10, -3, 26]),
    shape=(3, 4), nnz=5)
    """
    # Normalize diagonal operands to sparse (both sides — converting only B
    # made sub(DiagMatrix, SparseMatrix) raise despite the documented
    # signature).
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        assert A.shape == B.shape, (
            "The shape of sparse matrix A {} and"
            " B {} are expected to match.".format(A.shape, B.shape)
        )
        C = A.adj - B.adj
        return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
    raise RuntimeError(
        "Elementwise subtraction between {} and {} is not "
        "supported.".format(type(A), type(B))
    )
def mul(
    A: Union[SparseMatrix, DiagMatrix, float],
    B: Union[SparseMatrix, DiagMatrix, float],
) -> SparseMatrix:
    """Elementwise multiplication.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix or scalar
        Sparse matrix or diagonal matrix or scalar value
    B : SparseMatrix or DiagMatrix or scalar
        Sparse matrix or diagonal matrix or scalar value

    Returns
    -------
    SparseMatrix
        Elementwise product of ``A`` and ``B``

    Examples
    --------

    Case 1: Elementwise multiplication between two sparse matrices

    >>> rowA = torch.tensor([1, 0, 2])
    >>> colA = torch.tensor([0, 3, 2])
    >>> valA = torch.tensor([10, 20, 30])
    >>> A = SparseMatrix(rowA, colA, valA, shape=(3, 4))
    >>> rowB = torch.tensor([1, 2, 0, 2, 1])
    >>> colB = torch.tensor([0, 2, 1, 3, 3])
    >>> valB = torch.tensor([1, 2, 3, 4, 5])
    >>> B = SparseMatrix(rowB, colB, valB, shape=(3, 4))
    >>> A * B
    SparseMatrix(indices=tensor([[1, 2],
            [0, 2]]),
    values=tensor([10, 60]),
    shape=(3, 4), nnz=2)

    Case 2: Elementwise multiplication between sparse matrix and scalar value

    >>> v_scalar = 2.5
    >>> A * v_scalar
    SparseMatrix(indices=tensor([[0, 1, 2],
            [3, 0, 2]]),
    values=tensor([50., 25., 75.]),
    shape=(3, 4), nnz=3)

    Case 3: Elementwise multiplication between sparse and diagonal matrix

    >>> D = diag(torch.arange(2, 5), shape=A.shape)
    >>> A * D
    SparseMatrix(indices=tensor([[2],
            [2]]),
    values=tensor([120]),
    shape=(3, 4), nnz=1)
    """
    # Normalize diagonal operands on BOTH sides; the original converted only
    # B, so a DiagMatrix passed as A skipped the sparse path entirely.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        assert A.shape == B.shape, (
            "The shape of sparse matrix A {} and"
            " B {} are expected to match.".format(A.shape, B.shape)
        )
    # Scalars pass through unchanged; torch broadcasts scalar * sparse.
    A = A.adj if isinstance(A, SparseMatrix) else A
    B = B.adj if isinstance(B, SparseMatrix) else B
    C = A * B
    return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
def div(
    A: Union[SparseMatrix, DiagMatrix],
    B: Union[SparseMatrix, DiagMatrix, float],
) -> SparseMatrix:
    """Elementwise division.

    Parameters
    ----------
    A : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix
    B : SparseMatrix or DiagMatrix or scalar
        Sparse matrix or diagonal matrix or scalar value.

    Returns
    -------
    SparseMatrix
        Elementwise quotient ``A / B``

    Raises
    ------
    ValueError
        If ``A`` and ``B`` are both matrices but have different sparsity
        patterns (sparse division is only defined over matching nonzeros).

    Examples
    --------

    Case 1: Elementwise division between two matrices of same sparsity

    >>> A / A1
    SparseMatrix(indices=tensor([[ 0, 1, 1, 2, 7],
            [49, 0, 7, 2, 1]]),
    values=tensor([20.0000, 5.0000, 16.6667, 7.5000, 8.0000]),
    shape=(10, 50), nnz=5)

    Case 2: Elementwise division between sparse matrix and scalar value

    >>> A / v_scalar
    SparseMatrix(indices=tensor([[ 0, 1, 1, 2, 7],
            [49, 0, 7, 2, 1]]),
    values=tensor([ 8., 4., 20., 12., 16.]),
    shape=(10, 50), nnz=5)
    """
    # Normalize diagonal operands on BOTH sides; the original converted only
    # B, so a DiagMatrix passed as A bypassed the same-sparsity fast path.
    A = A.as_sparse() if isinstance(A, DiagMatrix) else A
    B = B.as_sparse() if isinstance(B, DiagMatrix) else B
    if isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix):
        # Same sparsity structure: divide the value tensors directly.
        if torch.equal(A.indices("COO"), B.indices("COO")):
            return SparseMatrix(A.row, A.col, A.val / B.val, A.shape)
        raise ValueError(
            "Division between matrices of different sparsity is not supported"
        )
    # B is a scalar here; torch divides a sparse tensor by a scalar.
    C = A.adj / B
    return SparseMatrix(C.indices()[0], C.indices()[1], C.values(), C.shape)
def rdiv(A: float, B: Union[SparseMatrix, DiagMatrix]):
    """Elementwise division with a scalar numerator — unsupported.

    Parameters
    ----------
    A : scalar
        scalar value
    B : SparseMatrix or DiagMatrix
        Sparse matrix or diagonal matrix

    Raises
    ------
    RuntimeError
        Always; scalar / matrix is not a defined sparse operation.
    """
    msg = "Elementwise division between {} and {} is not supported.".format(
        type(A), type(B)
    )
    raise RuntimeError(msg)
def power(A: SparseMatrix, B: float) -> SparseMatrix:
    """Elementwise power operation.

    Parameters
    ----------
    A : SparseMatrix
        Sparse matrix
    B : scalar
        Exponent applied to every stored value of ``A``

    Returns
    -------
    SparseMatrix
        Matrix with the same sparsity as ``A`` and values ``A.val ** B``

    Examples
    --------

    >>> pow(A, 2.5)
    SparseMatrix(indices=tensor([[ 0, 1, 1, 2, 7],
            [49, 0, 7, 2, 1]]),
    values=tensor([ 1788.8544, 316.2278, 17677.6699, 4929.5029, 10119.2881]),
    shape=(10, 50), nnz=5)
    """
    # Sparse ** sparse has no meaningful definition; reject it up front.
    if not isinstance(B, SparseMatrix):
        powered = torch.pow(A.val, B)
        return SparseMatrix(A.row, A.col, powered, A.shape)
    raise RuntimeError(
        "Power operation between two sparse matrices is not supported"
    )
def rpower(A: float, B: SparseMatrix) -> SparseMatrix:
    """Elementwise power with a scalar base — unsupported.

    Parameters
    ----------
    A : scalar
        scalar value
    B : SparseMatrix
        Sparse matrix.

    Raises
    ------
    RuntimeError
        Always; scalar ** matrix is not a defined sparse operation.
    """
    msg = "Power operation between {} and {} is not supported.".format(
        type(A), type(B)
    )
    raise RuntimeError(msg)
SparseMatrix.__add__ = add SparseMatrix.__add__ = add
SparseMatrix.__radd__ = add SparseMatrix.__radd__ = add
......
import numpy as np import numpy as np
import pytest import pytest
import dgl import dgl
import dgl.backend as F
import torch import torch
import numpy
import operator import operator
from dgl.mock_sparse import SparseMatrix from dgl.mock_sparse import SparseMatrix, diag
parametrize_idtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
parametrize_dtype = pytest.mark.parametrize('dtype', [F.float32, F.float64]) parametrize_idtype = pytest.mark.parametrize(
"idtype", [torch.int32, torch.int64]
)
parametrize_dtype = pytest.mark.parametrize(
"dtype", [torch.float32, torch.float64]
)
def all_close_sparse(A, B):
    """Assert two coalesced torch sparse tensors match.

    Compares index structure and values within torch's default tolerances,
    then checks the dense shapes are identical.
    """
    for accessor in ("indices", "values"):
        assert torch.allclose(getattr(A, accessor)(), getattr(B, accessor)())
    assert A.shape == B.shape
@parametrize_idtype @parametrize_idtype
@parametrize_dtype @parametrize_dtype
@pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul, operator.truediv]) @pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_sparse_op_sparse(idtype, dtype, op): def test_sparse_op_sparse(idtype, dtype, op):
rowA = torch.tensor([1, 0, 2, 7, 1]) rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7]) colA = torch.tensor([0, 49, 2, 1, 7])
...@@ -36,12 +43,43 @@ def test_sparse_op_sparse(idtype, dtype, op): ...@@ -36,12 +43,43 @@ def test_sparse_op_sparse(idtype, dtype, op):
all_close_sparse(op(A.adj, B.adj), op(A, B).adj) all_close_sparse(op(A.adj, B.adj), op(A, B).adj)
else: else:
# sparse div is not supported in PyTorch # sparse div is not supported in PyTorch
assert np.allclose(op(A, A1).val, op(A.val, A1.val), rtol=1e-4, atol=1e-4) assert np.allclose(
op(A, A1).val, op(A.val, A1.val), rtol=1e-4, atol=1e-4
)
_test()
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize(
    "op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_sparse_op_diag(idtype, dtype, op):
    """Check sparse-vs-diagonal elementwise ops against raw torch sparse ops."""
    src_row = torch.tensor([1, 0, 2, 7, 1])
    src_col = torch.tensor([0, 49, 2, 1, 7])
    src_val = torch.rand(len(src_row))
    A = SparseMatrix(src_row, src_col, src_val, shape=(10, 50))
    D = diag(torch.arange(2, 12), shape=A.shape)
    D_sp = D.as_sparse()

    def _check():
        if op is operator.truediv:
            # NOTE (Israt): matrices must share a sparsity pattern for div,
            # so divide one diagonal matrix (as sparse) by another.
            D2_sp = diag(torch.arange(12, 22), shape=A.shape).as_sparse()
            assert np.allclose(
                op(D2_sp, D).val, op(D2_sp.val, D_sp.val), rtol=1e-4, atol=1e-4
            )
        else:
            all_close_sparse(op(A.adj, D_sp.adj), op(A, D).adj)

    _check()
@parametrize_idtype @parametrize_idtype
@parametrize_dtype @parametrize_dtype
@pytest.mark.parametrize('v_scalar', [2, 2.5]) @pytest.mark.parametrize("v_scalar", [2, 2.5])
def test_sparse_op_scalar(idtype, dtype, v_scalar): def test_sparse_op_scalar(idtype, dtype, v_scalar):
row = torch.randint(1, 500, (100,)) row = torch.randint(1, 500, (100,))
col = torch.randint(1, 500, (100,)) col = torch.randint(1, 500, (100,))
...@@ -51,9 +89,10 @@ def test_sparse_op_scalar(idtype, dtype, v_scalar): ...@@ -51,9 +89,10 @@ def test_sparse_op_scalar(idtype, dtype, v_scalar):
all_close_sparse(A.adj / v_scalar, (A / v_scalar).adj) all_close_sparse(A.adj / v_scalar, (A / v_scalar).adj)
all_close_sparse(pow(A.adj, v_scalar), pow(A, v_scalar).adj) all_close_sparse(pow(A.adj, v_scalar), pow(A, v_scalar).adj)
@parametrize_idtype @parametrize_idtype
@parametrize_dtype @parametrize_dtype
@pytest.mark.parametrize('v_scalar', [2, 2.5]) @pytest.mark.parametrize("v_scalar", [2, 2.5])
def test_scalar_op_sparse(idtype, dtype, v_scalar): def test_scalar_op_sparse(idtype, dtype, v_scalar):
row = torch.randint(1, 500, (100,)) row = torch.randint(1, 500, (100,))
col = torch.randint(1, 500, (100,)) col = torch.randint(1, 500, (100,))
...@@ -61,6 +100,7 @@ def test_scalar_op_sparse(idtype, dtype, v_scalar): ...@@ -61,6 +100,7 @@ def test_scalar_op_sparse(idtype, dtype, v_scalar):
A = SparseMatrix(row, col, val) A = SparseMatrix(row, col, val)
all_close_sparse(v_scalar * A.adj, (v_scalar * A).adj) all_close_sparse(v_scalar * A.adj, (v_scalar * A).adj)
def test_expose_op(): def test_expose_op():
rowA = torch.tensor([1, 0, 2, 7, 1]) rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7]) colA = torch.tensor([0, 49, 2, 1, 7])
...@@ -69,9 +109,3 @@ def test_expose_op(): ...@@ -69,9 +109,3 @@ def test_expose_op():
dgl.mock_sparse.sub(A, A) dgl.mock_sparse.sub(A, A)
dgl.mock_sparse.mul(A, A) dgl.mock_sparse.mul(A, A)
dgl.mock_sparse.div(A, A) dgl.mock_sparse.div(A, A)
if __name__ == '__main__':
test_sparse_op_sparse()
test_sparse_op_scalar()
test_scalar_op_sparse()
test_expose_op()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment