Unverified commit c4ffd752, authored by Hongzhi (Steve) Chen, committed by GitHub

[Misc] Polish the doc strings. (#5203)


Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 050798e9
...@@ -104,9 +104,8 @@ class DiagMatrix: ...@@ -104,9 +104,8 @@ class DiagMatrix:
>>> import torch >>> import torch
>>> val = torch.ones(5) >>> val = torch.ones(5)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> sp_mat = mat.to_sparse() >>> D.to_sparse()
>>> print(sp_mat)
SparseMatrix(indices=tensor([[0, 1, 2, 3, 4], SparseMatrix(indices=tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]), [0, 1, 2, 3, 4]]),
values=tensor([1., 1., 1., 1., 1.]), values=tensor([1., 1., 1., 1., 1.]),
...@@ -152,11 +151,10 @@ class DiagMatrix: ...@@ -152,11 +151,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.arange(1, 5).float() >>> val = torch.arange(1, 5).float()
>>> mat = diag(val, shape=(4, 5)) >>> D = dglsp.diag(val, shape=(4, 5))
>>> mat = mat.transpose() >>> D.transpose()
>>> print(mat)
DiagMatrix(val=tensor([1., 2., 3., 4.]), DiagMatrix(val=tensor([1., 2., 3., 4.]),
shape=(5, 4)) shape=(5, 4))
""" """
return DiagMatrix(self.val, self.shape[::-1]) return DiagMatrix(self.val, self.shape[::-1])
...@@ -182,10 +180,10 @@ class DiagMatrix: ...@@ -182,10 +180,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.to(device='cuda:0', dtype=torch.int32) >>> D.to(device='cuda:0', dtype=torch.int32)
DiagMatrix(values=tensor([1, 1], device='cuda:0', dtype=torch.int32), DiagMatrix(values=tensor([1, 1], device='cuda:0', dtype=torch.int32),
size=(2, 2)) shape=(2, 2))
""" """
if device is None: if device is None:
device = self.device device = self.device
...@@ -211,10 +209,10 @@ class DiagMatrix: ...@@ -211,10 +209,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.cuda() >>> D.cuda()
DiagMatrix(values=tensor([1., 1.], device='cuda:0'), DiagMatrix(values=tensor([1., 1.], device='cuda:0'),
size=(2, 2)) shape=(2, 2))
""" """
return self.to(device="cuda") return self.to(device="cuda")
...@@ -231,10 +229,10 @@ class DiagMatrix: ...@@ -231,10 +229,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.cpu() >>> D.cpu()
DiagMatrix(values=tensor([1., 1.]), DiagMatrix(values=tensor([1., 1.]),
size=(2, 2)) shape=(2, 2))
""" """
return self.to(device="cpu") return self.to(device="cpu")
...@@ -251,10 +249,10 @@ class DiagMatrix: ...@@ -251,10 +249,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.float() >>> D.float()
DiagMatrix(values=tensor([1., 1.]), DiagMatrix(values=tensor([1., 1.]),
size=(2, 2)) shape=(2, 2))
""" """
return self.to(dtype=torch.float) return self.to(dtype=torch.float)
...@@ -271,10 +269,10 @@ class DiagMatrix: ...@@ -271,10 +269,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.double() >>> D.double()
DiagMatrix(values=tensor([1., 1.], dtype=torch.float64), DiagMatrix(values=tensor([1., 1.], dtype=torch.float64),
size=(2, 2)) shape=(2, 2))
""" """
return self.to(dtype=torch.double) return self.to(dtype=torch.double)
...@@ -291,10 +289,10 @@ class DiagMatrix: ...@@ -291,10 +289,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.int() >>> D.int()
DiagMatrix(values=tensor([1, 1], dtype=torch.int32), DiagMatrix(values=tensor([1, 1], dtype=torch.int32),
size=(2, 2)) shape=(2, 2))
""" """
return self.to(dtype=torch.int) return self.to(dtype=torch.int)
...@@ -311,10 +309,10 @@ class DiagMatrix: ...@@ -311,10 +309,10 @@ class DiagMatrix:
-------- --------
>>> val = torch.ones(2) >>> val = torch.ones(2)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.long() >>> D.long()
DiagMatrix(values=tensor([1, 1]), DiagMatrix(values=tensor([1, 1]),
size=(2, 2)) shape=(2, 2))
""" """
return self.to(dtype=torch.long) return self.to(dtype=torch.long)
...@@ -344,26 +342,24 @@ def diag( ...@@ -344,26 +342,24 @@ def diag(
>>> import torch >>> import torch
>>> val = torch.ones(5) >>> val = torch.ones(5)
>>> mat = diag(val) >>> dglsp.diag(val)
>>> print(mat)
DiagMatrix(val=tensor([1., 1., 1., 1., 1.]), DiagMatrix(val=tensor([1., 1., 1., 1., 1.]),
shape=(5, 5)) shape=(5, 5))
Case2: 5-by-10 diagonal matrix with scalar values on the diagonal Case2: 5-by-10 diagonal matrix with scalar values on the diagonal
>>> val = torch.ones(5) >>> val = torch.ones(5)
>>> mat = diag(val, shape=(5, 10)) >>> dglsp.diag(val, shape=(5, 10))
>>> print(mat)
DiagMatrix(val=tensor([1., 1., 1., 1., 1.]), DiagMatrix(val=tensor([1., 1., 1., 1., 1.]),
shape=(5, 10)) shape=(5, 10))
Case3: 5-by-5 diagonal matrix with tensor values on the diagonal Case3: 5-by-5 diagonal matrix with vector values on the diagonal
>>> val = torch.randn(5, 3) >>> val = torch.randn(5, 3)
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat.shape >>> D.shape
(5, 5) (5, 5)
>>> mat.nnz >>> D.nnz
5 5
""" """
# NOTE(Mufei): this may not be needed if DiagMatrix is simple enough # NOTE(Mufei): this may not be needed if DiagMatrix is simple enough
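Beyond construction, a ``DiagMatrix`` is most often used to scale the rows of a dense matrix. A minimal sketch, not part of the docstring, assuming the usual ``import dgl.sparse as dglsp`` alias that the examples above rely on:

import torch
import dgl.sparse as dglsp  # assumed import alias

scale = torch.tensor([1.0, 2.0, 3.0])
D = dglsp.diag(scale)               # 3-by-3 diagonal matrix
X = torch.ones(3, 4)                # dense features
Y = dglsp.matmul(D, X)              # row i of Y is X[i] * scale[i]
# Y is a plain torch.Tensor of shape (3, 4).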
...@@ -376,7 +372,7 @@ def identity( ...@@ -376,7 +372,7 @@ def identity(
dtype: Optional[torch.dtype] = None, dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None, device: Optional[torch.device] = None,
) -> DiagMatrix: ) -> DiagMatrix:
"""Creates a diagonal matrix with ones on the diagonal and zeros elsewhere. r"""Creates a diagonal matrix with ones on the diagonal and zeros elsewhere.
Parameters Parameters
---------- ----------
...@@ -404,8 +400,7 @@ def identity( ...@@ -404,8 +400,7 @@ def identity(
[0, 1, 0], [0, 1, 0],
[0, 0, 1]] [0, 0, 1]]
>>> mat = identity(shape=(3, 3)) >>> dglsp.identity(shape=(3, 3))
>>> print(mat)
DiagMatrix(val=tensor([1., 1., 1.]), DiagMatrix(val=tensor([1., 1., 1.]),
shape=(3, 3)) shape=(3, 3))
...@@ -415,19 +410,17 @@ def identity( ...@@ -415,19 +410,17 @@ def identity(
[0, 1, 0, 0, 0], [0, 1, 0, 0, 0],
[0, 0, 1, 0, 0]] [0, 0, 1, 0, 0]]
>>> mat = identity(shape=(3, 5)) >>> dglsp.identity(shape=(3, 5))
>>> print(mat)
DiagMatrix(val=tensor([1., 1., 1.]), DiagMatrix(val=tensor([1., 1., 1.]),
shape=(3, 5)) shape=(3, 5))
Case3: 3-by-3 matrix with tensor diagonal values Case3: 3-by-3 matrix with vector diagonal values
>>> mat = identity(shape=(3, 3), d=2) >>> dglsp.identity(shape=(3, 3), d=2)
>>> print(mat) DiagMatrix(values=tensor([[1., 1.],
DiagMatrix(val=tensor([[1., 1.], [1., 1.],
[1., 1.], [1., 1.]]),
[1., 1.]]), shape=(3, 3), val_size=(2,))
shape=(3, 3))
""" """
len_val = min(shape) len_val = min(shape)
if d is None: if d is None:
......
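A common use of ``identity`` is pairing it with the elementwise ``add`` documented later in this change, e.g. forming A + I for a square sparse matrix. A hedged sketch, assuming the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([0, 1, 2])
col = torch.tensor([1, 2, 0])
A = dglsp.from_coo(row, col, torch.ones(3), shape=(3, 3))
I = dglsp.identity(shape=(3, 3))
A_hat = dglsp.add(A, I)             # SparseMatrix with 6 stored elements
# The DiagMatrix operand is promoted, so the result stays sparse.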
...@@ -12,8 +12,8 @@ __all__ = ["add", "sub", "mul", "div", "power"] ...@@ -12,8 +12,8 @@ __all__ = ["add", "sub", "mul", "div", "power"]
def add( def add(
A: Union[DiagMatrix, SparseMatrix], B: Union[DiagMatrix, SparseMatrix] A: Union[DiagMatrix, SparseMatrix], B: Union[DiagMatrix, SparseMatrix]
) -> Union[DiagMatrix, SparseMatrix]: ) -> Union[DiagMatrix, SparseMatrix]:
r"""Elementwise additions for ``DiagMatrix`` and ``SparseMatrix``, r"""Elementwise addition for ``DiagMatrix`` and ``SparseMatrix``, equivalent
equivalent to ``A + B``. to ``A + B``.
The supported combinations are shown as follows. The supported combinations are shown as follows.
...@@ -45,9 +45,9 @@ def add( ...@@ -45,9 +45,9 @@ def add(
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2]) >>> col = torch.tensor([0, 1, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> B = diag(torch.arange(1, 4)) >>> B = dglsp.diag(torch.arange(1, 4))
>>> add(A, B) >>> dglsp.add(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2], SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
[0, 1, 0, 1, 2]]), [0, 1, 0, 1, 2]]),
values=tensor([ 1, 20, 10, 2, 33]), values=tensor([ 1, 20, 10, 2, 33]),
...@@ -92,9 +92,9 @@ def sub( ...@@ -92,9 +92,9 @@ def sub(
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2]) >>> col = torch.tensor([0, 1, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> B = diag(torch.arange(1, 4)) >>> B = dglsp.diag(torch.arange(1, 4))
>>> sub(A, B) >>> dglsp.sub(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2], SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
[0, 1, 0, 1, 2]]), [0, 1, 0, 1, 2]]),
values=tensor([-1, 20, 10, -2, 27]), values=tensor([-1, 20, 10, -2, 27]),
...@@ -139,20 +139,20 @@ def mul( ...@@ -139,20 +139,20 @@ def mul(
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> mul(A, 2) >>> dglsp.mul(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
values=tensor([20, 40, 60]), values=tensor([20, 40, 60]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3)
>>> D = diag(torch.arange(1, 4)) >>> D = dglsp.diag(torch.arange(1, 4))
>>> mul(D, 2) >>> dglsp.mul(D, 2)
DiagMatrix(val=tensor([2, 4, 6]), DiagMatrix(val=tensor([2, 4, 6]),
shape=(3, 3)) shape=(3, 3))
>>> D = diag(torch.arange(1, 4)) >>> D = dglsp.diag(torch.arange(1, 4))
>>> mul(D, D) >>> dglsp.mul(D, D)
DiagMatrix(val=tensor([1, 4, 9]), DiagMatrix(val=tensor([1, 4, 9]),
shape=(3, 3)) shape=(3, 3))
""" """
...@@ -191,22 +191,22 @@ def div( ...@@ -191,22 +191,22 @@ def div(
Examples Examples
-------- --------
>>> A = diag(torch.arange(1, 4)) >>> A = dglsp.diag(torch.arange(1, 4))
>>> B = diag(torch.arange(10, 13)) >>> B = dglsp.diag(torch.arange(10, 13))
>>> div(A, B) >>> dglsp.div(A, B)
DiagMatrix(val=tensor([0.1000, 0.1818, 0.2500]), DiagMatrix(val=tensor([0.1000, 0.1818, 0.2500]),
shape=(3, 3)) shape=(3, 3))
>>> A = diag(torch.arange(1, 4)) >>> A = dglsp.diag(torch.arange(1, 4))
>>> div(A, 2) >>> dglsp.div(A, 2)
DiagMatrix(val=tensor([0.5000, 1.0000, 1.5000]), DiagMatrix(val=tensor([0.5000, 1.0000, 1.5000]),
shape=(3, 3)) shape=(3, 3))
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3]) >>> val = torch.tensor([1, 2, 3])
>>> A = from_coo(row, col, val, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A / 2 >>> dglsp.div(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
values=tensor([0.5000, 1.0000, 1.5000]), values=tensor([0.5000, 1.0000, 1.5000]),
...@@ -250,15 +250,15 @@ def power( ...@@ -250,15 +250,15 @@ def power(
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> power(A, 2) >>> dglsp.power(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
values=tensor([100, 400, 900]), values=tensor([100, 400, 900]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3)
>>> D = diag(torch.arange(1, 4)) >>> D = dglsp.diag(torch.arange(1, 4))
>>> power(D, 2) >>> dglsp.power(D, 2)
DiagMatrix(val=tensor([1, 4, 9]), DiagMatrix(val=tensor([1, 4, 9]),
shape=(3, 3)) shape=(3, 3))
""" """
......
...@@ -25,11 +25,11 @@ def diag_add( ...@@ -25,11 +25,11 @@ def diag_add(
Examples Examples
-------- --------
>>> D1 = diag(torch.arange(1, 4)) >>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13)) >>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D1 + D2 >>> D1 + D2
DiagMatrix(val=tensor([11, 13, 15]), DiagMatrix(val=tensor([11, 13, 15]),
shape=(3, 3)) shape=(3, 3))
""" """
if isinstance(D2, DiagMatrix): if isinstance(D2, DiagMatrix):
assert D1.shape == D2.shape, ( assert D1.shape == D2.shape, (
...@@ -68,11 +68,11 @@ def diag_sub( ...@@ -68,11 +68,11 @@ def diag_sub(
Examples Examples
-------- --------
>>> D1 = diag(torch.arange(1, 4)) >>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13)) >>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D1 - D2 >>> D1 - D2
DiagMatrix(val=tensor([-9, -9, -9]), DiagMatrix(val=tensor([-9, -9, -9]),
shape=(3, 3)) shape=(3, 3))
""" """
if isinstance(D2, DiagMatrix): if isinstance(D2, DiagMatrix):
assert D1.shape == D2.shape, ( assert D1.shape == D2.shape, (
...@@ -111,11 +111,11 @@ def diag_rsub( ...@@ -111,11 +111,11 @@ def diag_rsub(
Examples Examples
-------- --------
>>> D1 = diag(torch.arange(1, 4)) >>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13)) >>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D2 - D1 >>> D2 - D1
DiagMatrix(val=tensor([-9, -9, -9]), DiagMatrix(val=tensor([-9, -9, -9]),
shape=(3, 3)) shape=(3, 3))
""" """
return -(D1 - D2) return -(D1 - D2)
...@@ -137,13 +137,13 @@ def diag_mul(D1: DiagMatrix, D2: Union[DiagMatrix, Scalar]) -> DiagMatrix: ...@@ -137,13 +137,13 @@ def diag_mul(D1: DiagMatrix, D2: Union[DiagMatrix, Scalar]) -> DiagMatrix:
Examples Examples
-------- --------
>>> D = diag(torch.arange(1, 4)) >>> D = dglsp.diag(torch.arange(1, 4))
>>> D * 2.5 >>> D * 2.5
DiagMatrix(val=tensor([2.5000, 5.0000, 7.5000]), DiagMatrix(val=tensor([2.5000, 5.0000, 7.5000]),
shape=(3, 3)) shape=(3, 3))
>>> 2 * D >>> 2 * D
DiagMatrix(val=tensor([2, 4, 6]), DiagMatrix(val=tensor([2, 4, 6]),
shape=(3, 3)) shape=(3, 3))
""" """
if isinstance(D2, DiagMatrix): if isinstance(D2, DiagMatrix):
assert D1.shape == D2.shape, ( assert D1.shape == D2.shape, (
...@@ -178,14 +178,14 @@ def diag_div(D1: DiagMatrix, D2: Union[DiagMatrix, Scalar]) -> DiagMatrix: ...@@ -178,14 +178,14 @@ def diag_div(D1: DiagMatrix, D2: Union[DiagMatrix, Scalar]) -> DiagMatrix:
Examples Examples
-------- --------
>>> D1 = diag(torch.arange(1, 4)) >>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13)) >>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D1 / D2 >>> D1 / D2
DiagMatrix(val=tensor([0.1000, 0.1818, 0.2500]), DiagMatrix(val=tensor([0.1000, 0.1818, 0.2500]),
shape=(3, 3)) shape=(3, 3))
>>> D1 / 2.5 >>> D1 / 2.5
DiagMatrix(val=tensor([0.4000, 0.8000, 1.2000]), DiagMatrix(val=tensor([0.4000, 0.8000, 1.2000]),
shape=(3, 3)) shape=(3, 3))
""" """
if isinstance(D2, DiagMatrix): if isinstance(D2, DiagMatrix):
assert D1.shape == D2.shape, ( assert D1.shape == D2.shape, (
...@@ -221,10 +221,10 @@ def diag_power(D: DiagMatrix, scalar: Scalar) -> DiagMatrix: ...@@ -221,10 +221,10 @@ def diag_power(D: DiagMatrix, scalar: Scalar) -> DiagMatrix:
Examples Examples
-------- --------
>>> D = diag(torch.arange(1, 4)) >>> D = dglsp.diag(torch.arange(1, 4))
>>> D ** 2 >>> D ** 2
DiagMatrix(val=tensor([1, 4, 9]), DiagMatrix(val=tensor([1, 4, 9]),
shape=(3, 3)) shape=(3, 3))
""" """
return ( return (
diag(D.val**scalar, D.shape) if is_scalar(scalar) else NotImplemented diag(D.val**scalar, D.shape) if is_scalar(scalar) else NotImplemented
......
...@@ -33,7 +33,7 @@ def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix: ...@@ -33,7 +33,7 @@ def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A + A >>> A + A
SparseMatrix(indices=tensor([[0, 1, 2], SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]), [3, 0, 2]]),
...@@ -67,8 +67,8 @@ def sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix: ...@@ -67,8 +67,8 @@ def sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> val2 = torch.tensor([5, 10, 15]) >>> val2 = torch.tensor([5, 10, 15])
>>> A = from_coo(row, col, val, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> B = from_coo(row, col, val2, shape=(3, 4)) >>> B = dglsp.from_coo(row, col, val2, shape=(3, 4))
>>> A - B >>> A - B
SparseMatrix(indices=tensor([[0, 1, 2], SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]), [3, 0, 2]]),
...@@ -102,17 +102,17 @@ def sp_mul(A: SparseMatrix, B: Scalar) -> SparseMatrix: ...@@ -102,17 +102,17 @@ def sp_mul(A: SparseMatrix, B: Scalar) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3]) >>> val = torch.tensor([1, 2, 3])
>>> A = from_coo(row, col, val, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A * 2 >>> A * 2
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
values=tensor([2, 4, 6]), values=tensor([2, 4, 6]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3)
>>> 2 * A >>> 2 * A
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
values=tensor([2, 4, 6]), values=tensor([2, 4, 6]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
...@@ -145,7 +145,7 @@ def sp_div(A: SparseMatrix, B: Scalar) -> SparseMatrix: ...@@ -145,7 +145,7 @@ def sp_div(A: SparseMatrix, B: Scalar) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3]) >>> val = torch.tensor([1, 2, 3])
>>> A = from_coo(row, col, val, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A / 2 >>> A / 2
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
...@@ -180,10 +180,10 @@ def sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix: ...@@ -180,10 +180,10 @@ def sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2]) >>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2]) >>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30]) >>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> A ** 2 >>> A ** 2
SparseMatrix(indices=tensor([[1, 0, 2], SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]), [0, 3, 2]]),
values=tensor([100, 400, 900]), values=tensor([100, 400, 900]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
......
...@@ -12,7 +12,7 @@ __all__ = ["spmm", "bspmm", "spspmm", "matmul"] ...@@ -12,7 +12,7 @@ __all__ = ["spmm", "bspmm", "spspmm", "matmul"]
def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor: def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
"""Multiply a sparse matrix by a dense matrix, equivalent to ``A @ X``. """Multiplies a sparse matrix by a dense matrix, equivalent to ``A @ X``.
Parameters Parameters
---------- ----------
...@@ -32,12 +32,12 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor: ...@@ -32,12 +32,12 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
>>> row = torch.tensor([0, 1, 1]) >>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1]) >>> col = torch.tensor([1, 0, 1])
>>> val = torch.randn(len(row)) >>> val = torch.randn(len(row))
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> X = torch.randn(2, 3) >>> X = torch.randn(2, 3)
>>> result = dgl.sparse.spmm(A, X) >>> result = dglsp.spmm(A, X)
>>> print(type(result)) >>> type(result)
<class 'torch.Tensor'> <class 'torch.Tensor'>
>>> print(result.shape) >>> result.shape
torch.Size([2, 3]) torch.Size([2, 3])
""" """
assert isinstance( assert isinstance(
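A typical ``spmm`` pattern is propagating node features over a degree-normalized adjacency. The sketch below combines ``from_coo``, ``sum``, ``diag`` and ``matmul`` as documented in this change; it is an illustration rather than part of the docstring, and assumes the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([0, 1, 1, 2])
col = torch.tensor([1, 0, 2, 1])
A = dglsp.from_coo(row, col, torch.ones(4), shape=(3, 3))
deg = dglsp.sum(A, 1)                        # per-row degree: tensor([1., 2., 1.])
D_inv = dglsp.diag(1.0 / deg)                # D^-1 as a DiagMatrix
X = torch.randn(3, 8)
H = dglsp.spmm(dglsp.matmul(D_inv, A), X)    # normalized A @ X, shape (3, 8)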
...@@ -54,7 +54,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor: ...@@ -54,7 +54,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor: def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
"""Multiply a sparse matrix by a dense matrix by batches, equivalent to """Multiplies a sparse matrix by a dense matrix by batches, equivalent to
``A @ X``. ``A @ X``.
Parameters Parameters
...@@ -75,12 +75,12 @@ def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor: ...@@ -75,12 +75,12 @@ def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
>>> row = torch.tensor([0, 1, 1]) >>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 2]) >>> col = torch.tensor([1, 0, 2])
>>> val = torch.randn(len(row), 2) >>> val = torch.randn(len(row), 2)
>>> A = from_coo(row, col, val, shape=(3, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 3))
>>> X = torch.randn(3, 3, 2) >>> X = torch.randn(3, 3, 2)
>>> result = dgl.sparse.bspmm(A, X) >>> result = dglsp.bspmm(A, X)
>>> print(type(result)) >>> type(result)
<class 'torch.Tensor'> <class 'torch.Tensor'>
>>> print(result.shape) >>> result.shape
torch.Size([3, 3, 2]) torch.Size([3, 3, 2])
""" """
assert isinstance( assert isinstance(
...@@ -177,7 +177,7 @@ def _diag_sparse_mm(D, A): ...@@ -177,7 +177,7 @@ def _diag_sparse_mm(D, A):
def spspmm( def spspmm(
A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix] A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix]
) -> Union[SparseMatrix, DiagMatrix]: ) -> Union[SparseMatrix, DiagMatrix]:
"""Multiply a sparse matrix by a sparse matrix, equivalent to ``A @ B``. """Multiplies a sparse matrix by a sparse matrix, equivalent to ``A @ B``.
The non-zero values of the two sparse matrices must be 1D. The non-zero values of the two sparse matrices must be 1D.
...@@ -200,14 +200,12 @@ def spspmm( ...@@ -200,14 +200,12 @@ def spspmm(
>>> row1 = torch.tensor([0, 1, 1]) >>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1]) >>> col1 = torch.tensor([1, 0, 1])
>>> val1 = torch.ones(len(row1)) >>> val1 = torch.ones(len(row1))
>>> A = from_coo(row1, col1, val1) >>> A = dglsp.from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1]) >>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1]) >>> col2 = torch.tensor([0, 2, 1])
>>> val2 = torch.ones(len(row2)) >>> val2 = torch.ones(len(row2))
>>> B = from_coo(row2, col2, val2) >>> B = dglsp.from_coo(row2, col2, val2)
>>> result = dgl.sparse.spspmm(A, B) >>> dglsp.spspmm(A, B)
>>> print(result)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 1], SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],
[1, 2, 0, 1, 2]]), [1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]), values=tensor([1., 1., 1., 1., 1.]),
...@@ -235,7 +233,7 @@ def matmul( ...@@ -235,7 +233,7 @@ def matmul(
A: Union[torch.Tensor, SparseMatrix, DiagMatrix], A: Union[torch.Tensor, SparseMatrix, DiagMatrix],
B: Union[torch.Tensor, SparseMatrix, DiagMatrix], B: Union[torch.Tensor, SparseMatrix, DiagMatrix],
) -> Union[torch.Tensor, SparseMatrix, DiagMatrix]: ) -> Union[torch.Tensor, SparseMatrix, DiagMatrix]:
"""Multiply two dense/sparse/diagonal matrices, equivalent to ``A @ B``. """Multiplies two dense/sparse/diagonal matrices, equivalent to ``A @ B``.
The supported combinations are shown as follows. The supported combinations are shown as follows.
...@@ -282,44 +280,44 @@ def matmul( ...@@ -282,44 +280,44 @@ def matmul(
Examples Examples
-------- --------
Multiply a diagonal matrix with a dense matrix. Multiplies a diagonal matrix with a dense matrix.
>>> val = torch.randn(3) >>> val = torch.randn(3)
>>> A = diag(val) >>> A = dglsp.diag(val)
>>> B = torch.randn(3, 2) >>> B = torch.randn(3, 2)
>>> result = dgl.sparse.matmul(A, B) >>> result = dglsp.matmul(A, B)
>>> print(type(result)) >>> type(result)
<class 'torch.Tensor'> <class 'torch.Tensor'>
>>> print(result.shape) >>> result.shape
torch.Size([3, 2]) torch.Size([3, 2])
Multiply a sparse matrix with a dense matrix. Multiplies a sparse matrix with a dense matrix.
>>> row = torch.tensor([0, 1, 1]) >>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1]) >>> col = torch.tensor([1, 0, 1])
>>> val = torch.randn(len(row)) >>> val = torch.randn(len(row))
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> X = torch.randn(2, 3) >>> X = torch.randn(2, 3)
>>> result = dgl.sparse.matmul(A, X) >>> result = dglsp.matmul(A, X)
>>> print(type(result)) >>> type(result)
<class 'torch.Tensor'> <class 'torch.Tensor'>
>>> print(result.shape) >>> result.shape
torch.Size([2, 3]) torch.Size([2, 3])
Multiply a sparse matrix with a sparse matrix. Multiplies a sparse matrix with a sparse matrix.
>>> row1 = torch.tensor([0, 1, 1]) >>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1]) >>> col1 = torch.tensor([1, 0, 1])
>>> val1 = torch.ones(len(row1)) >>> val1 = torch.ones(len(row1))
>>> A = from_coo(row1, col1, val1) >>> A = dglsp.from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1]) >>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1]) >>> col2 = torch.tensor([0, 2, 1])
>>> val2 = torch.ones(len(row2)) >>> val2 = torch.ones(len(row2))
>>> B = from_coo(row2, col2, val2) >>> B = dglsp.from_coo(row2, col2, val2)
>>> result = dgl.sparse.matmul(A, B) >>> result = dglsp.matmul(A, B)
>>> print(type(result)) >>> type(result)
<class 'dgl.sparse.sparse_matrix.SparseMatrix'> <class 'dgl.sparse.sparse_matrix.SparseMatrix'>
>>> print(result.shape) >>> result.shape
(2, 3) (2, 3)
""" """
assert isinstance(A, (torch.Tensor, SparseMatrix, DiagMatrix)), ( assert isinstance(A, (torch.Tensor, SparseMatrix, DiagMatrix)), (
......
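As the three cases above illustrate, the return type of ``matmul`` follows its operands. A compact restatement, assuming the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

A = dglsp.from_coo(torch.tensor([0, 1]), torch.tensor([1, 0]), torch.ones(2))
X = torch.randn(2, 4)
dense_out = dglsp.matmul(A, X)      # sparse @ dense  -> torch.Tensor, shape (2, 4)
sparse_out = dglsp.matmul(A, A)     # sparse @ sparse -> SparseMatrix, shape (2, 2)
# Per the docstring, ``A @ X`` and ``A @ A`` are equivalent spellings.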
...@@ -47,15 +47,15 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"): ...@@ -47,15 +47,15 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2]) >>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.reduce(A, rtype='sum')) >>> dglsp.reduce(A, rtype='sum')
tensor(4) tensor(4)
>>> print(dglsp.reduce(A, 0, 'sum')) >>> dglsp.reduce(A, 0, 'sum')
tensor([2, 0, 2]) tensor([2, 0, 2])
>>> print(dglsp.reduce(A, 1, 'sum')) >>> dglsp.reduce(A, 1, 'sum')
tensor([1, 3, 0, 0]) tensor([1, 3, 0, 0])
>>> print(dglsp.reduce(A, 0, 'smax')) >>> dglsp.reduce(A, 0, 'smax')
tensor([1, 0, 2]) tensor([1, 0, 2])
>>> print(dglsp.reduce(A, 1, 'smin')) >>> dglsp.reduce(A, 1, 'smin')
tensor([1, 1, 0, 0]) tensor([1, 1, 0, 0])
Case2: vector-valued sparse matrix Case2: vector-valued sparse matrix
...@@ -64,18 +64,18 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"): ...@@ -64,18 +64,18 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]]) >>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.reduce(A, rtype='sum')) >>> dglsp.reduce(A, rtype='sum')
tensor([5., 5.]) tensor([5., 5.])
>>> print(dglsp.reduce(A, 0, 'sum')) >>> dglsp.reduce(A, 0, 'sum')
tensor([[3., 3.], tensor([[3., 3.],
[0., 0.], [0., 0.],
[2., 2.]]) [2., 2.]])
>>> print(dglsp.reduce(A, 1, 'smin')) >>> dglsp.reduce(A, 1, 'smin')
tensor([[1., 2.], tensor([[1., 2.],
[2., 1.], [2., 1.],
[0., 0.], [0., 0.],
[0., 0.]]) [0., 0.]])
>>> print(dglsp.reduce(A, 0, 'smean')) >>> dglsp.reduce(A, 0, 'smean')
tensor([[1.5000, 1.5000], tensor([[1.5000, 1.5000],
[0.0000, 0.0000], [0.0000, 0.0000],
[2.0000, 2.0000]]) [2.0000, 2.0000]])
...@@ -115,11 +115,11 @@ def sum(input: SparseMatrix, dim: Optional[int] = None): ...@@ -115,11 +115,11 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2]) >>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sum(A)) >>> dglsp.sum(A)
tensor(4) tensor(4)
>>> print(dglsp.sum(A, 0)) >>> dglsp.sum(A, 0)
tensor([2, 0, 2]) tensor([2, 0, 2])
>>> print(dglsp.sum(A, 1)) >>> dglsp.sum(A, 1)
tensor([1, 3, 0, 0]) tensor([1, 3, 0, 0])
Case2: vector-valued sparse matrix Case2: vector-valued sparse matrix
...@@ -128,9 +128,9 @@ def sum(input: SparseMatrix, dim: Optional[int] = None): ...@@ -128,9 +128,9 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]]) >>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sum(A)) >>> dglsp.sum(A)
tensor([5, 5]) tensor([5, 5])
>>> print(dglsp.sum(A, 0)) >>> dglsp.sum(A, 0)
tensor([[3, 3], tensor([[3, 3],
[0, 0], [0, 0],
[2, 2]]) [2, 2]])
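With no ``dim`` argument the reduction runs over every stored element, so for a matrix without duplicate coordinates it simply equals the sum of ``A.val``. A quick check, assuming the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([0, 1, 1])
col = torch.tensor([0, 0, 2])
val = torch.tensor([1, 1, 2])
A = dglsp.from_coo(row, col, val, shape=(4, 3))
assert dglsp.sum(A).item() == val.sum().item() == 4
# With dim=0 or dim=1 the result is per column/row, zero-filled where a
# column or row holds no stored elements (see the outputs above).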
...@@ -173,11 +173,11 @@ def smax(input: SparseMatrix, dim: Optional[int] = None): ...@@ -173,11 +173,11 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2]) >>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smax(A)) >>> dglsp.smax(A)
tensor(2) tensor(2)
>>> print(dglsp.smax(A, 0)) >>> dglsp.smax(A, 0)
tensor([1, 0, 2]) tensor([1, 0, 2])
>>> print(dglsp.smax(A, 1)) >>> dglsp.smax(A, 1)
tensor([1, 2, 0, 0]) tensor([1, 2, 0, 0])
Case2: vector-valued sparse matrix Case2: vector-valued sparse matrix
...@@ -186,9 +186,9 @@ def smax(input: SparseMatrix, dim: Optional[int] = None): ...@@ -186,9 +186,9 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]]) >>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smax(A)) >>> dglsp.smax(A)
tensor([2, 2]) tensor([2, 2])
>>> print(dglsp.smax(A, 1)) >>> dglsp.smax(A, 1)
tensor([[1, 2], tensor([[1, 2],
[2, 2], [2, 2],
[0, 0], [0, 0],
...@@ -232,11 +232,11 @@ def smin(input: SparseMatrix, dim: Optional[int] = None): ...@@ -232,11 +232,11 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2]) >>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smin(A)) >>> dglsp.smin(A)
tensor(1) tensor(1)
>>> print(dglsp.smin(A, 0)) >>> dglsp.smin(A, 0)
tensor([1, 0, 2]) tensor([1, 0, 2])
>>> print(dglsp.smin(A, 1)) >>> dglsp.smin(A, 1)
tensor([1, 1, 0, 0]) tensor([1, 1, 0, 0])
Case2: vector-valued sparse matrix Case2: vector-valued sparse matrix
...@@ -245,13 +245,13 @@ def smin(input: SparseMatrix, dim: Optional[int] = None): ...@@ -245,13 +245,13 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]]) >>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smin(A)) >>> dglsp.smin(A)
tensor([1, 1]) tensor([1, 1])
>>> print(dglsp.smin(A, 0)) >>> dglsp.smin(A, 0)
tensor([[1, 1], tensor([[1, 1],
[0, 0], [0, 0],
[2, 2]]) [2, 2]])
>>> print(dglsp.smin(A, 1)) >>> dglsp.smin(A, 1)
tensor([[1, 2], tensor([[1, 2],
[2, 1], [2, 1],
[0, 0], [0, 0],
...@@ -295,11 +295,11 @@ def smean(input: SparseMatrix, dim: Optional[int] = None): ...@@ -295,11 +295,11 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1., 1., 2.]) >>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smean(A)) >>> dglsp.smean(A)
tensor(1.3333) tensor(1.3333)
>>> print(dglsp.smean(A, 0)) >>> dglsp.smean(A, 0)
tensor([1., 0., 2.]) tensor([1., 0., 2.])
>>> print(dglsp.smean(A, 1)) >>> dglsp.smean(A, 1)
tensor([1.0000, 1.5000, 0.0000, 0.0000]) tensor([1.0000, 1.5000, 0.0000, 0.0000])
Case2: vector-valued sparse matrix Case2: vector-valued sparse matrix
...@@ -308,13 +308,13 @@ def smean(input: SparseMatrix, dim: Optional[int] = None): ...@@ -308,13 +308,13 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]]) >>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smean(A)) >>> dglsp.smean(A)
tensor([1.6667, 1.6667]) tensor([1.6667, 1.6667])
>>> print(dglsp.smean(A, 0)) >>> dglsp.smean(A, 0)
tensor([[1.5000, 1.5000], tensor([[1.5000, 1.5000],
[0.0000, 0.0000], [0.0000, 0.0000],
[2.0000, 2.0000]]) [2.0000, 2.0000]])
>>> print(dglsp.smean(A, 1)) >>> dglsp.smean(A, 1)
tensor([[1.0000, 2.0000], tensor([[1.0000, 2.0000],
[2.0000, 1.5000], [2.0000, 1.5000],
[0.0000, 0.0000], [0.0000, 0.0000],
...@@ -358,11 +358,11 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None): ...@@ -358,11 +358,11 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2]) >>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sprod(A)) >>> dglsp.sprod(A)
tensor(2) tensor(2)
>>> print(dglsp.sprod(A, 0)) >>> dglsp.sprod(A, 0)
tensor([1, 0, 2]) tensor([1, 0, 2])
>>> print(dglsp.sprod(A, 1)) >>> dglsp.sprod(A, 1)
tensor([1, 2, 0, 0]) tensor([1, 2, 0, 0])
Case2: vector-valued sparse matrix Case2: vector-valued sparse matrix
...@@ -371,13 +371,13 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None): ...@@ -371,13 +371,13 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2]) >>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]]) >>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3)) >>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sprod(A)) >>> dglsp.sprod(A)
tensor([4, 4]) tensor([4, 4])
>>> print(dglsp.sprod(A, 0)) >>> dglsp.sprod(A, 0)
tensor([[2, 2], tensor([[2, 2],
[0, 0], [0, 0],
[2, 2]]) [2, 2]])
>>> print(dglsp.sprod(A, 1)) >>> dglsp.sprod(A, 1)
tensor([[1, 2], tensor([[1, 2],
[4, 2], [4, 2],
[0, 0], [0, 0],
......
...@@ -20,7 +20,7 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix: ...@@ -20,7 +20,7 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
out = (X1 @ X2) * A out = (X1 @ X2) * A
In particular, :attr:`X1` and :attr:`X2` can be 1-D, then ``X1 @ X2`` In particular, :attr:`X1` and :attr:`X2` can be 1-D, then ``X1 @ X2``
becomes the out-product of the two vector (which results in a matrix). becomes the outer product of the two vectors (which results in a matrix).
Parameters Parameters
---------- ----------
...@@ -42,14 +42,14 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix: ...@@ -42,14 +42,14 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3]) >>> col = torch.tensor([2, 3, 3])
>>> val = torch.arange(1, 4).float() >>> val = torch.arange(1, 4).float()
>>> A = from_coo(row, col, val, (3, 4)) >>> A = dglsp.from_coo(row, col, val, (3, 4))
>>> X1 = torch.randn(3, 5) >>> X1 = torch.randn(3, 5)
>>> X2 = torch.randn(5, 4) >>> X2 = torch.randn(5, 4)
>>> dgl.sparse.sddmm(A, X1, X2) >>> dglsp.sddmm(A, X1, X2)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 3, 3]]), [2, 3, 3]]),
values=tensor([ 1.3097, -1.0977, 1.6953]), values=tensor([-1.6585, -3.9714, -0.5406]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return SparseMatrix(torch.ops.dgl_sparse.sddmm(A.c_sparse_matrix, X1, X2)) return SparseMatrix(torch.ops.dgl_sparse.sddmm(A.c_sparse_matrix, X1, X2))
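For the 1-D case mentioned in the docstring above, ``X1 @ X2`` is the outer product of the two vectors, so each stored value of the output should equal ``A.val * X1[row] * X2[col]``. A hedged sketch, assuming the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([1, 1, 2])
col = torch.tensor([2, 3, 3])
val = torch.arange(1, 4).float()
A = dglsp.from_coo(row, col, val, (3, 4))
X1, X2 = torch.randn(3), torch.randn(4)          # 1-D operands
out = dglsp.sddmm(A, X1, X2)
# Sampled outer product: value (i, j) becomes val * X1[i] * X2[j].
assert torch.allclose(out.val, val * X1[row] * X2[col])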
...@@ -92,15 +92,15 @@ def bsddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix: ...@@ -92,15 +92,15 @@ def bsddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3]) >>> col = torch.tensor([2, 3, 3])
>>> val = torch.arange(1, 4).float() >>> val = torch.arange(1, 4).float()
>>> A = from_coo(row, col, val, (3, 4)) >>> A = dglsp.from_coo(row, col, val, (3, 4))
>>> X1 = torch.arange(0, 3 * 5 * 2).view(3, 5, 2).float() >>> X1 = torch.arange(0, 3 * 5 * 2).view(3, 5, 2).float()
>>> X2 = torch.arange(0, 5 * 4 * 2).view(5, 4, 2).float() >>> X2 = torch.arange(0, 5 * 4 * 2).view(5, 4, 2).float()
>>> dgl.sparse.bsddmm(A, X1, X2) >>> dglsp.bsddmm(A, X1, X2)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 3, 3]]), [2, 3, 3]]),
values=tensor([[1560., 1735.], values=tensor([[1560., 1735.],
[3400., 3770.], [3400., 3770.],
[8400., 9105.]]), [8400., 9105.]]),
shape=(3, 4), nnz=3) shape=(3, 4), nnz=3, val_size=(2,))
""" """
return sddmm(A, X1, X2) return sddmm(A, X1, X2)
...@@ -9,7 +9,7 @@ __all__ = ["softmax"] ...@@ -9,7 +9,7 @@ __all__ = ["softmax"]
def softmax(input: SparseMatrix) -> SparseMatrix: def softmax(input: SparseMatrix) -> SparseMatrix:
"""Apply row-wise softmax to the non-zero elements of the sparse matrix. """Apples row-wise softmax to the non-zero elements of the sparse matrix.
If :attr:`input.val` takes shape :attr:`(nnz, D)`, then the output matrix If :attr:`input.val` takes shape :attr:`(nnz, D)`, then the output matrix
:attr:`output` and :attr:`output.val` take the same shape as :attr:`input` :attr:`output` and :attr:`output.val` take the same shape as :attr:`input`
...@@ -38,22 +38,24 @@ def softmax(input: SparseMatrix) -> SparseMatrix: ...@@ -38,22 +38,24 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
>>> A = dglsp.from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> dglsp.softmax(A) >>> dglsp.softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2], SparseMatrix(indices=tensor([[0, 0, 1, 2],
[1, 2, 2, 0]]), [1, 2, 2, 0]]),
values=tensor([0.2689, 0.7311, 1.0000, 1.0000]), values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),
shape=(3, 3), nnz=4) shape=(3, 3), nnz=4)
Case2: matrix with values of shape (nnz, D) Case2: matrix with values of shape (nnz, D)
>>> row = torch.tensor([0, 0, 1, 2])
>>> col = torch.tensor([1, 2, 2, 0])
>>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]]) >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
>>> A = dglsp.from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> dglsp.softmax(A) >>> dglsp.softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2], SparseMatrix(indices=tensor([[0, 0, 1, 2],
[1, 2, 2, 0]]), [1, 2, 2, 0]]),
values=tensor([[0.2689, 0.9820], values=tensor([[0.2689, 0.9820],
[0.7311, 0.0180], [0.7311, 0.0180],
[1.0000, 1.0000], [1.0000, 1.0000],
[1.0000, 1.0000]]), [1.0000, 1.0000]]),
shape=(3, 3), nnz=4) shape=(3, 3), nnz=4, val_size=(2,))
""" """
return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix)) return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))
......
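A typical use of the row-wise ``softmax`` is turning raw edge scores into attention weights and then aggregating features with ``spmm``. A hedged sketch, assuming the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([0, 0, 1, 2])
col = torch.tensor([1, 2, 2, 0])
score = torch.randn(4)                   # one score per stored element (edge)
A = dglsp.from_coo(row, col, score, shape=(3, 3))
attn = dglsp.softmax(A)                  # stored weights within each row sum to 1
X = torch.randn(3, 16)
out = dglsp.spmm(attn, X)                # attention-weighted features, shape (3, 16)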
...@@ -75,7 +75,7 @@ class SparseMatrix: ...@@ -75,7 +75,7 @@ class SparseMatrix:
Returns Returns
------- -------
tensor torch.Tensor
Row indices of the non-zero elements Row indices of the non-zero elements
""" """
return self.coo()[0] return self.coo()[0]
...@@ -86,7 +86,7 @@ class SparseMatrix: ...@@ -86,7 +86,7 @@ class SparseMatrix:
Returns Returns
------- -------
tensor torch.Tensor
Column indices of the non-zero elements Column indices of the non-zero elements
""" """
return self.coo()[1] return self.coo()[1]
...@@ -156,7 +156,7 @@ class SparseMatrix: ...@@ -156,7 +156,7 @@ class SparseMatrix:
return self.transpose() return self.transpose()
def transpose(self): def transpose(self):
"""Return the transpose of this sparse matrix. """Returns the transpose of this sparse matrix.
Returns Returns
------- -------
...@@ -169,13 +169,12 @@ class SparseMatrix: ...@@ -169,13 +169,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 3]) >>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([2, 1, 3]) >>> col = torch.tensor([2, 1, 3])
>>> val = torch.tensor([1, 1, 2]) >>> val = torch.tensor([1, 1, 2])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> A = A.transpose() >>> A = A.transpose()
>>> print(A)
SparseMatrix(indices=tensor([[2, 1, 3], SparseMatrix(indices=tensor([[2, 1, 3],
[1, 1, 3]]), [1, 1, 3]]),
values=tensor([1, 1, 2]), values=tensor([1, 1, 2]),
shape=(4, 4), nnz=3) shape=(4, 4), nnz=3)
""" """
return SparseMatrix(self.c_sparse_matrix.transpose()) return SparseMatrix(self.c_sparse_matrix.transpose())
...@@ -202,13 +201,13 @@ class SparseMatrix: ...@@ -202,13 +201,13 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0]) >>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.to(device='cuda:0', dtype=torch.int32) >>> A.to(device='cuda:0', dtype=torch.int32)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]], device='cuda:0'), [1, 2, 0]], device='cuda:0'),
values=tensor([1, 1, 1], device='cuda:0', values=tensor([1, 1, 1], device='cuda:0',
dtype=torch.int32), dtype=torch.int32),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
if device is None: if device is None:
device = self.device device = self.device
...@@ -243,12 +242,12 @@ class SparseMatrix: ...@@ -243,12 +242,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0]) >>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.cuda() >>> A.cuda()
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]], device='cuda:0'), [1, 2, 0]], device='cuda:0'),
values=tensor([1., 1., 1.], device='cuda:0'), values=tensor([1., 1., 1.], device='cuda:0'),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return self.to(device="cuda") return self.to(device="cuda")
...@@ -266,12 +265,12 @@ class SparseMatrix: ...@@ -266,12 +265,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]).to('cuda') >>> row = torch.tensor([1, 1, 2]).to('cuda')
>>> col = torch.tensor([1, 2, 0]).to('cuda') >>> col = torch.tensor([1, 2, 0]).to('cuda')
>>> A = from_coo(row, col, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.cpu() >>> A.cpu()
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]), [1, 2, 0]]),
values=tensor([1., 1., 1.]), values=tensor([1., 1., 1.]),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return self.to(device="cpu") return self.to(device="cpu")
...@@ -290,12 +289,12 @@ class SparseMatrix: ...@@ -290,12 +289,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0]) >>> col = torch.tensor([1, 2, 0])
>>> val = torch.ones(len(row)).long() >>> val = torch.ones(len(row)).long()
>>> A = from_coo(row, col, val, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A.float() >>> A.float()
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]), [1, 2, 0]]),
values=tensor([1., 1., 1.]), values=tensor([1., 1., 1.]),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return self.to(dtype=torch.float) return self.to(dtype=torch.float)
...@@ -313,12 +312,12 @@ class SparseMatrix: ...@@ -313,12 +312,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0]) >>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.double() >>> A.double()
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]), [1, 2, 0]]),
values=tensor([1., 1., 1.], dtype=torch.float64), values=tensor([1., 1., 1.], dtype=torch.float64),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return self.to(dtype=torch.double) return self.to(dtype=torch.double)
...@@ -336,12 +335,12 @@ class SparseMatrix: ...@@ -336,12 +335,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0]) >>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.int() >>> A.int()
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]), [1, 2, 0]]),
values=tensor([1, 1, 1], dtype=torch.int32), values=tensor([1, 1, 1], dtype=torch.int32),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return self.to(dtype=torch.int) return self.to(dtype=torch.int)
...@@ -359,12 +358,12 @@ class SparseMatrix: ...@@ -359,12 +358,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0]) >>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4)) >>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.long() >>> A.long()
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]), [1, 2, 0]]),
values=tensor([1, 1, 1]), values=tensor([1, 1, 1]),
size=(3, 4), nnz=3) shape=(3, 4), nnz=3)
""" """
return self.to(dtype=torch.long) return self.to(dtype=torch.long)
...@@ -391,13 +390,12 @@ class SparseMatrix: ...@@ -391,13 +390,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 0, 0, 0, 1]) >>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2]) >>> col = torch.tensor([1, 1, 1, 2, 2])
>>> val = torch.tensor([0, 1, 2, 3, 4]) >>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> A = A.coalesce() >>> A.coalesce()
>>> print(A)
SparseMatrix(indices=tensor([[0, 0, 1, 1], SparseMatrix(indices=tensor([[0, 0, 1, 1],
[1, 2, 1, 2]]), [1, 2, 1, 2]]),
values=tensor([3, 3, 0, 4]), values=tensor([3, 3, 0, 4]),
shape=(2, 3), nnz=4) shape=(2, 3), nnz=4)
""" """
return SparseMatrix(self.c_sparse_matrix.coalesce()) return SparseMatrix(self.c_sparse_matrix.coalesce())
...@@ -409,10 +407,10 @@ class SparseMatrix: ...@@ -409,10 +407,10 @@ class SparseMatrix:
>>> row = torch.tensor([1, 0, 0, 0, 1]) >>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2]) >>> col = torch.tensor([1, 1, 1, 2, 2])
>>> val = torch.tensor([0, 1, 2, 3, 4]) >>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> print(A.has_duplicate()) >>> A.has_duplicate()
True True
>>> print(A.coalesce().has_duplicate()) >>> A.coalesce().has_duplicate()
False False
""" """
return self.c_sparse_matrix.has_duplicate() return self.c_sparse_matrix.has_duplicate()
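``has_duplicate`` and ``coalesce`` are how duplicate coordinates (e.g. parallel edges) are detected and merged; in the docstring example above the duplicated entries appear to be combined by summing their values. A sketch under that assumption, using the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([0, 0, 1])
col = torch.tensor([1, 1, 2])                  # (0, 1) is stored twice
A = dglsp.from_coo(row, col, torch.tensor([1., 2., 3.]))
assert A.has_duplicate()
B = A.coalesce()
assert not B.has_duplicate()                   # two stored elements remain
# Following the example above, the two (0, 1) values are expected to merge into 3.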
...@@ -424,15 +422,15 @@ def from_coo( ...@@ -424,15 +422,15 @@ def from_coo(
val: Optional[torch.Tensor] = None, val: Optional[torch.Tensor] = None,
shape: Optional[Tuple[int, int]] = None, shape: Optional[Tuple[int, int]] = None,
) -> SparseMatrix: ) -> SparseMatrix:
"""Creates a sparse matrix from row and column coordinates. r"""Creates a sparse matrix from row and column coordinates.
Parameters Parameters
---------- ----------
row : tensor row : torch.Tensor
The row indices of shape (nnz) The row indices of shape (nnz)
col : tensor col : torch.Tensor
The column indices of shape (nnz) The column indices of shape (nnz)
val : tensor, optional val : torch.Tensor, optional
The values of shape (nnz) or (nnz, D). If None, it will be a tensor of The values of shape (nnz) or (nnz, D). If None, it will be a tensor of
shape (nnz) filled by 1. shape (nnz) filled by 1.
shape : tuple[int, int], optional shape : tuple[int, int], optional
...@@ -452,31 +450,31 @@ def from_coo( ...@@ -452,31 +450,31 @@ def from_coo(
>>> dst = torch.tensor([1, 1, 2]) >>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3]) >>> src = torch.tensor([2, 4, 3])
>>> A = from_coo(dst, src) >>> A = dglsp.from_coo(dst, src)
>>> print(A)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]), [2, 4, 3]]),
values=tensor([1., 1., 1.]), values=tensor([1., 1., 1.]),
shape=(3, 5), nnz=3) shape=(3, 5), nnz=3)
>>> # Specify shape >>> # Specify shape
>>> A = from_coo(dst, src, shape=(5, 5)) >>> A = dglsp.from_coo(dst, src, shape=(5, 5))
>>> print(A)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]), [2, 4, 3]]),
values=tensor([1., 1., 1.]), values=tensor([1., 1., 1.]),
shape=(5, 5), nnz=3) shape=(5, 5), nnz=3)
Case2: Sparse matrix with scalar/vector values. The following example uses Case2: Sparse matrix with scalar/vector values. The following example uses
vector data. vector data.
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]]) >>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> A = from_coo(dst, src, val) >>> A = dglsp.from_coo(dst, src, val)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]), [2, 4, 3]]),
values=tensor([[1., 1.], values=tensor([[1., 1.],
[2., 2.], [2., 2.],
[3., 3.]]), [3., 3.]]),
shape=(3, 5), nnz=3) shape=(3, 5), nnz=3, val_size=(2,))
""" """
if shape is None: if shape is None:
shape = (torch.max(row).item() + 1, torch.max(col).item() + 1) shape = (torch.max(row).item() + 1, torch.max(col).item() + 1)
...@@ -492,7 +490,7 @@ def from_csr( ...@@ -492,7 +490,7 @@ def from_csr(
val: Optional[torch.Tensor] = None, val: Optional[torch.Tensor] = None,
shape: Optional[Tuple[int, int]] = None, shape: Optional[Tuple[int, int]] = None,
) -> SparseMatrix: ) -> SparseMatrix:
"""Creates a sparse matrix from CSR indices. r"""Creates a sparse matrix from CSR indices.
For row i of the sparse matrix For row i of the sparse matrix
...@@ -502,12 +500,12 @@ def from_csr( ...@@ -502,12 +500,12 @@ def from_csr(
Parameters Parameters
---------- ----------
indptr : tensor indptr : torch.Tensor
Pointer to the column indices of shape (N + 1), where N is the number Pointer to the column indices of shape (N + 1), where N is the number
of rows of rows
indices : tensor indices : torch.Tensor
The column indices of shape (nnz) The column indices of shape (nnz)
val : tensor, optional val : torch.Tensor, optional
The values of shape (nnz) or (nnz, D). If None, it will be a tensor of The values of shape (nnz) or (nnz, D). If None, it will be a tensor of
shape (nnz) filled by 1. shape (nnz) filled by 1.
shape : tuple[int, int], optional shape : tuple[int, int], optional
...@@ -531,34 +529,33 @@ def from_csr( ...@@ -531,34 +529,33 @@ def from_csr(
>>> indptr = torch.tensor([0, 1, 2, 5]) >>> indptr = torch.tensor([0, 1, 2, 5])
>>> indices = torch.tensor([1, 2, 0, 1, 2]) >>> indices = torch.tensor([1, 2, 0, 1, 2])
>>> A = from_csr(indptr, indices) >>> A = dglsp.from_csr(indptr, indices)
>>> print(A)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2], SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]), [1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]), values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 3), nnz=5) shape=(3, 3), nnz=5)
>>> # Specify shape >>> # Specify shape
>>> A = from_csr(indptr, indices, shape=(3, 5)) >>> A = dglsp.from_csr(indptr, indices, shape=(3, 5))
>>> print(A)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2], SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]), [1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]), values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 5), nnz=5) shape=(3, 5), nnz=5)
Case2: Sparse matrix with scalar/vector values. The following example uses Case2: Sparse matrix with scalar/vector values. The following example uses
vector data. vector data.
>>> indptr = torch.tensor([0, 1, 2, 5])
>>> indices = torch.tensor([1, 2, 0, 1, 2])
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) >>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = from_csr(indptr, indices, val) >>> A = dglsp.from_csr(indptr, indices, val)
>>> print(A)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2], SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]), [1, 2, 0, 1, 2]]),
values=tensor([[1, 1], values=tensor([[1, 1],
[2, 2], [2, 2],
[3, 3], [3, 3],
[4, 4], [4, 4],
[5, 5]]), [5, 5]]),
shape=(3, 3), nnz=5) shape=(3, 3), nnz=5, val_size=(2,))
""" """
if shape is None: if shape is None:
shape = (indptr.shape[0] - 1, torch.max(indices) + 1) shape = (indptr.shape[0] - 1, torch.max(indices) + 1)
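The CSR convention here is the standard one: the column indices of row ``i`` are ``indices[indptr[i] : indptr[i + 1]]``, which is exactly how the docstring example above expands into COO form. A small check, assuming the ``dglsp`` alias and the ``row``/``col`` properties documented earlier in this change:

import torch
import dgl.sparse as dglsp  # assumed import alias

indptr = torch.tensor([0, 1, 2, 5])
indices = torch.tensor([1, 2, 0, 1, 2])
A = dglsp.from_csr(indptr, indices)
# Row 2 owns indices[2:5] == [0, 1, 2], so its stored columns are 0, 1 and 2.
assert A.col[A.row == 2].tolist() == [0, 1, 2]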
...@@ -576,7 +573,7 @@ def from_csc( ...@@ -576,7 +573,7 @@ def from_csc(
val: Optional[torch.Tensor] = None, val: Optional[torch.Tensor] = None,
shape: Optional[Tuple[int, int]] = None, shape: Optional[Tuple[int, int]] = None,
) -> SparseMatrix: ) -> SparseMatrix:
"""Creates a sparse matrix from CSC indices. r"""Creates a sparse matrix from CSC indices.
For column i of the sparse matrix For column i of the sparse matrix
...@@ -586,12 +583,12 @@ def from_csc( ...@@ -586,12 +583,12 @@ def from_csc(
Parameters Parameters
---------- ----------
indptr : tensor indptr : torch.Tensor
Pointer to the row indices of shape N + 1, where N is the Pointer to the row indices of shape N + 1, where N is the
number of columns number of columns
indices : tensor indices : torch.Tensor
The row indices of shape nnz The row indices of shape nnz
val : tensor, optional val : torch.Tensor, optional
The values of shape (nnz) or (nnz, D). If None, it will be a tensor of The values of shape (nnz) or (nnz, D). If None, it will be a tensor of
shape (nnz) filled by 1. shape (nnz) filled by 1.
shape : tuple[int, int], optional shape : tuple[int, int], optional
...@@ -615,34 +612,33 @@ def from_csc( ...@@ -615,34 +612,33 @@ def from_csc(
>>> indptr = torch.tensor([0, 1, 3, 5]) >>> indptr = torch.tensor([0, 1, 3, 5])
>>> indices = torch.tensor([2, 0, 2, 1, 2]) >>> indices = torch.tensor([2, 0, 2, 1, 2])
>>> A = from_csc(indptr, indices) >>> A = dglsp.from_csc(indptr, indices)
>>> print(A)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2], SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]), [0, 1, 1, 2, 2]]),
values=tensor([1., 1., 1., 1., 1.]), values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 3), nnz=5) shape=(3, 3), nnz=5)
>>> # Specify shape >>> # Specify shape
>>> A = from_csc(indptr, indices, shape=(5, 3)) >>> A = dglsp.from_csc(indptr, indices, shape=(5, 3))
>>> print(A)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2], SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]), [0, 1, 1, 2, 2]]),
values=tensor([1., 1., 1., 1., 1.]), values=tensor([1., 1., 1., 1., 1.]),
shape=(5, 3), nnz=5) shape=(5, 3), nnz=5)
Case2: Sparse matrix with scalar/vector values. The following example uses Case2: Sparse matrix with scalar/vector values. The following example uses
vector data. vector data.
>>> indptr = torch.tensor([0, 1, 3, 5])
>>> indices = torch.tensor([2, 0, 2, 1, 2])
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) >>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = from_csc(indptr, indices, val) >>> A = dglsp.from_csc(indptr, indices, val)
>>> print(A)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2], SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]), [0, 1, 1, 2, 2]]),
values=tensor([[1, 1], values=tensor([[1, 1],
[2, 2], [2, 2],
[3, 3], [3, 3],
[4, 4], [4, 4],
[5, 5]]), [5, 5]]),
shape=(3, 3), nnz=5) shape=(3, 3), nnz=5, val_size=(2,))
""" """
if shape is None: if shape is None:
shape = (torch.max(indices) + 1, indptr.shape[0] - 1) shape = (torch.max(indices) + 1, indptr.shape[0] - 1)
...@@ -664,7 +660,7 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix: ...@@ -664,7 +660,7 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
---------- ----------
mat : SparseMatrix mat : SparseMatrix
An existing sparse matrix with non-zero values An existing sparse matrix with non-zero values
val : tensor val : torch.Tensor
The new values of the non-zero elements, a tensor of shape (nnz) or (nnz, D) The new values of the non-zero elements, a tensor of shape (nnz) or (nnz, D)
Returns Returns
...@@ -678,13 +674,12 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix: ...@@ -678,13 +674,12 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 4, 3]) >>> col = torch.tensor([2, 4, 3])
>>> val = torch.ones(3) >>> val = torch.ones(3)
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> B = val_like(A, torch.tensor([2, 2, 2])) >>> A = dglsp.val_like(A, torch.tensor([2, 2, 2]))
>>> print(B)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]), [2, 4, 3]]),
values=tensor([2, 2, 2]), values=tensor([2, 2, 2]),
shape=(3, 5), nnz=3) shape=(3, 5), nnz=3)
""" """
return SparseMatrix(torch.ops.dgl_sparse.val_like(mat.c_sparse_matrix, val)) return SparseMatrix(torch.ops.dgl_sparse.val_like(mat.c_sparse_matrix, val))
......
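``val_like`` keeps the sparsity pattern (indices and shape) and only swaps the values, which makes it handy for re-weighting the stored elements. A hedged sketch, assuming the ``dglsp`` alias:

import torch
import dgl.sparse as dglsp  # assumed import alias

row = torch.tensor([1, 1, 2])
col = torch.tensor([2, 4, 3])
A = dglsp.from_coo(row, col, torch.tensor([2., 4., 4.]))
B = dglsp.val_like(A, A.val / A.val.sum())     # same pattern, normalized values
assert torch.equal(B.row, A.row) and torch.equal(B.col, A.col)
assert B.shape == A.shape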
...@@ -4,8 +4,8 @@ from .diag_matrix import diag, DiagMatrix ...@@ -4,8 +4,8 @@ from .diag_matrix import diag, DiagMatrix
def neg(D: DiagMatrix) -> DiagMatrix: def neg(D: DiagMatrix) -> DiagMatrix:
"""Return a new diagonal matrix with the negation of the original nonzero """Returns a new diagonal matrix with the negation of the original nonzero
values. values, equivalent to ``-D``.
Returns Returns
------- -------
...@@ -16,17 +16,16 @@ def neg(D: DiagMatrix) -> DiagMatrix: ...@@ -16,17 +16,16 @@ def neg(D: DiagMatrix) -> DiagMatrix:
-------- --------
>>> val = torch.arange(3).float() >>> val = torch.arange(3).float()
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat = -mat >>> D = -D
>>> print(mat)
DiagMatrix(val=tensor([-0., -1., -2.]), DiagMatrix(val=tensor([-0., -1., -2.]),
shape=(3, 3)) shape=(3, 3))
""" """
return diag(-D.val, D.shape) return diag(-D.val, D.shape)
def inv(D: DiagMatrix) -> DiagMatrix: def inv(D: DiagMatrix) -> DiagMatrix:
"""Return the inverse of the diagonal matrix. """Returns the inverse of the diagonal matrix.
This function only supports square matrices with scalar nonzero values. This function only supports square matrices with scalar nonzero values.
...@@ -39,11 +38,10 @@ def inv(D: DiagMatrix) -> DiagMatrix: ...@@ -39,11 +38,10 @@ def inv(D: DiagMatrix) -> DiagMatrix:
-------- --------
>>> val = torch.arange(1, 4).float() >>> val = torch.arange(1, 4).float()
>>> mat = diag(val) >>> D = dglsp.diag(val)
>>> mat = mat.inv() >>> D = D.inv()
>>> print(mat)
DiagMatrix(val=tensor([1.0000, 0.5000, 0.3333]), DiagMatrix(val=tensor([1.0000, 0.5000, 0.3333]),
shape=(3, 3)) shape=(3, 3))
""" """
num_rows, num_cols = D.shape num_rows, num_cols = D.shape
assert num_rows == num_cols, f"Expect a square matrix, got shape {D.shape}" assert num_rows == num_cols, f"Expect a square matrix, got shape {D.shape}"
......
...@@ -3,8 +3,8 @@ from .sparse_matrix import SparseMatrix, val_like ...@@ -3,8 +3,8 @@ from .sparse_matrix import SparseMatrix, val_like
def neg(A: SparseMatrix) -> SparseMatrix: def neg(A: SparseMatrix) -> SparseMatrix:
"""Return a new sparse matrix with the negation of the original nonzero """Returns a new sparse matrix with the negation of the original nonzero
values. values, equivalent to ``-A``.
Returns Returns
------- -------
...@@ -17,13 +17,12 @@ def neg(A: SparseMatrix) -> SparseMatrix: ...@@ -17,13 +17,12 @@ def neg(A: SparseMatrix) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 3]) >>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([1, 2, 3]) >>> col = torch.tensor([1, 2, 3])
>>> val = torch.tensor([1., 1., 2.]) >>> val = torch.tensor([1., 1., 2.])
>>> A = from_coo(row, col, val) >>> A = dglsp.from_coo(row, col, val)
>>> A = -A >>> A = -A
>>> print(A)
SparseMatrix(indices=tensor([[1, 1, 3], SparseMatrix(indices=tensor([[1, 1, 3],
[1, 2, 3]]), [1, 2, 3]]),
values=tensor([-1., -1., -2.]), values=tensor([-1., -1., -2.]),
shape=(4, 4), nnz=3) shape=(4, 4), nnz=3)
""" """
return val_like(A, -A.val) return val_like(A, -A.val)
......