Unverified Commit c4ffd752 authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Polish the doc strings. (#5203)


Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 050798e9
......@@ -104,9 +104,8 @@ class DiagMatrix:
>>> import torch
>>> val = torch.ones(5)
>>> mat = diag(val)
>>> sp_mat = mat.to_sparse()
>>> print(sp_mat)
>>> D = dglsp.diag(val)
>>> D.to_sparse()
SparseMatrix(indices=tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]),
values=tensor([1., 1., 1., 1., 1.]),
......@@ -152,9 +151,8 @@ class DiagMatrix:
--------
>>> val = torch.arange(1, 5).float()
>>> mat = diag(val, shape=(4, 5))
>>> mat = mat.transpose()
>>> print(mat)
>>> D = dglsp.diag(val, shape=(4, 5))
>>> D.transpose()
DiagMatrix(val=tensor([1., 2., 3., 4.]),
shape=(5, 4))
"""
......@@ -182,10 +180,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.to(device='cuda:0', dtype=torch.int32)
>>> D = dglsp.diag(val)
>>> D.to(device='cuda:0', dtype=torch.int32)
DiagMatrix(values=tensor([1, 1], device='cuda:0', dtype=torch.int32),
size=(2, 2))
shape=(2, 2))
"""
if device is None:
device = self.device
......@@ -211,10 +209,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.cuda()
>>> D = dglsp.diag(val)
>>> D.cuda()
DiagMatrix(values=tensor([1., 1.], device='cuda:0'),
size=(2, 2))
shape=(2, 2))
"""
return self.to(device="cuda")
......@@ -231,10 +229,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.cpu()
>>> D = dglsp.diag(val)
>>> D.cpu()
DiagMatrix(values=tensor([1., 1.]),
size=(2, 2))
shape=(2, 2))
"""
return self.to(device="cpu")
......@@ -251,10 +249,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.float()
>>> D = dglsp.diag(val)
>>> D.float()
DiagMatrix(values=tensor([1., 1.]),
size=(2, 2))
shape=(2, 2))
"""
return self.to(dtype=torch.float)
......@@ -271,10 +269,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.double()
>>> D = dglsp.diag(val)
>>> D.double()
DiagMatrix(values=tensor([1., 1.], dtype=torch.float64),
size=(2, 2))
shape=(2, 2))
"""
return self.to(dtype=torch.double)
......@@ -291,10 +289,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.int()
>>> D = dglsp.diag(val)
>>> D.int()
DiagMatrix(values=tensor([1, 1], dtype=torch.int32),
size=(2, 2))
shape=(2, 2))
"""
return self.to(dtype=torch.int)
......@@ -311,10 +309,10 @@ class DiagMatrix:
--------
>>> val = torch.ones(2)
>>> mat = diag(val)
>>> mat.long()
>>> D = dglsp.diag(val)
>>> D.long()
DiagMatrix(values=tensor([1, 1]),
size=(2, 2))
shape=(2, 2))
"""
return self.to(dtype=torch.long)
......@@ -344,26 +342,24 @@ def diag(
>>> import torch
>>> val = torch.ones(5)
>>> mat = diag(val)
>>> print(mat)
>>> dglsp.diag(val)
DiagMatrix(val=tensor([1., 1., 1., 1., 1.]),
shape=(5, 5))
Case2: 5-by-10 diagonal matrix with scalar values on the diagonal
>>> val = torch.ones(5)
>>> mat = diag(val, shape=(5, 10))
>>> print(mat)
>>> dglsp.diag(val, shape=(5, 10))
DiagMatrix(val=tensor([1., 1., 1., 1., 1.]),
shape=(5, 10))
Case3: 5-by-5 diagonal matrix with tensor values on the diagonal
Case3: 5-by-5 diagonal matrix with vector values on the diagonal
>>> val = torch.randn(5, 3)
>>> mat = diag(val)
>>> mat.shape
>>> D = dglsp.diag(val)
>>> D.shape
(5, 5)
>>> mat.nnz
>>> D.nnz
5
"""
# NOTE(Mufei): this may not be needed if DiagMatrix is simple enough
......@@ -376,7 +372,7 @@ def identity(
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
) -> DiagMatrix:
"""Creates a diagonal matrix with ones on the diagonal and zeros elsewhere.
r"""Creates a diagonal matrix with ones on the diagonal and zeros elsewhere.
Parameters
----------
......@@ -404,8 +400,7 @@ def identity(
[0, 1, 0],
[0, 0, 1]]
>>> mat = identity(shape=(3, 3))
>>> print(mat)
>>> dglsp.identity(shape=(3, 3))
DiagMatrix(val=tensor([1., 1., 1.]),
shape=(3, 3))
......@@ -415,19 +410,17 @@ def identity(
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0]]
>>> mat = identity(shape=(3, 5))
>>> print(mat)
>>> dglsp.identity(shape=(3, 5))
DiagMatrix(val=tensor([1., 1., 1.]),
shape=(3, 5))
Case3: 3-by-3 matrix with tensor diagonal values
Case3: 3-by-3 matrix with vector diagonal values
>>> mat = identity(shape=(3, 3), d=2)
>>> print(mat)
DiagMatrix(val=tensor([[1., 1.],
>>> dglsp.identity(shape=(3, 3), d=2)
DiagMatrix(values=tensor([[1., 1.],
[1., 1.],
[1., 1.]]),
shape=(3, 3))
shape=(3, 3), val_size=(2,))
"""
len_val = min(shape)
if d is None:
......
......@@ -12,8 +12,8 @@ __all__ = ["add", "sub", "mul", "div", "power"]
def add(
A: Union[DiagMatrix, SparseMatrix], B: Union[DiagMatrix, SparseMatrix]
) -> Union[DiagMatrix, SparseMatrix]:
r"""Elementwise additions for ``DiagMatrix`` and ``SparseMatrix``,
equivalent to ``A + B``.
r"""Elementwise addition for ``DiagMatrix`` and ``SparseMatrix``, equivalent
to ``A + B``.
The supported combinations are shown as follows.
......@@ -45,9 +45,9 @@ def add(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val)
>>> B = diag(torch.arange(1, 4))
>>> add(A, B)
>>> A = dglsp.from_coo(row, col, val)
>>> B = dglsp.diag(torch.arange(1, 4))
>>> dglsp.add(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
[0, 1, 0, 1, 2]]),
values=tensor([ 1, 20, 10, 2, 33]),
......@@ -92,9 +92,9 @@ def sub(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val)
>>> B = diag(torch.arange(1, 4))
>>> sub(A, B)
>>> A = dglsp.from_coo(row, col, val)
>>> B = dglsp.diag(torch.arange(1, 4))
>>> dglsp.sub(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
[0, 1, 0, 1, 2]]),
values=tensor([-1, 20, 10, -2, 27]),
......@@ -139,20 +139,20 @@ def mul(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val)
>>> mul(A, 2)
>>> A = dglsp.from_coo(row, col, val)
>>> dglsp.mul(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
values=tensor([20, 40, 60]),
shape=(3, 4), nnz=3)
>>> D = diag(torch.arange(1, 4))
>>> mul(D, 2)
>>> D = dglsp.diag(torch.arange(1, 4))
>>> dglsp.mul(D, 2)
DiagMatrix(val=tensor([2, 4, 6]),
shape=(3, 3))
>>> D = diag(torch.arange(1, 4))
>>> mul(D, D)
>>> D = dglsp.diag(torch.arange(1, 4))
>>> dglsp.mul(D, D)
DiagMatrix(val=tensor([1, 4, 9]),
shape=(3, 3))
"""
......@@ -191,22 +191,22 @@ def div(
Examples
--------
>>> A = diag(torch.arange(1, 4))
>>> B = diag(torch.arange(10, 13))
>>> div(A, B)
>>> A = dglsp.diag(torch.arange(1, 4))
>>> B = dglsp.diag(torch.arange(10, 13))
>>> dglsp.div(A, B)
DiagMatrix(val=tensor([0.1000, 0.1818, 0.2500]),
shape=(3, 3))
>>> A = diag(torch.arange(1, 4))
>>> div(A, 2)
>>> A = dglsp.diag(torch.arange(1, 4))
>>> dglsp.div(A, 2)
DiagMatrix(val=tensor([0.5000, 1.0000, 1.5000]),
shape=(3, 3))
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3])
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A / 2
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> dglsp.div(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
values=tensor([0.5000, 1.0000, 1.5000]),
......@@ -250,15 +250,15 @@ def power(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val)
>>> power(A, 2)
>>> A = dglsp.from_coo(row, col, val)
>>> dglsp.power(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
values=tensor([100, 400, 900]),
shape=(3, 4), nnz=3)
>>> D = diag(torch.arange(1, 4))
>>> power(D, 2)
>>> D = dglsp.diag(torch.arange(1, 4))
>>> dglsp.power(D, 2)
DiagMatrix(val=tensor([1, 4, 9]),
shape=(3, 3))
"""
......
......@@ -25,8 +25,8 @@ def diag_add(
Examples
--------
>>> D1 = diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13))
>>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D1 + D2
DiagMatrix(val=tensor([11, 13, 15]),
shape=(3, 3))
......@@ -68,8 +68,8 @@ def diag_sub(
Examples
--------
>>> D1 = diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13))
>>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D1 - D2
DiagMatrix(val=tensor([-9, -9, -9]),
shape=(3, 3))
......@@ -111,8 +111,8 @@ def diag_rsub(
Examples
--------
>>> D1 = diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13))
>>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D2 - D1
DiagMatrix(val=tensor([-9, -9, -9]),
shape=(3, 3))
......@@ -137,7 +137,7 @@ def diag_mul(D1: DiagMatrix, D2: Union[DiagMatrix, Scalar]) -> DiagMatrix:
Examples
--------
>>> D = diag(torch.arange(1, 4))
>>> D = dglsp.diag(torch.arange(1, 4))
>>> D * 2.5
DiagMatrix(val=tensor([2.5000, 5.0000, 7.5000]),
shape=(3, 3))
......@@ -178,8 +178,8 @@ def diag_div(D1: DiagMatrix, D2: Union[DiagMatrix, Scalar]) -> DiagMatrix:
Examples
--------
>>> D1 = diag(torch.arange(1, 4))
>>> D2 = diag(torch.arange(10, 13))
>>> D1 = dglsp.diag(torch.arange(1, 4))
>>> D2 = dglsp.diag(torch.arange(10, 13))
>>> D1 / D2
DiagMatrix(val=tensor([0.1000, 0.1818, 0.2500]),
shape=(3, 3))
......@@ -221,7 +221,7 @@ def diag_power(D: DiagMatrix, scalar: Scalar) -> DiagMatrix:
Examples
--------
>>> D = diag(torch.arange(1, 4))
>>> D = dglsp.diag(torch.arange(1, 4))
>>> D ** 2
DiagMatrix(val=tensor([1, 4, 9]),
shape=(3, 3))
......
......@@ -33,7 +33,7 @@ def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A + A
SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]),
......@@ -67,8 +67,8 @@ def sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> val2 = torch.tensor([5, 10, 15])
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> B = from_coo(row, col, val2, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> B = dglsp.from_coo(row, col, val2, shape=(3, 4))
>>> A - B
SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]),
......@@ -102,7 +102,7 @@ def sp_mul(A: SparseMatrix, B: Scalar) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3])
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A * 2
SparseMatrix(indices=tensor([[1, 0, 2],
......@@ -145,7 +145,7 @@ def sp_div(A: SparseMatrix, B: Scalar) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3])
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A / 2
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
......@@ -180,7 +180,7 @@ def sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = from_coo(row, col, val)
>>> A = dglsp.from_coo(row, col, val)
>>> A ** 2
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
......
......@@ -12,7 +12,7 @@ __all__ = ["spmm", "bspmm", "spspmm", "matmul"]
def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
"""Multiply a sparse matrix by a dense matrix, equivalent to ``A @ X``.
"""Multiplies a sparse matrix by a dense matrix, equivalent to ``A @ X``.
Parameters
----------
......@@ -32,12 +32,12 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1])
>>> val = torch.randn(len(row))
>>> A = from_coo(row, col, val)
>>> A = dglsp.from_coo(row, col, val)
>>> X = torch.randn(2, 3)
>>> result = dgl.sparse.spmm(A, X)
>>> print(type(result))
>>> result = dglsp.spmm(A, X)
>>> type(result)
<class 'torch.Tensor'>
>>> print(result.shape)
>>> result.shape
torch.Size([2, 3])
"""
assert isinstance(
......@@ -54,7 +54,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
"""Multiply a sparse matrix by a dense matrix by batches, equivalent to
"""Multiplies a sparse matrix by a dense matrix by batches, equivalent to
``A @ X``.
Parameters
......@@ -75,12 +75,12 @@ def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 2])
>>> val = torch.randn(len(row), 2)
>>> A = from_coo(row, col, val, shape=(3, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(3, 3))
>>> X = torch.randn(3, 3, 2)
>>> result = dgl.sparse.bspmm(A, X)
>>> print(type(result))
>>> result = dglsp.bspmm(A, X)
>>> type(result)
<class 'torch.Tensor'>
>>> print(result.shape)
>>> result.shape
torch.Size([3, 3, 2])
"""
assert isinstance(
......@@ -177,7 +177,7 @@ def _diag_sparse_mm(D, A):
def spspmm(
A: Union[SparseMatrix, DiagMatrix], B: Union[SparseMatrix, DiagMatrix]
) -> Union[SparseMatrix, DiagMatrix]:
"""Multiply a sparse matrix by a sparse matrix, equivalent to ``A @ B``.
"""Multiplies a sparse matrix by a sparse matrix, equivalent to ``A @ B``.
The non-zero values of the two sparse matrices must be 1D.
......@@ -200,14 +200,12 @@ def spspmm(
>>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1])
>>> val1 = torch.ones(len(row1))
>>> A = from_coo(row1, col1, val1)
>>> A = dglsp.from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1])
>>> val2 = torch.ones(len(row2))
>>> B = from_coo(row2, col2, val2)
>>> result = dgl.sparse.spspmm(A, B)
>>> print(result)
>>> B = dglsp.from_coo(row2, col2, val2)
>>> dglsp.spspmm(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],
[1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
......@@ -235,7 +233,7 @@ def matmul(
A: Union[torch.Tensor, SparseMatrix, DiagMatrix],
B: Union[torch.Tensor, SparseMatrix, DiagMatrix],
) -> Union[torch.Tensor, SparseMatrix, DiagMatrix]:
"""Multiply two dense/sparse/diagonal matrices, equivalent to ``A @ B``.
"""Multiplies two dense/sparse/diagonal matrices, equivalent to ``A @ B``.
The supported combinations are shown as follows.
......@@ -282,44 +280,44 @@ def matmul(
Examples
--------
Multiply a diagonal matrix with a dense matrix.
Multiplies a diagonal matrix with a dense matrix.
>>> val = torch.randn(3)
>>> A = diag(val)
>>> A = dglsp.diag(val)
>>> B = torch.randn(3, 2)
>>> result = dgl.sparse.matmul(A, B)
>>> print(type(result))
>>> result = dglsp.matmul(A, B)
>>> type(result)
<class 'torch.Tensor'>
>>> print(result.shape)
>>> result.shape
torch.Size([3, 2])
Multiply a sparse matrix with a dense matrix.
Multiplies a sparse matrix with a dense matrix.
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1])
>>> val = torch.randn(len(row))
>>> A = from_coo(row, col, val)
>>> A = dglsp.from_coo(row, col, val)
>>> X = torch.randn(2, 3)
>>> result = dgl.sparse.matmul(A, X)
>>> print(type(result))
>>> result = dglsp.matmul(A, X)
>>> type(result)
<class 'torch.Tensor'>
>>> print(result.shape)
>>> result.shape
torch.Size([2, 3])
Multiply a sparse matrix with a sparse matrix.
Multiplies a sparse matrix with a sparse matrix.
>>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1])
>>> val1 = torch.ones(len(row1))
>>> A = from_coo(row1, col1, val1)
>>> A = dglsp.from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1])
>>> val2 = torch.ones(len(row2))
>>> B = from_coo(row2, col2, val2)
>>> result = dgl.sparse.matmul(A, B)
>>> print(type(result))
>>> B = dglsp.from_coo(row2, col2, val2)
>>> result = dglsp.matmul(A, B)
>>> type(result)
<class 'dgl.sparse.sparse_matrix.SparseMatrix'>
>>> print(result.shape)
>>> result.shape
(2, 3)
"""
assert isinstance(A, (torch.Tensor, SparseMatrix, DiagMatrix)), (
......
......@@ -47,15 +47,15 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.reduce(A, rtype='sum'))
>>> dglsp.reduce(A, rtype='sum')
tensor(4)
>>> print(dglsp.reduce(A, 0, 'sum'))
>>> dglsp.reduce(A, 0, 'sum')
tensor([2, 0, 2])
>>> print(dglsp.reduce(A, 1, 'sum'))
>>> dglsp.reduce(A, 1, 'sum')
tensor([1, 3, 0, 0])
>>> print(dglsp.reduce(A, 0, 'smax'))
>>> dglsp.reduce(A, 0, 'smax')
tensor([1, 0, 2])
>>> print(dglsp.reduce(A, 1, 'smin'))
>>> dglsp.reduce(A, 1, 'smin')
tensor([1, 1, 0, 0])
Case2: vector-valued sparse matrix
......@@ -64,18 +64,18 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.reduce(A, rtype='sum'))
>>> dglsp.reduce(A, rtype='sum')
tensor([5., 5.])
>>> print(dglsp.reduce(A, 0, 'sum'))
>>> dglsp.reduce(A, 0, 'sum')
tensor([[3., 3.],
[0., 0.],
[2., 2.]])
>>> print(dglsp.reduce(A, 1, 'smin'))
>>> dglsp.reduce(A, 1, 'smin')
tensor([[1., 2.],
[2., 1.],
[0., 0.],
[0., 0.]])
>>> print(dglsp.reduce(A, 0, 'smean'))
>>> dglsp.reduce(A, 0, 'smean')
tensor([[1.5000, 1.5000],
[0.0000, 0.0000],
[2.0000, 2.0000]])
......@@ -115,11 +115,11 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sum(A))
>>> dglsp.sum(A)
tensor(4)
>>> print(dglsp.sum(A, 0))
>>> dglsp.sum(A, 0)
tensor([2, 0, 2])
>>> print(dglsp.sum(A, 1))
>>> dglsp.sum(A, 1)
tensor([1, 3, 0, 0])
Case2: vector-valued sparse matrix
......@@ -128,9 +128,9 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sum(A))
>>> dglsp.sum(A)
tensor([5, 5])
>>> print(dglsp.sum(A, 0))
>>> dglsp.sum(A, 0)
tensor([[3, 3],
[0, 0],
[2, 2]])
......@@ -173,11 +173,11 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smax(A))
>>> dglsp.smax(A)
tensor(2)
>>> print(dglsp.smax(A, 0))
>>> dglsp.smax(A, 0)
tensor([1, 0, 2])
>>> print(dglsp.smax(A, 1))
>>> dglsp.smax(A, 1)
tensor([1, 2, 0, 0])
Case2: vector-valued sparse matrix
......@@ -186,9 +186,9 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smax(A))
>>> dglsp.smax(A)
tensor([2, 2])
>>> print(dglsp.smax(A, 1))
>>> dglsp.smax(A, 1)
tensor([[1, 2],
[2, 2],
[0, 0],
......@@ -232,11 +232,11 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smin(A))
>>> dglsp.smin(A)
tensor(1)
>>> print(dglsp.smin(A, 0))
>>> dglsp.smin(A, 0)
tensor([1, 0, 2])
>>> print(dglsp.smin(A, 1))
>>> dglsp.smin(A, 1)
tensor([1, 1, 0, 0])
Case2: vector-valued sparse matrix
......@@ -245,13 +245,13 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smin(A))
>>> dglsp.smin(A)
tensor([1, 1])
>>> print(dglsp.smin(A, 0))
>>> dglsp.smin(A, 0)
tensor([[1, 1],
[0, 0],
[2, 2]])
>>> print(dglsp.smin(A, 1))
>>> dglsp.smin(A, 1)
tensor([[1, 2],
[2, 1],
[0, 0],
......@@ -295,11 +295,11 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smean(A))
>>> dglsp.smean(A)
tensor(1.3333)
>>> print(dglsp.smean(A, 0))
>>> dglsp.smean(A, 0)
tensor([1., 0., 2.])
>>> print(dglsp.smean(A, 1))
>>> dglsp.smean(A, 1)
tensor([1.0000, 1.5000, 0.0000, 0.0000])
Case2: vector-valued sparse matrix
......@@ -308,13 +308,13 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.smean(A))
>>> dglsp.smean(A)
tensor([1.6667, 1.6667])
>>> print(dglsp.smean(A, 0))
>>> dglsp.smean(A, 0)
tensor([[1.5000, 1.5000],
[0.0000, 0.0000],
[2.0000, 2.0000]])
>>> print(dglsp.smean(A, 1))
>>> dglsp.smean(A, 1)
tensor([[1.0000, 2.0000],
[2.0000, 1.5000],
[0.0000, 0.0000],
......@@ -358,11 +358,11 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sprod(A))
>>> dglsp.sprod(A)
tensor(2)
>>> print(dglsp.sprod(A, 0))
>>> dglsp.sprod(A, 0)
tensor([1, 0, 2])
>>> print(dglsp.sprod(A, 1))
>>> dglsp.sprod(A, 1)
tensor([1, 2, 0, 0])
Case2: vector-valued sparse matrix
......@@ -371,13 +371,13 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(dglsp.sprod(A))
>>> dglsp.sprod(A)
tensor([4, 4])
>>> print(dglsp.sprod(A, 0))
>>> dglsp.sprod(A, 0)
tensor([[2, 2],
[0, 0],
[2, 2]])
>>> print(dglsp.sprod(A, 1))
>>> dglsp.sprod(A, 1)
tensor([[1, 2],
[4, 2],
[0, 0],
......
......@@ -20,7 +20,7 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
out = (X1 @ X2) * A
In particular, :attr:`X1` and :attr:`X2` can be 1-D, then ``X1 @ X2``
becomes the out-product of the two vector (which results in a matrix).
becomes the outer product of the two vectors (which results in a matrix).
Parameters
----------
......@@ -42,13 +42,13 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3])
>>> val = torch.arange(1, 4).float()
>>> A = from_coo(row, col, val, (3, 4))
>>> A = dglsp.from_coo(row, col, val, (3, 4))
>>> X1 = torch.randn(3, 5)
>>> X2 = torch.randn(5, 4)
>>> dgl.sparse.sddmm(A, X1, X2)
>>> dglsp.sddmm(A, X1, X2)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 3, 3]]),
values=tensor([ 1.3097, -1.0977, 1.6953]),
values=tensor([-1.6585, -3.9714, -0.5406]),
shape=(3, 4), nnz=3)
"""
return SparseMatrix(torch.ops.dgl_sparse.sddmm(A.c_sparse_matrix, X1, X2))
......@@ -92,15 +92,15 @@ def bsddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3])
>>> val = torch.arange(1, 4).float()
>>> A = from_coo(row, col, val, (3, 4))
>>> A = dglsp.from_coo(row, col, val, (3, 4))
>>> X1 = torch.arange(0, 3 * 5 * 2).view(3, 5, 2).float()
>>> X2 = torch.arange(0, 5 * 4 * 2).view(5, 4, 2).float()
>>> dgl.sparse.bsddmm(A, X1, X2)
>>> dglsp.bsddmm(A, X1, X2)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 3, 3]]),
values=tensor([[1560., 1735.],
[3400., 3770.],
[8400., 9105.]]),
shape=(3, 4), nnz=3)
shape=(3, 4), nnz=3, val_size=(2,))
"""
return sddmm(A, X1, X2)
......@@ -9,7 +9,7 @@ __all__ = ["softmax"]
def softmax(input: SparseMatrix) -> SparseMatrix:
"""Apply row-wise softmax to the non-zero elements of the sparse matrix.
"""Applies row-wise softmax to the non-zero elements of the sparse matrix.
If :attr:`input.val` takes shape :attr:`(nnz, D)`, then the output matrix
:attr:`output` and :attr:`output.val` take the same shape as :attr:`input`
......@@ -44,6 +44,8 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
Case2: matrix with values of shape (nnz, D)
>>> row = torch.tensor([0, 0, 1, 2])
>>> col = torch.tensor([1, 2, 2, 0])
>>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
>>> A = dglsp.from_coo(row, col, val)
>>> dglsp.softmax(A)
......@@ -53,7 +55,7 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
[0.7311, 0.0180],
[1.0000, 1.0000],
[1.0000, 1.0000]]),
shape=(3, 3), nnz=4)
shape=(3, 3), nnz=4, val_size=(2,))
"""
return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))
......
......@@ -75,7 +75,7 @@ class SparseMatrix:
Returns
-------
tensor
torch.Tensor
Row indices of the non-zero elements
"""
return self.coo()[0]
......@@ -86,7 +86,7 @@ class SparseMatrix:
Returns
-------
tensor
torch.Tensor
Column indices of the non-zero elements
"""
return self.coo()[1]
......@@ -156,7 +156,7 @@ class SparseMatrix:
return self.transpose()
def transpose(self):
"""Return the transpose of this sparse matrix.
"""Returns the transpose of this sparse matrix.
Returns
-------
......@@ -169,9 +169,8 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([2, 1, 3])
>>> val = torch.tensor([1, 1, 2])
>>> A = from_coo(row, col, val)
>>> A = dglsp.from_coo(row, col, val)
>>> A.transpose()
SparseMatrix(indices=tensor([[2, 1, 3],
[1, 1, 3]]),
values=tensor([1, 1, 2]),
......@@ -202,13 +201,13 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.to(device='cuda:0', dtype=torch.int32)
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]], device='cuda:0'),
values=tensor([1, 1, 1], device='cuda:0',
dtype=torch.int32),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
if device is None:
device = self.device
......@@ -243,12 +242,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.cuda()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]], device='cuda:0'),
values=tensor([1., 1., 1.], device='cuda:0'),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
return self.to(device="cuda")
......@@ -266,12 +265,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2]).to('cuda')
>>> col = torch.tensor([1, 2, 0]).to('cuda')
>>> A = from_coo(row, col, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.cpu()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
values=tensor([1., 1., 1.]),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
return self.to(device="cpu")
......@@ -290,12 +289,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> val = torch.ones(len(row)).long()
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A.float()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
values=tensor([1., 1., 1.]),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
return self.to(dtype=torch.float)
......@@ -313,12 +312,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.double()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
values=tensor([1., 1., 1.], dtype=torch.float64),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
return self.to(dtype=torch.double)
......@@ -336,12 +335,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.int()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
values=tensor([1, 1, 1], dtype=torch.int32),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
return self.to(dtype=torch.int)
......@@ -359,12 +358,12 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = from_coo(row, col, shape=(3, 4))
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> A.long()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
values=tensor([1, 1, 1]),
size=(3, 4), nnz=3)
shape=(3, 4), nnz=3)
"""
return self.to(dtype=torch.long)
......@@ -391,9 +390,8 @@ class SparseMatrix:
>>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = from_coo(row, col, val)
>>> A = A.coalesce()
>>> print(A)
>>> A = dglsp.from_coo(row, col, val)
>>> A.coalesce()
SparseMatrix(indices=tensor([[0, 0, 1, 1],
[1, 2, 1, 2]]),
values=tensor([3, 3, 0, 4]),
......@@ -409,10 +407,10 @@ class SparseMatrix:
>>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = from_coo(row, col, val)
>>> print(A.has_duplicate())
>>> A = dglsp.from_coo(row, col, val)
>>> A.has_duplicate()
True
>>> print(A.coalesce().has_duplicate())
>>> A.coalesce().has_duplicate()
False
"""
return self.c_sparse_matrix.has_duplicate()
......@@ -424,15 +422,15 @@ def from_coo(
val: Optional[torch.Tensor] = None,
shape: Optional[Tuple[int, int]] = None,
) -> SparseMatrix:
"""Creates a sparse matrix from row and column coordinates.
r"""Creates a sparse matrix from row and column coordinates.
Parameters
----------
row : tensor
row : torch.Tensor
The row indices of shape (nnz)
col : tensor
col : torch.Tensor
The column indices of shape (nnz)
val : tensor, optional
val : torch.Tensor, optional
The values of shape (nnz) or (nnz, D). If None, it will be a tensor of
shape (nnz) filled by 1.
shape : tuple[int, int], optional
......@@ -452,15 +450,13 @@ def from_coo(
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> A = from_coo(dst, src)
>>> print(A)
>>> dglsp.from_coo(dst, src)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
shape=(3, 5), nnz=3)
>>> # Specify shape
>>> A = from_coo(dst, src, shape=(5, 5))
>>> print(A)
>>> dglsp.from_coo(dst, src, shape=(5, 5))
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
......@@ -469,14 +465,16 @@ def from_coo(
Case2: Sparse matrix with scalar/vector values. Following example is with
vector data.
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> A = from_coo(dst, src, val)
>>> dglsp.from_coo(dst, src, val)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([[1., 1.],
[2., 2.],
[3., 3.]]),
shape=(3, 5), nnz=3)
shape=(3, 5), nnz=3, val_size=(2,))
"""
if shape is None:
shape = (torch.max(row).item() + 1, torch.max(col).item() + 1)
......@@ -492,7 +490,7 @@ def from_csr(
val: Optional[torch.Tensor] = None,
shape: Optional[Tuple[int, int]] = None,
) -> SparseMatrix:
"""Creates a sparse matrix from CSR indices.
r"""Creates a sparse matrix from CSR indices.
For row i of the sparse matrix
......@@ -502,12 +500,12 @@ def from_csr(
Parameters
----------
indptr : tensor
indptr : torch.Tensor
Pointer to the column indices of shape (N + 1), where N is the number
of rows
indices : tensor
indices : torch.Tensor
The column indices of shape (nnz)
val : tensor, optional
val : torch.Tensor, optional
The values of shape (nnz) or (nnz, D). If None, it will be a tensor of
shape (nnz) filled by 1.
shape : tuple[int, int], optional
......@@ -531,15 +529,13 @@ def from_csr(
>>> indptr = torch.tensor([0, 1, 2, 5])
>>> indices = torch.tensor([1, 2, 0, 1, 2])
>>> A = from_csr(indptr, indices)
>>> print(A)
>>> dglsp.from_csr(indptr, indices)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 3), nnz=5)
>>> # Specify shape
>>> A = from_csr(indptr, indices, shape=(3, 5))
>>> print(A)
>>> dglsp.from_csr(indptr, indices, shape=(3, 5))
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
......@@ -548,9 +544,10 @@ def from_csr(
Case2: Sparse matrix with scalar/vector values. Following example is with
vector data.
>>> indptr = torch.tensor([0, 1, 2, 5])
>>> indices = torch.tensor([1, 2, 0, 1, 2])
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = from_csr(indptr, indices, val)
>>> print(A)
>>> dglsp.from_csr(indptr, indices, val)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]),
values=tensor([[1, 1],
......@@ -558,7 +555,7 @@ def from_csr(
[3, 3],
[4, 4],
[5, 5]]),
shape=(3, 3), nnz=5)
shape=(3, 3), nnz=5, val_size=(2,))
"""
if shape is None:
shape = (indptr.shape[0] - 1, torch.max(indices) + 1)
......@@ -576,7 +573,7 @@ def from_csc(
val: Optional[torch.Tensor] = None,
shape: Optional[Tuple[int, int]] = None,
) -> SparseMatrix:
"""Creates a sparse matrix from CSC indices.
r"""Creates a sparse matrix from CSC indices.
For column i of the sparse matrix
......@@ -586,12 +583,12 @@ def from_csc(
Parameters
----------
indptr : tensor
indptr : torch.Tensor
Pointer to the row indices of shape N + 1, where N is the
number of columns
indices : tensor
indices : torch.Tensor
The row indices of shape nnz
val : tensor, optional
val : torch.Tensor, optional
The values of shape (nnz) or (nnz, D). If None, it will be a tensor of
shape (nnz) filled by 1.
shape : tuple[int, int], optional
......@@ -615,15 +612,13 @@ def from_csc(
>>> indptr = torch.tensor([0, 1, 3, 5])
>>> indices = torch.tensor([2, 0, 2, 1, 2])
>>> A = from_csc(indptr, indices)
>>> print(A)
>>> dglsp.from_csc(indptr, indices)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 3), nnz=5)
>>> # Specify shape
>>> A = from_csc(indptr, indices, shape=(5, 3))
>>> print(A)
>>> dglsp.from_csc(indptr, indices, shape=(5, 3))
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
......@@ -632,9 +627,10 @@ def from_csc(
Case2: Sparse matrix with scalar/vector values. Following example is with
vector data.
>>> indptr = torch.tensor([0, 1, 3, 5])
>>> indices = torch.tensor([2, 0, 2, 1, 2])
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = from_csc(indptr, indices, val)
>>> print(A)
>>> dglsp.from_csc(indptr, indices, val)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]),
values=tensor([[1, 1],
......@@ -642,7 +638,7 @@ def from_csc(
[3, 3],
[4, 4],
[5, 5]]),
shape=(3, 3), nnz=5)
shape=(3, 3), nnz=5, val_size=(2,))
"""
if shape is None:
shape = (torch.max(indices) + 1, indptr.shape[0] - 1)
......@@ -664,7 +660,7 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
----------
mat : SparseMatrix
An existing sparse matrix with non-zero values
val : tensor
val : torch.Tensor
The new values of the non-zero elements, a tensor of shape (nnz) or (nnz, D)
Returns
......@@ -678,9 +674,8 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 4, 3])
>>> val = torch.ones(3)
>>> A = from_coo(row, col, val)
>>> B = val_like(A, torch.tensor([2, 2, 2]))
>>> print(B)
>>> A = dglsp.from_coo(row, col, val)
>>> dglsp.val_like(A, torch.tensor([2, 2, 2]))
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([2, 2, 2]),
......
......@@ -4,8 +4,8 @@ from .diag_matrix import diag, DiagMatrix
def neg(D: DiagMatrix) -> DiagMatrix:
"""Return a new diagonal matrix with the negation of the original nonzero
values.
"""Returns a new diagonal matrix with the negation of the original nonzero
values, equivalent to ``-D``.
Returns
-------
......@@ -16,9 +16,8 @@ def neg(D: DiagMatrix) -> DiagMatrix:
--------
>>> val = torch.arange(3).float()
>>> mat = diag(val)
>>> mat = -mat
>>> print(mat)
>>> D = dglsp.diag(val)
>>> -D
DiagMatrix(val=tensor([-0., -1., -2.]),
shape=(3, 3))
"""
......@@ -26,7 +25,7 @@ def neg(D: DiagMatrix) -> DiagMatrix:
def inv(D: DiagMatrix) -> DiagMatrix:
"""Return the inverse of the diagonal matrix.
"""Returns the inverse of the diagonal matrix.
This function only supports square matrices with scalar nonzero values.
......@@ -39,9 +38,8 @@ def inv(D: DiagMatrix) -> DiagMatrix:
--------
>>> val = torch.arange(1, 4).float()
>>> mat = diag(val)
>>> mat = mat.inv()
>>> print(mat)
>>> D = dglsp.diag(val)
>>> D.inv()
DiagMatrix(val=tensor([1.0000, 0.5000, 0.3333]),
shape=(3, 3))
"""
......
......@@ -3,8 +3,8 @@ from .sparse_matrix import SparseMatrix, val_like
def neg(A: SparseMatrix) -> SparseMatrix:
"""Return a new sparse matrix with the negation of the original nonzero
values.
"""Returns a new sparse matrix with the negation of the original nonzero
values, equivalent to ``-A``.
Returns
-------
......@@ -17,9 +17,8 @@ def neg(A: SparseMatrix) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([1, 2, 3])
>>> val = torch.tensor([1., 1., 2.])
>>> A = from_coo(row, col, val)
>>> A = dglsp.from_coo(row, col, val)
>>> -A
SparseMatrix(indices=tensor([[1, 1, 3],
[1, 2, 3]]),
values=tensor([-1., -1., -2.]),
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment