"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "e89ab5bc260374f295be76efec4d9904445e2ea2"
Unverified commit fbbe6d61, authored by Hongzhi (Steve), Chen and committed by GitHub

[Doc] Update all canonical creation examples from from_coo to spmatrix. (#5208)



* fix

* sddmm

* change

* blabla

* blabla

* revert

* newpr

* tocuda
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 30fb03a6
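
For context, every updated example replaces the pair of row and column index tensors passed to dglsp.from_coo with the single (2, nnz) indices tensor expected by dglsp.spmatrix. Below is a minimal sketch of the before/after pattern; it assumes the dgl.sparse package is imported as dglsp (as in the docstring examples below), and the variable names A_old and A_new are illustrative only.

>>> import torch
>>> import dgl.sparse as dglsp  # assumed alias, matching the docstring examples
>>> # Old canonical creation: separate row and column index tensors.
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A_old = dglsp.from_coo(row, col, val)
>>> # New canonical creation: a single (2, nnz) indices tensor, rows stacked on columns.
>>> indices = torch.stack([row, col])
>>> A_new = dglsp.spmatrix(indices, val)
>>> # Both describe the same COO matrix; an explicit shape, e.g. shape=(3, 4),
>>> # can still be passed to either constructor, as in the hunks below.
>>> A_new.coo()
(tensor([1, 0, 2]), tensor([0, 1, 2]))
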
@@ -42,10 +42,10 @@ def add(
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 1, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> B = dglsp.diag(torch.arange(1, 4))
>>> dglsp.add(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
@@ -89,10 +89,10 @@ def sub(
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 1, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> B = dglsp.diag(torch.arange(1, 4))
>>> dglsp.sub(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
@@ -136,10 +136,10 @@ def mul(
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.mul(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
@@ -202,10 +202,10 @@ def div(
DiagMatrix(val=tensor([0.5000, 1.0000, 1.5000]),
shape=(3, 3))
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([1, 2, 3])
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> dglsp.div(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
@@ -247,10 +247,10 @@ def power(
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.power(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
@@ -30,10 +30,10 @@ def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A + A
SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]),
@@ -63,12 +63,12 @@ def sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> val2 = torch.tensor([5, 10, 15])
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> B = dglsp.from_coo(row, col, val2, shape=(3, 4))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> B = dglsp.spmatrix(indices, val2, shape=(3, 4))
>>> A - B
SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]),
@@ -99,10 +99,10 @@ def sp_mul(A: SparseMatrix, B: Scalar) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([1, 2, 3])
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A * 2
SparseMatrix(indices=tensor([[1, 0, 2],
@@ -142,10 +142,10 @@ def sp_div(A: SparseMatrix, B: Scalar) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([1, 2, 3])
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A / 2
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
@@ -177,10 +177,10 @@ def sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> A ** 2
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
@@ -29,10 +29,10 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
Examples
--------
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1])
>>> indices = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> val = torch.randn(len(row))
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> X = torch.randn(2, 3)
>>> result = dglsp.spmm(A, X)
>>> type(result)
@@ -72,10 +72,10 @@ def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
Examples
--------
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [1, 0, 2]])
>>> val = torch.randn(len(row), 2)
>>> A = dglsp.from_coo(row, col, val, shape=(3, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 3))
>>> X = torch.randn(3, 3, 2)
>>> result = dglsp.bspmm(A, X)
>>> type(result)
@@ -197,14 +197,14 @@ def spspmm(
Examples
--------
>>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1])
>>> indices1 = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> val1 = torch.ones(len(row1))
>>> A = dglsp.from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1])
>>> A = dglsp.spmatrix(indices1, val1)
>>> indices2 = torch.tensor([[0, 1, 1],
>>> [0, 2, 1]])
>>> val2 = torch.ones(len(row2))
>>> B = dglsp.from_coo(row2, col2, val2)
>>> B = dglsp.spmatrix(indices2, val2)
>>> dglsp.spspmm(A, B)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],
[1, 2, 0, 1, 2]]),
@@ -293,10 +293,10 @@ def matmul(
Multiplies a sparse matrix with a dense matrix.
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1])
>>> indices = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> val = torch.randn(len(row))
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> X = torch.randn(2, 3)
>>> result = dglsp.matmul(A, X)
>>> type(result)
@@ -306,14 +306,14 @@ def matmul(
Multiplies a sparse matrix with a sparse matrix.
>>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1])
>>> indices1 = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> val1 = torch.ones(len(row1))
>>> A = dglsp.from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1])
>>> A = dglsp.spmatrix(indices1, val1)
>>> indices2 = torch.tensor([[0, 1, 1],
>>> [0, 2, 1]])
>>> val2 = torch.ones(len(row2))
>>> B = dglsp.from_coo(row2, col2, val2)
>>> B = dglsp.spmatrix(indices2, val2)
>>> result = dglsp.matmul(A, B)
>>> type(result)
<class 'dgl.sparse.sparse_matrix.SparseMatrix'>
@@ -44,10 +44,10 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
Case1: scalar-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.reduce(A, rtype='sum')
tensor(4)
>>> dglsp.reduce(A, 0, 'sum')
@@ -61,10 +61,10 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
Case2: vector-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.reduce(A, rtype='sum')
tensor([5., 5.])
>>> dglsp.reduce(A, 0, 'sum')
@@ -113,10 +113,10 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sum(A)
tensor(4)
>>> dglsp.sum(A, 0)
@@ -126,10 +126,10 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sum(A)
tensor([5, 5])
>>> dglsp.sum(A, 0)
@@ -172,10 +172,10 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smax(A)
tensor(2)
>>> dglsp.smax(A, 0)
@@ -185,10 +185,10 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smax(A)
tensor([2, 2])
>>> dglsp.smax(A, 1)
@@ -232,10 +232,10 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smin(A)
tensor(1)
>>> dglsp.smin(A, 0)
@@ -245,10 +245,10 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smin(A)
tensor([1, 1])
>>> dglsp.smin(A, 0)
@@ -296,10 +296,10 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smean(A)
tensor(1.3333)
>>> dglsp.smean(A, 0)
@@ -309,10 +309,10 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smean(A)
tensor([1.6667, 1.6667])
>>> dglsp.smean(A, 0)
@@ -360,10 +360,10 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sprod(A)
tensor(2)
>>> dglsp.sprod(A, 0)
@@ -373,10 +373,10 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sprod(A)
tensor([4, 4])
>>> dglsp.sprod(A, 0)
@@ -39,10 +39,10 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3])
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 3, 3]])
>>> val = torch.arange(1, 4).float()
>>> A = dglsp.from_coo(row, col, val, (3, 4))
>>> A = dglsp.spmatrix(indices, val, (3, 4))
>>> X1 = torch.randn(3, 5)
>>> X2 = torch.randn(5, 4)
>>> dglsp.sddmm(A, X1, X2)
@@ -89,10 +89,10 @@ def bsddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3])
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 3, 3]])
>>> val = torch.arange(1, 4).float()
>>> A = dglsp.from_coo(row, col, val, (3, 4))
>>> A = dglsp.spmatrix(indices, val, (3, 4))
>>> X1 = torch.arange(0, 3 * 5 * 2).view(3, 5, 2).float()
>>> X2 = torch.arange(0, 5 * 4 * 2).view(5, 4, 2).float()
>>> dglsp.bsddmm(A, X1, X2)
@@ -34,11 +34,11 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
Case1: matrix with values of shape (nnz)
>>> row = torch.tensor([0, 0, 1, 2])
>>> col = torch.tensor([1, 2, 2, 0])
>>> indices = torch.tensor([[0, 0, 1, 2],
>>> [1, 2, 2, 0]])
>>> nnz = len(row)
>>> val = torch.arange(nnz).float()
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2],
[1, 2, 2, 0]]),
@@ -47,10 +47,10 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
Case2: matrix with values of shape (nnz, D)
>>> row = torch.tensor([0, 0, 1, 2])
>>> col = torch.tensor([1, 2, 2, 0])
>>> indices = torch.tensor([[0, 0, 1, 2],
>>> [1, 2, 2, 0]])
>>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2],
[1, 2, 2, 0]]),
@@ -108,8 +108,8 @@ class SparseMatrix:
Examples
--------
>>> dst = torch.tensor([1, 2, 1])
>>> src = torch.tensor([2, 4, 3])
>>> indices = torch.tensor([[1, 2, 1],
>>> [2, 4, 3]])
>>> A = from_coo(dst, src)
>>> A.coo()
(tensor([1, 2, 1]), tensor([2, 4, 3]))
@@ -140,8 +140,8 @@ class SparseMatrix:
Examples
--------
>>> dst = torch.tensor([1, 2, 1])
>>> src = torch.tensor([2, 4, 3])
>>> indices = torch.tensor([[1, 2, 1],
>>> [2, 4, 3]])
>>> A = from_coo(dst, src)
>>> A.csr()
(tensor([0, 0, 2, 3]), tensor([2, 3, 4]), tensor([0, 2, 1]))
@@ -172,8 +172,8 @@ class SparseMatrix:
Examples
--------
>>> dst = torch.tensor([1, 2, 1])
>>> src = torch.tensor([2, 4, 3])
>>> indices = torch.tensor([[1, 2, 1],
>>> [2, 4, 3]])
>>> A = from_coo(dst, src)
>>> A.csc()
(tensor([0, 0, 0, 1, 2, 3]), tensor([1, 1, 2]), tensor([0, 2, 1]))
@@ -215,10 +215,10 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([2, 1, 3])
>>> indices = torch.tensor([[1, 1, 3],
>>> [2, 1, 3]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> A = A.transpose()
SparseMatrix(indices=tensor([[2, 1, 3],
[1, 1, 3]]),
@@ -248,9 +248,9 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.to(device='cuda:0', dtype=torch.int32)
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]], device='cuda:0'),
@@ -289,9 +289,9 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.cuda()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]], device='cuda:0'),
@@ -312,9 +312,9 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2]).to('cuda')
>>> col = torch.tensor([1, 2, 0]).to('cuda')
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]]).to("cuda")
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.cpu()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
@@ -335,10 +335,10 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> val = torch.ones(len(row)).long()
>>> A = dglsp.from_coo(row, col, val, shape=(3, 4))
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A.float()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
@@ -359,9 +359,9 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.double()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
@@ -382,9 +382,9 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.int()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
@@ -405,9 +405,9 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([1, 2, 0])
>>> A = dglsp.from_coo(row, col, shape=(3, 4))
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.long()
SparseMatrix(indices=tensor([[1, 1, 2],
[1, 2, 0]]),
@@ -436,10 +436,10 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2])
>>> indices = torch.tensor([[1, 0, 0, 0, 1],
>>> [1, 1, 1, 2, 2]])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> A.coalesce()
SparseMatrix(indices=tensor([[0, 0, 1, 1],
[1, 2, 1, 2]]),
@@ -453,10 +453,10 @@ class SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2])
>>> indices = torch.tensor([[1, 0, 0, 0, 1],
>>> [1, 1, 1, 2, 2]])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> A.has_duplicate()
True
>>> A.coalesce().has_duplicate()
@@ -496,15 +496,15 @@ def spmatrix(
Case1: Sparse matrix with row and column indices without values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> A = dglsp.spmatrix(indices)
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> A = dglsp.from_coo(dst, src)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
shape=(3, 5), nnz=3)
>>> # Specify shape
>>> A = dglsp.spmatrix(indices, shape=(5, 5))
>>> A = dglsp.from_coo(dst, src, shape=(5, 5))
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
@@ -525,10 +525,10 @@ def spmatrix(
Case3: Sparse matrix with vector values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> A = dglsp.spmatrix(indices, val)
>>> A = dglsp.from_coo(dst, src, val)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([[1., 1.],
@@ -575,15 +575,15 @@ def from_coo(
Case1: Sparse matrix with row and column indices without values.
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> A = dglsp.from_coo(dst, src)
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> A = dglsp.spmatrix(indices)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
shape=(3, 5), nnz=3)
>>> # Specify shape
>>> A = dglsp.from_coo(dst, src, shape=(5, 5))
>>> A = dglsp.spmatrix(indices, shape=(5, 5))
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
@@ -591,8 +591,10 @@ def from_coo(
Case2: Sparse matrix with scalar values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> val = torch.tensor([[1.], [2.], [3.]])
>>> A = dglsp.from_coo(dst, src, val)
>>> A = dglsp.spmatrix(indices, val)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([[1.],
@@ -602,10 +604,10 @@ def from_coo(
Case3: Sparse matrix with vector values.
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> A = dglsp.from_coo(dst, src, val)
>>> A = dglsp.spmatrix(indices, val)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([[1., 1.],
@@ -826,10 +828,10 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 4, 3])
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> val = torch.ones(3)
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> A = dglsp.val_like(A, torch.tensor([2, 2, 2]))
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
@@ -14,10 +14,10 @@ def neg(A: SparseMatrix) -> SparseMatrix:
Examples
--------
>>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([1, 2, 3])
>>> indices = torch.tensor([[1, 1, 3],
>>> [1, 2, 3]])
>>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.from_coo(row, col, val)
>>> A = dglsp.spmatrix(indices, val)
>>> A = -A
SparseMatrix(indices=tensor([[1, 1, 3],
[1, 2, 3]]),