"docs/source/using-diffusers/other-modalities.mdx" did not exist on "a0520193e15951655ee2c08c24bfdca716f6f64c"
Unverified commit d61012e0, authored by Hongzhi (Steve) Chen, committed by GitHub
Browse files

Change example code for indices creation from two line to one line to avoid test break. (#5222)



* add_backslash

* fix

* more

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 0bdc437b
......@@ -42,8 +42,7 @@ def add(
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 1, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 1, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.spmatrix(indices, val)
>>> B = dglsp.diag(torch.arange(1, 4))
......@@ -89,8 +88,7 @@ def sub(
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 1, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 1, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.spmatrix(indices, val)
>>> B = dglsp.diag(torch.arange(1, 4))
......@@ -136,8 +134,7 @@ def mul(
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.mul(A, 2)
......@@ -202,8 +199,7 @@ def div(
DiagMatrix(val=tensor([0.5000, 1.0000, 1.5000]),
shape=(3, 3))
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([1, 2, 3])
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> dglsp.div(A, 2)
......@@ -247,8 +243,7 @@ def power(
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.power(A, 2)
......
......@@ -30,8 +30,7 @@ def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A + A
......@@ -63,8 +62,7 @@ def sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> val2 = torch.tensor([5, 10, 15])
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
......@@ -99,8 +97,7 @@ def sp_mul(A: SparseMatrix, B: Scalar) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([1, 2, 3])
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
......@@ -142,8 +139,7 @@ def sp_div(A: SparseMatrix, B: Scalar) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([1, 2, 3])
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A / 2
......@@ -177,8 +173,7 @@ def sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 2],
>>> [0, 3, 2]])
>>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
>>> val = torch.tensor([10, 20, 30])
>>> A = dglsp.spmatrix(indices, val)
>>> A ** 2
......
......@@ -29,8 +29,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
Examples
--------
>>> indices = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> indices = torch.tensor([[0, 1, 1], [1, 0, 1]])
>>> val = torch.randn(len(row))
>>> A = dglsp.spmatrix(indices, val)
>>> X = torch.randn(2, 3)
......@@ -72,8 +71,7 @@ def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
Examples
--------
>>> indices = torch.tensor([[0, 1, 1],
>>> [1, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [1, 0, 2]])
>>> val = torch.randn(len(row), 2)
>>> A = dglsp.spmatrix(indices, val, shape=(3, 3))
>>> X = torch.randn(3, 3, 2)
......@@ -197,12 +195,10 @@ def spspmm(
Examples
--------
>>> indices1 = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> indices1 = torch.tensor([[0, 1, 1], [1, 0, 1]])
>>> val1 = torch.ones(len(row1))
>>> A = dglsp.spmatrix(indices1, val1)
>>> indices2 = torch.tensor([[0, 1, 1],
>>> [0, 2, 1]])
>>> indices2 = torch.tensor([[0, 1, 1], [0, 2, 1]])
>>> val2 = torch.ones(len(row2))
>>> B = dglsp.spmatrix(indices2, val2)
>>> dglsp.spspmm(A, B)
......@@ -293,8 +289,7 @@ def matmul(
Multiplies a sparse matrix with a dense matrix.
>>> indices = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> indices = torch.tensor([[0, 1, 1], [1, 0, 1]])
>>> val = torch.randn(len(row))
>>> A = dglsp.spmatrix(indices, val)
>>> X = torch.randn(2, 3)
......@@ -306,12 +301,10 @@ def matmul(
Multiplies a sparse matrix with a sparse matrix.
>>> indices1 = torch.tensor([[0, 1, 1],
>>> [1, 0, 1]])
>>> indices1 = torch.tensor([[0, 1, 1], [1, 0, 1]])
>>> val1 = torch.ones(len(row1))
>>> A = dglsp.spmatrix(indices1, val1)
>>> indices2 = torch.tensor([[0, 1, 1],
>>> [0, 2, 1]])
>>> indices2 = torch.tensor([[0, 1, 1], [0, 2, 1]])
>>> val2 = torch.ones(len(row2))
>>> B = dglsp.spmatrix(indices2, val2)
>>> result = dglsp.matmul(A, B)
......
......@@ -44,8 +44,7 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
Case1: scalar-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.reduce(A, rtype='sum')
......@@ -61,8 +60,7 @@ def reduce(input: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
Case2: vector-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.reduce(A, rtype='sum')
......@@ -113,8 +111,7 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sum(A)
......@@ -126,8 +123,7 @@ def sum(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sum(A)
......@@ -172,8 +168,7 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smax(A)
......@@ -185,8 +180,7 @@ def smax(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smax(A)
......@@ -232,8 +226,7 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smin(A)
......@@ -245,8 +238,7 @@ def smin(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smin(A)
......@@ -296,8 +288,7 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smean(A)
......@@ -309,8 +300,7 @@ def smean(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.smean(A)
......@@ -360,8 +350,7 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
Case1: scalar-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sprod(A)
......@@ -373,8 +362,7 @@ def sprod(input: SparseMatrix, dim: Optional[int] = None):
Case2: vector-valued sparse matrix
>>> indices = torch.tensor([[0, 1, 1],
>>> [0, 0, 2]])
>>> indices = torch.tensor([[0, 1, 1], [0, 0, 2]])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.spmatrix(indices, val, shape=(4, 3))
>>> dglsp.sprod(A)
......
......@@ -39,8 +39,7 @@ def sddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 3, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 3, 3]])
>>> val = torch.arange(1, 4).float()
>>> A = dglsp.spmatrix(indices, val, (3, 4))
>>> X1 = torch.randn(3, 5)
......@@ -89,8 +88,7 @@ def bsddmm(A: SparseMatrix, X1: torch.Tensor, X2: torch.Tensor) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 3, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 3, 3]])
>>> val = torch.arange(1, 4).float()
>>> A = dglsp.spmatrix(indices, val, (3, 4))
>>> X1 = torch.arange(0, 3 * 5 * 2).view(3, 5, 2).float()
......
......@@ -34,8 +34,7 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
Case1: matrix with values of shape (nnz)
>>> indices = torch.tensor([[0, 0, 1, 2],
>>> [1, 2, 2, 0]])
>>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
>>> nnz = len(row)
>>> val = torch.arange(nnz).float()
>>> A = dglsp.spmatrix(indices, val)
......@@ -47,8 +46,7 @@ def softmax(input: SparseMatrix) -> SparseMatrix:
Case2: matrix with values of shape (nnz, D)
>>> indices = torch.tensor([[0, 0, 1, 2],
>>> [1, 2, 2, 0]])
>>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
>>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.softmax(A)
......
......@@ -108,8 +108,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 2, 1],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 2, 1], [2, 4, 3]])
>>> A = from_coo(dst, src)
>>> A.coo()
(tensor([1, 2, 1]), tensor([2, 4, 3]))
......@@ -140,8 +139,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 2, 1],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 2, 1], [2, 4, 3]])
>>> A = from_coo(dst, src)
>>> A.csr()
(tensor([0, 0, 2, 3]), tensor([2, 3, 4]), tensor([0, 2, 1]))
......@@ -172,8 +170,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 2, 1],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 2, 1], [2, 4, 3]])
>>> A = from_coo(dst, src)
>>> A.csc()
(tensor([0, 0, 0, 1, 2, 3]), tensor([1, 1, 2]), tensor([0, 2, 1]))
......@@ -215,8 +212,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 3],
>>> [2, 1, 3]])
>>> indices = torch.tensor([[1, 1, 3], [2, 1, 3]])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.spmatrix(indices, val)
>>> A = A.transpose()
......@@ -248,8 +244,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.to(device='cuda:0', dtype=torch.int32)
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -289,8 +284,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.cuda()
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -312,8 +306,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]]).to("cuda")
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]]).to("cuda")
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.cpu()
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -335,8 +328,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]])
>>> val = torch.ones(len(row)).long()
>>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
>>> A.float()
......@@ -359,8 +351,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.double()
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -382,8 +373,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.int()
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -405,8 +395,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [1, 2, 0]])
>>> indices = torch.tensor([[1, 1, 2], [1, 2, 0]])
>>> A = dglsp.spmatrix(indices, shape=(3, 4))
>>> A.long()
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -436,8 +425,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 0, 0, 1],
>>> [1, 1, 1, 2, 2]])
>>> indices = torch.tensor([[1, 0, 0, 0, 1], [1, 1, 1, 2, 2]])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = dglsp.spmatrix(indices, val)
>>> A.coalesce()
......@@ -453,8 +441,7 @@ class SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 0, 0, 0, 1],
>>> [1, 1, 1, 2, 2]])
>>> indices = torch.tensor([[1, 0, 0, 0, 1], [1, 1, 1, 2, 2]])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = dglsp.spmatrix(indices, val)
>>> A.has_duplicate()
......@@ -512,8 +499,7 @@ def spmatrix(
Case2: Sparse matrix with scalar values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 4, 3]])
>>> val = torch.tensor([[1.], [2.], [3.]])
>>> A = dglsp.spmatrix(indices, val)
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -575,8 +561,7 @@ def from_coo(
Case1: Sparse matrix with row and column indices without values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 4, 3]])
>>> A = dglsp.spmatrix(indices)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
......@@ -591,8 +576,7 @@ def from_coo(
Case2: Sparse matrix with scalar values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 4, 3]])
>>> val = torch.tensor([[1.], [2.], [3.]])
>>> A = dglsp.spmatrix(indices, val)
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -604,8 +588,7 @@ def from_coo(
Case3: Sparse matrix with vector values.
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 4, 3]])
>>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> A = dglsp.spmatrix(indices, val)
SparseMatrix(indices=tensor([[1, 1, 2],
......@@ -828,8 +811,7 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 2],
>>> [2, 4, 3]])
>>> indices = torch.tensor([[1, 1, 2], [2, 4, 3]])
>>> val = torch.ones(3)
>>> A = dglsp.spmatrix(indices, val)
>>> A = dglsp.val_like(A, torch.tensor([2, 2, 2]))
......
......@@ -14,8 +14,7 @@ def neg(A: SparseMatrix) -> SparseMatrix:
Examples
--------
>>> indices = torch.tensor([[1, 1, 3],
>>> [1, 2, 3]])
>>> indices = torch.tensor([[1, 1, 3], [1, 2, 3]])
>>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.spmatrix(indices, val)
>>> A = -A
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment