"...en/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "aace1f412bc41f521b699a3228f4ec3339160c98"
Unverified Commit 9334421d authored by Hongzhi (Steve), Chen, committed by GitHub

[Sparse] Rename as_sparse to to_sparse, dense to to_dense. (#5170)



* as_sp_to_sp

* dense

* revert_mock

* test

* revert
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 5f5db2df
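
For downstream code the rename is mechanical. A minimal before/after sketch, assuming the module is importable as dgl.sparse (the import path is an assumption, not part of this diff):

import torch
from dgl.sparse import diag

D = diag(torch.ones(3))  # 3 x 3 diagonal matrix

# Before this change:
#   sp_mat = D.as_sparse()
#   dense = D.dense()
# After this change:
sp_mat = D.to_sparse()
dense = D.to_dense()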
@@ -89,7 +89,7 @@ Attributes and methods
     SparseMatrix.csc
     SparseMatrix.coalesce
     SparseMatrix.has_duplicate
-    SparseMatrix.dense
+    SparseMatrix.to_dense
     SparseMatrix.to
     SparseMatrix.cuda
     SparseMatrix.cpu
@@ -134,8 +134,8 @@ Attributes and methods
     DiagMatrix.dtype
     DiagMatrix.device
     DiagMatrix.val
-    DiagMatrix.as_sparse
-    DiagMatrix.dense
+    DiagMatrix.to_sparse
+    DiagMatrix.to_dense
     DiagMatrix.to
     DiagMatrix.cuda
     DiagMatrix.cpu
...
@@ -98,7 +98,7 @@ class DiagMatrix:
         """
         return self.val.device

-    def as_sparse(self) -> SparseMatrix:
+    def to_sparse(self) -> SparseMatrix:
         """Convert the diagonal matrix into a sparse matrix object

         Returns
@@ -112,7 +112,7 @@ class DiagMatrix:
         >>> import torch
         >>> val = torch.ones(5)
         >>> mat = diag(val)
-        >>> sp_mat = mat.as_sparse()
+        >>> sp_mat = mat.to_sparse()
         >>> print(sp_mat)
         SparseMatrix(indices=tensor([[0, 1, 2, 3, 4],
                                      [0, 1, 2, 3, 4]]),
@@ -122,7 +122,7 @@ class DiagMatrix:
         row = col = torch.arange(len(self.val)).to(self.device)
         return from_coo(row=row, col=col, val=self.val, shape=self.shape)

-    def dense(self) -> torch.Tensor:
+    def to_dense(self) -> torch.Tensor:
         """Return a dense representation of the matrix.

         Returns
...
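
Taken together, the two renamed DiagMatrix methods behave exactly as before; a short sketch (same dgl.sparse import assumption as above):

import torch
from dgl.sparse import diag

D = diag(torch.ones(3))
sp = D.to_sparse()   # SparseMatrix with entries on the main diagonal
print(D.to_dense())
# tensor([[1., 0., 0.],
#         [0., 1., 0.],
#         [0., 0., 1.]])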
@@ -42,7 +42,7 @@ def diag_add(
             "The shape of diagonal matrix D1 "
             f"{D1.shape} and sparse matrix D2 {D2.shape} must match."
         )
-        D1 = D1.as_sparse()
+        D1 = D1.to_sparse()
         return D1 + D2
     # Python falls back to D2.__radd__(D1) then TypeError when NotImplemented
     # is returned.
...
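
The trailing comment refers to Python's standard binary-operator protocol: when __add__ returns NotImplemented, Python retries with the right operand's __radd__, and raises TypeError only if that also fails. A self-contained toy sketch of the mechanism (the classes below are illustrative, not the DGL ones):

class Diag:
    def __add__(self, other):
        # Handle only Diag + Diag; defer everything else to the other operand.
        if not isinstance(other, Diag):
            return NotImplemented
        return "Diag + Diag"

class Sparse:
    def __radd__(self, other):
        # Reached because Diag.__add__ returned NotImplemented.
        return "handled by Sparse.__radd__"

print(Diag() + Sparse())  # -> handled by Sparse.__radd__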
@@ -49,7 +49,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
     # The input is a DiagMatrix. Cast it to SparseMatrix
     if not isinstance(A, SparseMatrix):
-        A = A.as_sparse()
+        A = A.to_sparse()
     return torch.ops.dgl_sparse.spmm(A.c_sparse_matrix, X)
...
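
Because of the cast above, spmm works uniformly on both matrix types. A usage sketch (assuming diag and spmm are exported from dgl.sparse):

import torch
from dgl.sparse import diag, spmm

D = diag(torch.ones(3))      # 3 x 3 identity, stored as a DiagMatrix
X = torch.randn(3, 4)
Y = spmm(D, X)               # D is converted via to_sparse() internally
assert torch.allclose(Y, X)  # multiplying by the identity is a no-op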
@@ -126,7 +126,7 @@ class SparseMatrix:
         """
         return self.c_sparse_matrix.csc()

-    def dense(self) -> torch.Tensor:
+    def to_dense(self) -> torch.Tensor:
         """Return a dense representation of the matrix.

         Returns
...
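
SparseMatrix.to_dense keeps the same semantics under its new name: each stored value is scattered to its (row, col) position and every other entry is zero. A sketch with from_coo, where the shape is inferred from the indices (import path assumed as above):

import torch
from dgl.sparse import from_coo

A = from_coo(torch.tensor([0, 1]), torch.tensor([1, 2]),
             torch.tensor([10.0, 20.0]))
print(A.to_dense())
# tensor([[ 0., 10.,  0.],
#         [ 0.,  0., 20.]])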
@@ -36,7 +36,7 @@ def test_diag(val_shape, mat_shape):
     assert mat.device == val.device

     # as_sparse
-    sp_mat = mat.as_sparse()
+    sp_mat = mat.to_sparse()
     # shape
     assert tuple(sp_mat.shape) == mat_shape
     # nnz
...
@@ -25,9 +25,9 @@ def test_add_coo(val_shape):
     val = torch.randn(row.shape + val_shape).to(ctx)
     B = from_coo(row, col, val, shape=A.shape)

-    sum1 = (A + B).dense()
-    sum2 = add(A, B).dense()
-    dense_sum = A.dense() + B.dense()
+    sum1 = (A + B).to_dense()
+    sum2 = add(A, B).to_dense()
+    dense_sum = A.to_dense() + B.to_dense()
     assert torch.allclose(dense_sum, sum1)
     assert torch.allclose(dense_sum, sum2)
@@ -51,9 +51,9 @@ def test_add_csr(val_shape):
     val = torch.randn(indices.shape + val_shape).to(ctx)
     B = from_csr(indptr, indices, val, shape=A.shape)

-    sum1 = (A + B).dense()
-    sum2 = add(A, B).dense()
-    dense_sum = A.dense() + B.dense()
+    sum1 = (A + B).to_dense()
+    sum2 = add(A, B).to_dense()
+    dense_sum = A.to_dense() + B.to_dense()
     assert torch.allclose(dense_sum, sum1)
     assert torch.allclose(dense_sum, sum2)
@@ -77,9 +77,9 @@ def test_add_csc(val_shape):
     val = torch.randn(indices.shape + val_shape).to(ctx)
     B = from_csc(indptr, indices, val, shape=A.shape)

-    sum1 = (A + B).dense()
-    sum2 = add(A, B).dense()
-    dense_sum = A.dense() + B.dense()
+    sum1 = (A + B).to_dense()
+    sum2 = add(A, B).to_dense()
+    dense_sum = A.to_dense() + B.to_dense()
     assert torch.allclose(dense_sum, sum1)
     assert torch.allclose(dense_sum, sum2)
@@ -98,9 +98,9 @@ def test_add_diag(val_shape):
     D1 = diag(torch.randn(val_shape).to(ctx), shape=shape)
     D2 = diag(torch.randn(val_shape).to(ctx), shape=shape)

-    sum1 = (D1 + D2).dense()
-    sum2 = add(D1, D2).dense()
-    dense_sum = D1.dense() + D2.dense()
+    sum1 = (D1 + D2).to_dense()
+    sum2 = add(D1, D2).to_dense()
+    dense_sum = D1.to_dense() + D2.to_dense()
     assert torch.allclose(dense_sum, sum1)
     assert torch.allclose(dense_sum, sum2)
@@ -118,11 +118,11 @@ def test_add_sparse_diag(val_shape):
     val_shape = (shape[0],) + val_shape
     D = diag(torch.randn(val_shape).to(ctx), shape=shape)

-    sum1 = (A + D).dense()
-    sum2 = (D + A).dense()
-    sum3 = add(A, D).dense()
-    sum4 = add(D, A).dense()
-    dense_sum = A.dense() + D.dense()
+    sum1 = (A + D).to_dense()
+    sum2 = (D + A).to_dense()
+    sum3 = add(A, D).to_dense()
+    sum4 = add(D, A).to_dense()
+    dense_sum = A.to_dense() + D.to_dense()
     assert torch.allclose(dense_sum, sum1)
     assert torch.allclose(dense_sum, sum2)
...
@@ -65,7 +65,7 @@ def test_bspmm(create_func, shape, nnz):
     sparse_result.backward(grad)

     XX = clone_detach_and_grad(X)
-    torch_A = A.dense().clone().detach().requires_grad_()
+    torch_A = A.to_dense().clone().detach().requires_grad_()
     torch_result = torch_A.permute(2, 0, 1) @ XX.permute(2, 0, 1)
     torch_result.backward(grad.permute(2, 0, 1))
@@ -103,14 +103,14 @@ def test_spspmm(create_func1, create_func2, shape_n_m, shape_k, nnz1, nnz2):
     torch_A3.backward(torch_A3_grad)

     with torch.no_grad():
-        assert torch.allclose(A3.dense(), torch_A3.to_dense(), atol=1e-05)
+        assert torch.allclose(A3.to_dense(), torch_A3.to_dense(), atol=1e-05)
         assert torch.allclose(
-            val_like(A1, A1.val.grad).dense(),
+            val_like(A1, A1.val.grad).to_dense(),
             torch_A1.grad.to_dense(),
             atol=1e-05,
         )
         assert torch.allclose(
-            val_like(A2, A2.val.grad).dense(),
+            val_like(A2, A2.val.grad).to_dense(),
             torch_A2.grad.to_dense(),
             atol=1e-05,
         )
@@ -161,20 +161,20 @@ def test_sparse_diag_mm(create_func, sparse_shape, nnz):
     B.val.backward(grad)

     torch_A = sparse_matrix_to_torch_sparse(A)
-    torch_D = sparse_matrix_to_torch_sparse(D.as_sparse())
+    torch_D = sparse_matrix_to_torch_sparse(D.to_sparse())
     torch_B = torch.sparse.mm(torch_A, torch_D)
     torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
     torch_B.backward(torch_B_grad)

     with torch.no_grad():
-        assert torch.allclose(B.dense(), torch_B.to_dense(), atol=1e-05)
+        assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
         assert torch.allclose(
-            val_like(A, A.val.grad).dense(),
+            val_like(A, A.val.grad).to_dense(),
             torch_A.grad.to_dense(),
             atol=1e-05,
         )
         assert torch.allclose(
-            diag(D.val.grad, D.shape).dense(),
+            diag(D.val.grad, D.shape).to_dense(),
             torch_D.grad.to_dense(),
             atol=1e-05,
         )
@@ -195,20 +195,20 @@ def test_diag_sparse_mm(create_func, sparse_shape, nnz):
     B.val.backward(grad)

     torch_A = sparse_matrix_to_torch_sparse(A)
-    torch_D = sparse_matrix_to_torch_sparse(D.as_sparse())
+    torch_D = sparse_matrix_to_torch_sparse(D.to_sparse())
     torch_B = torch.sparse.mm(torch_D, torch_A)
     torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
     torch_B.backward(torch_B_grad)

     with torch.no_grad():
-        assert torch.allclose(B.dense(), torch_B.to_dense(), atol=1e-05)
+        assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
         assert torch.allclose(
-            val_like(A, A.val.grad).dense(),
+            val_like(A, A.val.grad).to_dense(),
             torch_A.grad.to_dense(),
             atol=1e-05,
         )
         assert torch.allclose(
-            diag(D.val.grad, D.shape).dense(),
+            diag(D.val.grad, D.shape).to_dense(),
             torch_D.grad.to_dense(),
             atol=1e-05,
         )
...
@@ -104,7 +104,7 @@ def test_dense(val_shape):
     col = torch.tensor([2, 4, 3]).to(ctx)
     val = torch.randn(val_shape).to(ctx)
     A = from_coo(row, col, val)
-    A_dense = A.dense()
+    A_dense = A.to_dense()
     shape = A.shape + val.shape[1:]
     mat = torch.zeros(shape, device=ctx)
...
@@ -101,7 +101,7 @@ def rand_csc_uncoalesced(shape, nnz, dev):

 def sparse_matrix_to_dense(A: SparseMatrix):
-    dense = A.dense()
+    dense = A.to_dense()
     return clone_detach_and_grad(dense)
...