Unverified Commit 9334421d authored by Hongzhi (Steve), Chen, committed by GitHub

[Sparse] Rename as_sparse to to_sparse, dense to to_dense. (#5170)



* as_sp_to_sp

* dense

* revert_mock

* test

* revert
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 5f5db2df
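In user code the rename is mechanical: every `as_sparse()` call becomes `to_sparse()` and every `dense()` call becomes `to_dense()`. A minimal before/after sketch (the `dgl.sparse` import path is an assumption; only the function and method names below appear in this diff):

    import torch
    from dgl.sparse import diag  # import path assumed, not shown in this diff

    D = diag(torch.ones(5))   # 5 x 5 diagonal matrix
    S = D.to_sparse()         # formerly: D.as_sparse()
    A = S.to_dense()          # formerly: S.dense(); DiagMatrix.to_dense() renames the same way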
@@ -89,7 +89,7 @@ Attributes and methods
SparseMatrix.csc
SparseMatrix.coalesce
SparseMatrix.has_duplicate
-SparseMatrix.dense
+SparseMatrix.to_dense
SparseMatrix.to
SparseMatrix.cuda
SparseMatrix.cpu
@@ -134,8 +134,8 @@ Attributes and methods
DiagMatrix.dtype
DiagMatrix.device
DiagMatrix.val
-DiagMatrix.as_sparse
-DiagMatrix.dense
+DiagMatrix.to_sparse
+DiagMatrix.to_dense
DiagMatrix.to
DiagMatrix.cuda
DiagMatrix.cpu
......
@@ -98,7 +98,7 @@ class DiagMatrix:
"""
return self.val.device
-def as_sparse(self) -> SparseMatrix:
+def to_sparse(self) -> SparseMatrix:
"""Convert the diagonal matrix into a sparse matrix object
Returns
@@ -112,7 +112,7 @@ class DiagMatrix:
>>> import torch
>>> val = torch.ones(5)
>>> mat = diag(val)
->>> sp_mat = mat.as_sparse()
+>>> sp_mat = mat.to_sparse()
>>> print(sp_mat)
SparseMatrix(indices=tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]),
@@ -122,7 +122,7 @@ class DiagMatrix:
row = col = torch.arange(len(self.val)).to(self.device)
return from_coo(row=row, col=col, val=self.val, shape=self.shape)
-def dense(self) -> torch.Tensor:
+def to_dense(self) -> torch.Tensor:
"""Return a dense representation of the matrix.
Returns
......
@@ -42,7 +42,7 @@ def diag_add(
"The shape of diagonal matrix D1 "
f"{D1.shape} and sparse matrix D2 {D2.shape} must match."
)
-D1 = D1.as_sparse()
+D1 = D1.to_sparse()
return D1 + D2
# Python falls back to D2.__radd__(D1) then TypeError when NotImplemented
# is returned.
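For context on the comment above: Python evaluates `D1 + D2` as `D1.__add__(D2)`; if that returns `NotImplemented`, it retries with `D2.__radd__(D1)`, and raises `TypeError` only when both decline. A generic sketch of the protocol with a hypothetical class (not the DGL source):

    class Diag:
        def __init__(self, val):
            self.val = val

        def __add__(self, other):
            if not isinstance(other, Diag):
                # Declining here makes Python try other.__radd__(self)
                # before it raises TypeError.
                return NotImplemented
            return Diag(self.val + other.val)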
......
@@ -49,7 +49,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
# The input is a DiagMatrix. Cast it to SparseMatrix
if not isinstance(A, SparseMatrix):
-A = A.as_sparse()
+A = A.to_sparse()
return torch.ops.dgl_sparse.spmm(A.c_sparse_matrix, X)
......
@@ -126,7 +126,7 @@ class SparseMatrix:
"""
return self.c_sparse_matrix.csc()
-def dense(self) -> torch.Tensor:
+def to_dense(self) -> torch.Tensor:
"""Return a dense representation of the matrix.
Returns
......
@@ -36,7 +36,7 @@ def test_diag(val_shape, mat_shape):
assert mat.device == val.device
# as_sparse
-sp_mat = mat.as_sparse()
+sp_mat = mat.to_sparse()
# shape
assert tuple(sp_mat.shape) == mat_shape
# nnz
......
@@ -25,9 +25,9 @@ def test_add_coo(val_shape):
val = torch.randn(row.shape + val_shape).to(ctx)
B = from_coo(row, col, val, shape=A.shape)
-sum1 = (A + B).dense()
-sum2 = add(A, B).dense()
-dense_sum = A.dense() + B.dense()
+sum1 = (A + B).to_dense()
+sum2 = add(A, B).to_dense()
+dense_sum = A.to_dense() + B.to_dense()
assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -51,9 +51,9 @@ def test_add_csr(val_shape):
val = torch.randn(indices.shape + val_shape).to(ctx)
B = from_csr(indptr, indices, val, shape=A.shape)
-sum1 = (A + B).dense()
-sum2 = add(A, B).dense()
-dense_sum = A.dense() + B.dense()
+sum1 = (A + B).to_dense()
+sum2 = add(A, B).to_dense()
+dense_sum = A.to_dense() + B.to_dense()
assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -77,9 +77,9 @@ def test_add_csc(val_shape):
val = torch.randn(indices.shape + val_shape).to(ctx)
B = from_csc(indptr, indices, val, shape=A.shape)
-sum1 = (A + B).dense()
-sum2 = add(A, B).dense()
-dense_sum = A.dense() + B.dense()
+sum1 = (A + B).to_dense()
+sum2 = add(A, B).to_dense()
+dense_sum = A.to_dense() + B.to_dense()
assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -98,9 +98,9 @@ def test_add_diag(val_shape):
D1 = diag(torch.randn(val_shape).to(ctx), shape=shape)
D2 = diag(torch.randn(val_shape).to(ctx), shape=shape)
-sum1 = (D1 + D2).dense()
-sum2 = add(D1, D2).dense()
-dense_sum = D1.dense() + D2.dense()
+sum1 = (D1 + D2).to_dense()
+sum2 = add(D1, D2).to_dense()
+dense_sum = D1.to_dense() + D2.to_dense()
assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -118,11 +118,11 @@ def test_add_sparse_diag(val_shape):
val_shape = (shape[0],) + val_shape
D = diag(torch.randn(val_shape).to(ctx), shape=shape)
-sum1 = (A + D).dense()
-sum2 = (D + A).dense()
-sum3 = add(A, D).dense()
-sum4 = add(D, A).dense()
-dense_sum = A.dense() + D.dense()
+sum1 = (A + D).to_dense()
+sum2 = (D + A).to_dense()
+sum3 = add(A, D).to_dense()
+sum4 = add(D, A).to_dense()
+dense_sum = A.to_dense() + D.to_dense()
assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
......
@@ -65,7 +65,7 @@ def test_bspmm(create_func, shape, nnz):
sparse_result.backward(grad)
XX = clone_detach_and_grad(X)
-torch_A = A.dense().clone().detach().requires_grad_()
+torch_A = A.to_dense().clone().detach().requires_grad_()
torch_result = torch_A.permute(2, 0, 1) @ XX.permute(2, 0, 1)
torch_result.backward(grad.permute(2, 0, 1))
@@ -103,14 +103,14 @@ def test_spspmm(create_func1, create_func2, shape_n_m, shape_k, nnz1, nnz2):
torch_A3.backward(torch_A3_grad)
with torch.no_grad():
-assert torch.allclose(A3.dense(), torch_A3.to_dense(), atol=1e-05)
+assert torch.allclose(A3.to_dense(), torch_A3.to_dense(), atol=1e-05)
assert torch.allclose(
-val_like(A1, A1.val.grad).dense(),
+val_like(A1, A1.val.grad).to_dense(),
torch_A1.grad.to_dense(),
atol=1e-05,
)
assert torch.allclose(
-val_like(A2, A2.val.grad).dense(),
+val_like(A2, A2.val.grad).to_dense(),
torch_A2.grad.to_dense(),
atol=1e-05,
)
@@ -161,20 +161,20 @@ def test_sparse_diag_mm(create_func, sparse_shape, nnz):
B.val.backward(grad)
torch_A = sparse_matrix_to_torch_sparse(A)
-torch_D = sparse_matrix_to_torch_sparse(D.as_sparse())
+torch_D = sparse_matrix_to_torch_sparse(D.to_sparse())
torch_B = torch.sparse.mm(torch_A, torch_D)
torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
torch_B.backward(torch_B_grad)
with torch.no_grad():
-assert torch.allclose(B.dense(), torch_B.to_dense(), atol=1e-05)
+assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
assert torch.allclose(
-val_like(A, A.val.grad).dense(),
+val_like(A, A.val.grad).to_dense(),
torch_A.grad.to_dense(),
atol=1e-05,
)
assert torch.allclose(
-diag(D.val.grad, D.shape).dense(),
+diag(D.val.grad, D.shape).to_dense(),
torch_D.grad.to_dense(),
atol=1e-05,
)
@@ -195,20 +195,20 @@ def test_diag_sparse_mm(create_func, sparse_shape, nnz):
B.val.backward(grad)
torch_A = sparse_matrix_to_torch_sparse(A)
-torch_D = sparse_matrix_to_torch_sparse(D.as_sparse())
+torch_D = sparse_matrix_to_torch_sparse(D.to_sparse())
torch_B = torch.sparse.mm(torch_D, torch_A)
torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
torch_B.backward(torch_B_grad)
with torch.no_grad():
-assert torch.allclose(B.dense(), torch_B.to_dense(), atol=1e-05)
+assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
assert torch.allclose(
-val_like(A, A.val.grad).dense(),
+val_like(A, A.val.grad).to_dense(),
torch_A.grad.to_dense(),
atol=1e-05,
)
assert torch.allclose(
-diag(D.val.grad, D.shape).dense(),
+diag(D.val.grad, D.shape).to_dense(),
torch_D.grad.to_dense(),
atol=1e-05,
)
@@ -104,7 +104,7 @@ def test_dense(val_shape):
col = torch.tensor([2, 4, 3]).to(ctx)
val = torch.randn(val_shape).to(ctx)
A = from_coo(row, col, val)
-A_dense = A.dense()
+A_dense = A.to_dense()
shape = A.shape + val.shape[1:]
mat = torch.zeros(shape, device=ctx)
......
@@ -101,7 +101,7 @@ def rand_csc_uncoalesced(shape, nnz, dev):
def sparse_matrix_to_dense(A: SparseMatrix):
-dense = A.dense()
+dense = A.to_dense()
return clone_detach_and_grad(dense)
......