Unverified commit 9169fc35, authored by Mufei Li, committed by GitHub
Browse files

[Sparse] Polish sparse matrix creation functions (#5093)



* update

* Update

* Update

* Update

* Update
Co-authored-by: Ubuntu <ubuntu@ip-172-31-36-188.ap-northeast-1.compute.internal>
parent 0698e91a
......@@ -3,7 +3,7 @@ from typing import Optional, Tuple
import torch
from .sparse_matrix import create_from_coo, SparseMatrix
from .sparse_matrix import from_coo, SparseMatrix
class DiagMatrix:
......@@ -119,7 +119,7 @@ class DiagMatrix:
shape=(5, 5), nnz=5)
"""
row = col = torch.arange(len(self.val)).to(self.device)
return create_from_coo(row=row, col=col, val=self.val, shape=self.shape)
return from_coo(row=row, col=col, val=self.val, shape=self.shape)
def dense(self) -> torch.Tensor:
"""Return a dense representation of the matrix.
......
......@@ -30,7 +30,7 @@ def add(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 1, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> B = diag(torch.arange(1, 4))
>>> A + B
SparseMatrix(indices=tensor([[0, 0, 1, 1, 2],
......@@ -65,7 +65,7 @@ def power(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> power(A, 2)
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
......
......@@ -37,7 +37,7 @@ def sp_add(A: SparseMatrix, B: Union[DiagMatrix, SparseMatrix]) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = create_from_coo(row, col, val, shape=(3, 4))
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A + A
SparseMatrix(indices=tensor([[0, 1, 2],
[3, 0, 2]]),
......@@ -77,7 +77,7 @@ def sp_mul(
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([1, 2, 3])
>>> A = create_from_coo(row, col, val, shape=(3, 4))
>>> A = from_coo(row, col, val, shape=(3, 4))
>>> A * 2
SparseMatrix(indices=tensor([[1, 0, 2],
......@@ -122,7 +122,7 @@ def sp_power(A: SparseMatrix, scalar: Union[float, int]) -> SparseMatrix:
>>> row = torch.tensor([1, 0, 2])
>>> col = torch.tensor([0, 3, 2])
>>> val = torch.tensor([10, 20, 30])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> A ** 2
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
......
......@@ -32,7 +32,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 1])
>>> val = torch.randn(len(row))
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> X = torch.randn(2, 3)
>>> result = dgl.sparse.spmm(A, X)
>>> print(type(result))
......@@ -74,7 +74,7 @@ def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([1, 0, 2])
>>> val = torch.randn(len(row), 2)
>>> A = create_from_coo(row, col, val, shape=(3, 3))
>>> A = from_coo(row, col, val, shape=(3, 3))
>>> X = torch.randn(3, 3, 2)
>>> result = dgl.sparse.bspmm(A, X)
>>> print(type(result))
......@@ -142,12 +142,12 @@ def spspmm(
>>> row1 = torch.tensor([0, 1, 1])
>>> col1 = torch.tensor([1, 0, 1])
>>> val1 = torch.ones(len(row1))
>>> A1 = create_from_coo(row1, col1, val1)
>>> A1 = from_coo(row1, col1, val1)
>>> row2 = torch.tensor([0, 1, 1])
>>> col2 = torch.tensor([0, 2, 1])
>>> val2 = torch.ones(len(row2))
>>> A2 = create_from_coo(row2, col2, val2)
>>> A2 = from_coo(row2, col2, val2)
>>> result = dgl.sparse.spspmm(A1, A2)
>>> print(result)
SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],
......
......@@ -42,7 +42,7 @@ def reduce(A: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.reduce(rtype='sum'))
tensor(4)
>>> print(A.reduce(0, 'sum'))
......@@ -57,7 +57,7 @@ def reduce(A: SparseMatrix, dim: Optional[int] = None, rtype: str = "sum"):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.reduce(rtype='sum'))
tensor([5., 5.])
>>> print(A.reduce(0, 'sum'))
......@@ -105,7 +105,7 @@ def sum(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.sum())
tensor(4)
>>> print(A.sum(0))
......@@ -116,7 +116,7 @@ def sum(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.sum())
tensor([5, 5])
>>> print(A.sum(0))
......@@ -157,7 +157,7 @@ def smax(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.smax())
tensor(2)
>>> print(A.smax(0))
......@@ -168,7 +168,7 @@ def smax(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.smax())
tensor([2, 2])
>>> print(A.smax(1))
......@@ -210,7 +210,7 @@ def smin(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.smin())
tensor(1)
>>> print(A.smin(0))
......@@ -221,7 +221,7 @@ def smin(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.smin())
tensor([1, 1])
>>> print(A.smin(0))
......@@ -267,7 +267,7 @@ def smean(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1., 1., 2.])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.smean())
tensor(1.3333)
>>> print(A.smean(0))
......@@ -278,7 +278,7 @@ def smean(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1., 2.], [2., 1.], [2., 2.]])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.smean())
tensor([1.6667, 1.6667])
>>> print(A.smean(0))
......@@ -324,7 +324,7 @@ def sprod(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([1, 1, 2])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.sprod())
tensor(2)
>>> print(A.sprod(0))
......@@ -335,7 +335,7 @@ def sprod(A: SparseMatrix, dim: Optional[int] = None):
>>> row = torch.tensor([0, 1, 1])
>>> col = torch.tensor([0, 0, 2])
>>> val = torch.tensor([[1, 2], [2, 1], [2, 2]])
>>> A = dglsp.create_from_coo(row, col, val, shape=(4, 3))
>>> A = dglsp.from_coo(row, col, val, shape=(4, 3))
>>> print(A.sprod())
tensor([4, 4])
>>> print(A.sprod(0))
......
......@@ -44,7 +44,7 @@ def sddmm(
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3])
>>> val = torch.arange(1, 4).float()
>>> A = create_from_coo(row, col, val, (3, 4))
>>> A = from_coo(row, col, val, (3, 4))
>>> mat1 = torch.randn(3, 5)
>>> mat2 = torch.randn(5, 4)
>>> dgl.sparse.sddmm(A, mat1, mat2)
......@@ -96,7 +96,7 @@ def bsddmm(
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 3, 3])
>>> val = torch.arange(1, 4).float()
>>> A = create_from_coo(row, col, val, (3, 4))
>>> A = from_coo(row, col, val, (3, 4))
>>> mat1 = torch.arange(0, 3 * 5 * 2).view(3, 5, 2).float()
>>> mat2 = torch.arange(0, 5 * 4 * 2).view(5, 4, 2).float()
>>> dgl.sparse.bsddmm(A, mat1, mat2)
......
......@@ -35,7 +35,7 @@ def softmax(A: SparseMatrix) -> SparseMatrix:
>>> col = torch.tensor([1, 2, 2, 0])
>>> nnz = len(row)
>>> val = torch.arange(nnz).float()
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2],
[1, 2, 2, 0]]),
......@@ -45,7 +45,7 @@ def softmax(A: SparseMatrix) -> SparseMatrix:
Case2: matrix with values of shape (nnz, D)
>>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2],
[1, 2, 2, 0]]),
......
......@@ -184,7 +184,7 @@ class SparseMatrix:
>>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([2, 1, 3])
>>> val = torch.tensor([1, 1, 2])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> A = A.transpose()
>>> print(A)
SparseMatrix(indices=tensor([[2, 1, 3],
......@@ -217,7 +217,7 @@ class SparseMatrix:
>>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> A = A.coalesce()
>>> print(A)
SparseMatrix(indices=tensor([[0, 0, 1, 1],
......@@ -240,7 +240,7 @@ class SparseMatrix:
>>> row = torch.tensor([1, 0, 0, 0, 1])
>>> col = torch.tensor([1, 1, 1, 2, 2])
>>> val = torch.tensor([0, 1, 2, 3, 4])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> print(A.has_duplicate())
True
>>> print(A.coalesce().has_duplicate())
......@@ -249,7 +249,7 @@ class SparseMatrix:
return self.c_sparse_matrix.has_duplicate()
def create_from_coo(
def from_coo(
row: torch.Tensor,
col: torch.Tensor,
val: Optional[torch.Tensor] = None,
......@@ -283,14 +283,14 @@ def create_from_coo(
>>> dst = torch.tensor([1, 1, 2])
>>> src = torch.tensor([2, 4, 3])
>>> A = create_from_coo(dst, src)
>>> A = from_coo(dst, src)
>>> print(A)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([1., 1., 1.]),
shape=(3, 5), nnz=3)
>>> # Specify shape
>>> A = create_from_coo(dst, src, shape=(5, 5))
>>> A = from_coo(dst, src, shape=(5, 5))
>>> print(A)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
......@@ -301,7 +301,7 @@ def create_from_coo(
vector data.
>>> val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> A = create_from_coo(dst, src, val)
>>> A = from_coo(dst, src, val)
SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]),
values=tensor([[1., 1.],
......@@ -314,12 +314,10 @@ def create_from_coo(
if val is None:
val = torch.ones(row.shape[0]).to(row.device)
return SparseMatrix(
torch.ops.dgl_sparse.create_from_coo(row, col, val, shape)
)
return SparseMatrix(torch.ops.dgl_sparse.from_coo(row, col, val, shape))
def create_from_csr(
def from_csr(
indptr: torch.Tensor,
indices: torch.Tensor,
val: Optional[torch.Tensor] = None,
......@@ -364,14 +362,14 @@ def create_from_csr(
>>> indptr = torch.tensor([0, 1, 2, 5])
>>> indices = torch.tensor([1, 2, 0, 1, 2])
>>> A = create_from_csr(indptr, indices)
>>> A = from_csr(indptr, indices)
>>> print(A)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 3), nnz=5)
>>> # Specify shape
>>> A = create_from_csr(indptr, indices, shape=(3, 5))
>>> A = from_csr(indptr, indices, shape=(3, 5))
>>> print(A)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]),
......@@ -382,7 +380,7 @@ def create_from_csr(
vector data.
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = create_from_csr(indptr, indices, val)
>>> A = from_csr(indptr, indices, val)
>>> print(A)
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]),
......@@ -399,11 +397,11 @@ def create_from_csr(
val = torch.ones(indices.shape[0]).to(indptr.device)
return SparseMatrix(
torch.ops.dgl_sparse.create_from_csr(indptr, indices, val, shape)
torch.ops.dgl_sparse.from_csr(indptr, indices, val, shape)
)
def create_from_csc(
def from_csc(
indptr: torch.Tensor,
indices: torch.Tensor,
val: Optional[torch.Tensor] = None,
......@@ -448,14 +446,14 @@ def create_from_csc(
>>> indptr = torch.tensor([0, 1, 3, 5])
>>> indices = torch.tensor([2, 0, 2, 1, 2])
>>> A = create_from_csc(indptr, indices)
>>> A = from_csc(indptr, indices)
>>> print(A)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]),
values=tensor([1., 1., 1., 1., 1.]),
shape=(3, 3), nnz=5)
>>> # Specify shape
>>> A = create_from_csc(indptr, indices, shape=(5, 3))
>>> A = from_csc(indptr, indices, shape=(5, 3))
>>> print(A)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]),
......@@ -466,7 +464,7 @@ def create_from_csc(
vector data.
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = create_from_csc(indptr, indices, val)
>>> A = from_csc(indptr, indices, val)
>>> print(A)
SparseMatrix(indices=tensor([[2, 0, 2, 1, 2],
[0, 1, 1, 2, 2]]),
......@@ -483,7 +481,7 @@ def create_from_csc(
val = torch.ones(indices.shape[0]).to(indptr.device)
return SparseMatrix(
torch.ops.dgl_sparse.create_from_csc(indptr, indices, val, shape)
torch.ops.dgl_sparse.from_csc(indptr, indices, val, shape)
)
......@@ -511,7 +509,7 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 4, 3])
>>> val = torch.ones(3)
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> B = val_like(A, torch.tensor([2, 2, 2]))
>>> print(B)
SparseMatrix(indices=tensor([[1, 1, 2],
......
......@@ -17,7 +17,7 @@ def neg(A: SparseMatrix) -> SparseMatrix:
>>> row = torch.tensor([1, 1, 3])
>>> col = torch.tensor([1, 2, 3])
>>> val = torch.tensor([1., 1., 2.])
>>> A = create_from_coo(row, col, val)
>>> A = from_coo(row, col, val)
>>> A = -A
>>> print(A)
SparseMatrix(indices=tensor([[1, 1, 3],
......
......@@ -4,13 +4,7 @@ import backend as F
import pytest
import torch
from dgl.sparse import (
add,
create_from_coo,
create_from_csc,
create_from_csr,
diag,
)
from dgl.sparse import add, diag, from_coo, from_csc, from_csr
# TODO(#4818): Skipping tests on win.
if not sys.platform.startswith("linux"):
......@@ -23,12 +17,12 @@ def test_add_coo(val_shape):
row = torch.tensor([1, 0, 2]).to(ctx)
col = torch.tensor([0, 3, 2]).to(ctx)
val = torch.randn(row.shape + val_shape).to(ctx)
A = create_from_coo(row, col, val)
A = from_coo(row, col, val)
row = torch.tensor([1, 0]).to(ctx)
col = torch.tensor([0, 2]).to(ctx)
val = torch.randn(row.shape + val_shape).to(ctx)
B = create_from_coo(row, col, val, shape=A.shape)
B = from_coo(row, col, val, shape=A.shape)
sum1 = (A + B).dense()
sum2 = add(A, B).dense()
......@@ -44,12 +38,12 @@ def test_add_csr(val_shape):
indptr = torch.tensor([0, 1, 2, 3]).to(ctx)
indices = torch.tensor([3, 0, 2]).to(ctx)
val = torch.randn(indices.shape + val_shape).to(ctx)
A = create_from_csr(indptr, indices, val)
A = from_csr(indptr, indices, val)
indptr = torch.tensor([0, 1, 2, 2]).to(ctx)
indices = torch.tensor([2, 0]).to(ctx)
val = torch.randn(indices.shape + val_shape).to(ctx)
B = create_from_csr(indptr, indices, val, shape=A.shape)
B = from_csr(indptr, indices, val, shape=A.shape)
sum1 = (A + B).dense()
sum2 = add(A, B).dense()
......@@ -65,12 +59,12 @@ def test_add_csc(val_shape):
indptr = torch.tensor([0, 1, 1, 2, 3]).to(ctx)
indices = torch.tensor([1, 2, 0]).to(ctx)
val = torch.randn(indices.shape + val_shape).to(ctx)
A = create_from_csc(indptr, indices, val)
A = from_csc(indptr, indices, val)
indptr = torch.tensor([0, 1, 1, 2, 2]).to(ctx)
indices = torch.tensor([1, 0]).to(ctx)
val = torch.randn(indices.shape + val_shape).to(ctx)
B = create_from_csc(indptr, indices, val, shape=A.shape)
B = from_csc(indptr, indices, val, shape=A.shape)
sum1 = (A + B).dense()
sum2 = add(A, B).dense()
......@@ -102,7 +96,7 @@ def test_add_sparse_diag(val_shape):
row = torch.tensor([1, 0, 2]).to(ctx)
col = torch.tensor([0, 3, 2]).to(ctx)
val = torch.randn(row.shape + val_shape).to(ctx)
A = create_from_coo(row, col, val)
A = from_coo(row, col, val)
shape = (3, 4)
val_shape = (shape[0],) + val_shape
......
......@@ -4,7 +4,7 @@ import backend as F
import pytest
import torch
from dgl.sparse import create_from_coo, power
from dgl.sparse import from_coo, power
# TODO(#4818): Skipping tests on win.
if not sys.platform.startswith("linux"):
......@@ -26,7 +26,7 @@ def test_mul_scalar(v_scalar):
row = torch.tensor([1, 0, 2]).to(ctx)
col = torch.tensor([0, 3, 2]).to(ctx)
val = torch.randn(len(row)).to(ctx)
A1 = create_from_coo(row, col, val, shape=(3, 4))
A1 = from_coo(row, col, val, shape=(3, 4))
# A * v
A2 = A1 * v_scalar
......@@ -46,7 +46,7 @@ def test_pow(val_shape):
row = torch.tensor([1, 0, 2]).to(ctx)
col = torch.tensor([0, 3, 2]).to(ctx)
val = torch.randn(val_shape).to(ctx)
A = create_from_coo(row, col, val, shape=(3, 4))
A = from_coo(row, col, val, shape=(3, 4))
exponent = 2
A_new = A**exponent
assert torch.allclose(A_new.val, val**exponent)
......
......@@ -4,7 +4,7 @@ import backend as F
import pytest
import torch
from dgl.sparse import bspmm, create_from_coo, val_like
from dgl.sparse import bspmm, from_coo, val_like
from .utils import (
clone_detach_and_grad,
......@@ -123,13 +123,13 @@ def test_spspmm_duplicate():
col = torch.tensor([1, 1, 1, 2, 2]).to(dev)
val = torch.randn(len(row)).to(dev)
shape = (4, 4)
A1 = create_from_coo(row, col, val, shape)
A1 = from_coo(row, col, val, shape)
row = torch.tensor([1, 0, 0, 1]).to(dev)
col = torch.tensor([1, 1, 2, 2]).to(dev)
val = torch.randn(len(row)).to(dev)
shape = (4, 4)
A2 = create_from_coo(row, col, val, shape)
A2 = from_coo(row, col, val, shape)
try:
A1 @ A2
......
......@@ -93,7 +93,7 @@ def test_reduce_all(shape, op, use_reduce):
val2 = val.clone()
val = val.requires_grad_()
val2 = val2.requires_grad_()
A = dglsp.create_from_coo(row, col, val, shape=(NUM_ROWS, NUM_COLS))
A = dglsp.from_coo(row, col, val, shape=(NUM_ROWS, NUM_COLS))
A2, M = _coalesce_dense(row, col, val2, NUM_ROWS, NUM_COLS, op)
......@@ -139,7 +139,7 @@ def test_reduce_along(shape, dim, empty_nnz, op, use_reduce):
row[row == 0] = 1
col[col == 0] = 1
A = dglsp.create_from_coo(row, col, val, shape=(NUM_ROWS, NUM_COLS))
A = dglsp.from_coo(row, col, val, shape=(NUM_ROWS, NUM_COLS))
A2, M = _coalesce_dense(row, col, val2, NUM_ROWS, NUM_COLS, op)
......
......@@ -6,7 +6,7 @@ import dgl
import pytest
import torch
from dgl.sparse import create_from_coo, softmax
from dgl.sparse import from_coo, softmax
# TODO(#4818): Skipping tests on win.
if not sys.platform.startswith("linux"):
......@@ -26,7 +26,7 @@ def test_softmax(val_D, csr):
val = torch.randn(nnz, val_D).to(dev)
val_sparse = val.clone().requires_grad_()
A = create_from_coo(row, col, val_sparse)
A = from_coo(row, col, val_sparse)
if csr:
# Test CSR
......
......@@ -4,12 +4,7 @@ import backend as F
import pytest
import torch
from dgl.sparse import (
create_from_coo,
create_from_csc,
create_from_csr,
val_like,
)
from dgl.sparse import from_coo, from_csc, from_csr, val_like
# TODO(#4818): Skipping tests on win.
if not sys.platform.startswith("linux"):
......@@ -20,7 +15,7 @@ if not sys.platform.startswith("linux"):
@pytest.mark.parametrize("row", [(0, 0, 1, 2), (0, 1, 2, 4)])
@pytest.mark.parametrize("col", [(0, 1, 2, 2), (1, 3, 3, 4)])
@pytest.mark.parametrize("shape", [None, (5, 5), (5, 6)])
def test_create_from_coo(dense_dim, row, col, shape):
def test_from_coo(dense_dim, row, col, shape):
val_shape = (len(row),)
if dense_dim is not None:
val_shape += (dense_dim,)
......@@ -28,7 +23,7 @@ def test_create_from_coo(dense_dim, row, col, shape):
val = torch.randn(val_shape).to(ctx)
row = torch.tensor(row).to(ctx)
col = torch.tensor(col).to(ctx)
mat = create_from_coo(row, col, val, shape)
mat = from_coo(row, col, val, shape)
if shape is None:
shape = (torch.max(row).item() + 1, torch.max(col).item() + 1)
......@@ -48,7 +43,7 @@ def test_create_from_coo(dense_dim, row, col, shape):
@pytest.mark.parametrize("indptr", [(0, 0, 1, 4), (0, 1, 2, 4)])
@pytest.mark.parametrize("indices", [(0, 1, 2, 3), (1, 2, 3, 4)])
@pytest.mark.parametrize("shape", [None, (3, 5)])
def test_create_from_csr(dense_dim, indptr, indices, shape):
def test_from_csr(dense_dim, indptr, indices, shape):
val_shape = (len(indices),)
if dense_dim is not None:
val_shape += (dense_dim,)
......@@ -56,7 +51,7 @@ def test_create_from_csr(dense_dim, indptr, indices, shape):
val = torch.randn(val_shape).to(ctx)
indptr = torch.tensor(indptr).to(ctx)
indices = torch.tensor(indices).to(ctx)
mat = create_from_csr(indptr, indices, val, shape)
mat = from_csr(indptr, indices, val, shape)
if shape is None:
shape = (indptr.numel() - 1, torch.max(indices).item() + 1)
......@@ -76,7 +71,7 @@ def test_create_from_csr(dense_dim, indptr, indices, shape):
@pytest.mark.parametrize("indptr", [(0, 0, 1, 4), (0, 1, 2, 4)])
@pytest.mark.parametrize("indices", [(0, 1, 2, 3), (1, 2, 3, 4)])
@pytest.mark.parametrize("shape", [None, (5, 3)])
def test_create_from_csc(dense_dim, indptr, indices, shape):
def test_from_csc(dense_dim, indptr, indices, shape):
val_shape = (len(indices),)
if dense_dim is not None:
val_shape += (dense_dim,)
......@@ -84,7 +79,7 @@ def test_create_from_csc(dense_dim, indptr, indices, shape):
val = torch.randn(val_shape).to(ctx)
indptr = torch.tensor(indptr).to(ctx)
indices = torch.tensor(indices).to(ctx)
mat = create_from_csc(indptr, indices, val, shape)
mat = from_csc(indptr, indices, val, shape)
if shape is None:
shape = (torch.max(indices).item() + 1, indptr.numel() - 1)
......@@ -107,7 +102,7 @@ def test_dense(val_shape):
row = torch.tensor([1, 1, 2]).to(ctx)
col = torch.tensor([2, 4, 3]).to(ctx)
val = torch.randn(val_shape).to(ctx)
A = create_from_coo(row, col, val)
A = from_coo(row, col, val)
A_dense = A.dense()
shape = A.shape + val.shape[1:]
......@@ -128,7 +123,7 @@ def test_csr_to_coo(dense_dim, indptr, indices, shape):
val = torch.randn(val_shape).to(ctx)
indptr = torch.tensor(indptr).to(ctx)
indices = torch.tensor(indices).to(ctx)
mat = create_from_csr(indptr, indices, val, shape)
mat = from_csr(indptr, indices, val, shape)
if shape is None:
shape = (indptr.numel() - 1, torch.max(indices).item() + 1)
......@@ -163,7 +158,7 @@ def test_csc_to_coo(dense_dim, indptr, indices, shape):
val = torch.randn(val_shape).to(ctx)
indptr = torch.tensor(indptr).to(ctx)
indices = torch.tensor(indices).to(ctx)
mat = create_from_csc(indptr, indices, val, shape)
mat = from_csc(indptr, indices, val, shape)
if shape is None:
shape = (torch.max(indices).item() + 1, indptr.numel() - 1)
......@@ -205,7 +200,7 @@ def test_coo_to_csr(dense_dim, row, col, shape):
val = torch.randn(val_shape).to(ctx)
row = torch.tensor(row).to(ctx)
col = torch.tensor(col).to(ctx)
mat = create_from_coo(row, col, val, shape)
mat = from_coo(row, col, val, shape)
if shape is None:
shape = (torch.max(row).item() + 1, torch.max(col).item() + 1)
......@@ -237,7 +232,7 @@ def test_csc_to_csr(dense_dim, indptr, indices, shape):
val = torch.randn(val_shape).to(ctx)
indptr = torch.tensor(indptr).to(ctx)
indices = torch.tensor(indices).to(ctx)
mat = create_from_csc(indptr, indices, val, shape)
mat = from_csc(indptr, indices, val, shape)
mat_indptr, mat_indices, value_indices = mat.csr()
mat_val = mat.val if value_indices is None else mat.val[value_indices]
......@@ -280,7 +275,7 @@ def test_coo_to_csc(dense_dim, row, col, shape):
val = torch.randn(val_shape).to(ctx)
row = torch.tensor(row).to(ctx)
col = torch.tensor(col).to(ctx)
mat = create_from_coo(row, col, val, shape)
mat = from_coo(row, col, val, shape)
if shape is None:
shape = (torch.max(row).item() + 1, torch.max(col).item() + 1)
......@@ -312,7 +307,7 @@ def test_csr_to_csc(dense_dim, indptr, indices, shape):
val = torch.randn(val_shape).to(ctx)
indptr = torch.tensor(indptr).to(ctx)
indices = torch.tensor(indices).to(ctx)
mat = create_from_csr(indptr, indices, val, shape)
mat = from_csr(indptr, indices, val, shape)
mat_indptr, mat_indices, value_indices = mat.csc()
mat_val = mat.val if value_indices is None else mat.val[value_indices]
......@@ -358,20 +353,20 @@ def test_val_like(val_shape, shape):
row = torch.tensor([1, 1, 2]).to(ctx)
col = torch.tensor([2, 4, 3]).to(ctx)
val = torch.randn(3).to(ctx)
coo_A = create_from_coo(row, col, val, shape)
coo_A = from_coo(row, col, val, shape)
new_val = torch.randn(val_shape).to(ctx)
coo_B = val_like(coo_A, new_val)
check_val_like(coo_A, coo_B)
# CSR
indptr, indices, _ = coo_A.csr()
csr_A = create_from_csr(indptr, indices, val, shape)
csr_A = from_csr(indptr, indices, val, shape)
csr_B = val_like(csr_A, new_val)
check_val_like(csr_A, csr_B)
# CSC
indptr, indices, _ = coo_A.csc()
csc_A = create_from_csc(indptr, indices, val, shape)
csc_A = from_csc(indptr, indices, val, shape)
csc_B = val_like(csc_A, new_val)
check_val_like(csc_A, csc_B)
......@@ -382,7 +377,7 @@ def test_coalesce():
row = torch.tensor([1, 0, 0, 0, 1]).to(ctx)
col = torch.tensor([1, 1, 1, 2, 2]).to(ctx)
val = torch.arange(len(row)).to(ctx)
A = create_from_coo(row, col, val, (4, 4))
A = from_coo(row, col, val, (4, 4))
assert A.has_duplicate()
......@@ -406,17 +401,17 @@ def test_has_duplicate():
shape = (4, 4)
# COO
coo_A = create_from_coo(row, col, val, shape)
coo_A = from_coo(row, col, val, shape)
assert coo_A.has_duplicate()
# CSR
indptr, indices, _ = coo_A.csr()
csr_A = create_from_csr(indptr, indices, val, shape)
csr_A = from_csr(indptr, indices, val, shape)
assert csr_A.has_duplicate()
# CSC
indptr, indices, _ = coo_A.csc()
csc_A = create_from_csc(indptr, indices, val, shape)
csc_A = from_csc(indptr, indices, val, shape)
assert csc_A.has_duplicate()
......@@ -427,12 +422,12 @@ def test_print():
row = torch.tensor([1, 1, 3]).to(ctx)
col = torch.tensor([2, 1, 3]).to(ctx)
val = torch.tensor([1.0, 1.0, 2.0]).to(ctx)
A = create_from_coo(row, col, val)
A = from_coo(row, col, val)
print(A)
# vector-shape non zero
row = torch.tensor([1, 1, 3]).to(ctx)
col = torch.tensor([2, 1, 3]).to(ctx)
val = torch.randn(3, 2).to(ctx)
A = create_from_coo(row, col, val)
A = from_coo(row, col, val)
print(A)
......@@ -4,7 +4,7 @@ import backend as F
import pytest
import torch
from dgl.sparse import create_from_coo, diag
from dgl.sparse import diag, from_coo
# TODO(#4818): Skipping tests on win.
if not sys.platform.startswith("linux"):
......@@ -37,7 +37,7 @@ def test_sparse_matrix_transpose(dense_dim, row, col, extra_shape):
val = torch.randn(val_shape).to(ctx)
row = torch.tensor(row).to(ctx)
col = torch.tensor(col).to(ctx)
mat = create_from_coo(row, col, val, mat_shape).transpose()
mat = from_coo(row, col, val, mat_shape).transpose()
mat_row, mat_col = mat.coo()
mat_val = mat.val
......
......@@ -4,7 +4,7 @@ import backend as F
import pytest
import torch
from dgl.sparse import create_from_coo
from dgl.sparse import from_coo
# TODO(#4818): Skipping tests on win.
if not sys.platform.startswith("linux"):
......@@ -16,7 +16,7 @@ def test_neg():
row = torch.tensor([1, 1, 3]).to(ctx)
col = torch.tensor([1, 2, 3]).to(ctx)
val = torch.tensor([1.0, 1.0, 2.0]).to(ctx)
A = create_from_coo(row, col, val)
A = from_coo(row, col, val)
neg_A = -A
assert A.shape == neg_A.shape
assert A.nnz == neg_A.nnz
......
import numpy as np
import torch
from dgl.sparse import (
create_from_coo,
create_from_csc,
create_from_csr,
SparseMatrix,
)
from dgl.sparse import from_coo, from_csc, from_csr, SparseMatrix
np.random.seed(42)
torch.random.manual_seed(42)
......@@ -28,7 +23,7 @@ def rand_coo(shape, nnz, dev, nz_dim=None):
val = torch.randn(nnz, device=dev, requires_grad=True)
else:
val = torch.randn(nnz, nz_dim, device=dev, requires_grad=True)
return create_from_coo(row, col, val, shape)
return from_coo(row, col, val, shape)
def rand_csr(shape, nnz, dev, nz_dim=None):
......@@ -47,7 +42,7 @@ def rand_csr(shape, nnz, dev, nz_dim=None):
indptr = torch.cumsum(indptr, 0)
row_sorted, row_sorted_idx = torch.sort(row)
indices = col[row_sorted_idx]
return create_from_csr(indptr, indices, val, shape=shape)
return from_csr(indptr, indices, val, shape=shape)
def rand_csc(shape, nnz, dev, nz_dim=None):
......@@ -66,7 +61,7 @@ def rand_csc(shape, nnz, dev, nz_dim=None):
indptr = torch.cumsum(indptr, 0)
col_sorted, col_sorted_idx = torch.sort(col)
indices = row[col_sorted_idx]
return create_from_csc(indptr, indices, val, shape=shape)
return from_csc(indptr, indices, val, shape=shape)
def rand_coo_uncoalesced(shape, nnz, dev):
......@@ -74,7 +69,7 @@ def rand_coo_uncoalesced(shape, nnz, dev):
row = torch.randint(shape[0], (nnz,), device=dev)
col = torch.randint(shape[1], (nnz,), device=dev)
val = torch.randn(nnz, device=dev, requires_grad=True)
return create_from_coo(row, col, val, shape)
return from_coo(row, col, val, shape)
def rand_csr_uncoalesced(shape, nnz, dev):
......@@ -88,7 +83,7 @@ def rand_csr_uncoalesced(shape, nnz, dev):
indptr = torch.cumsum(indptr, 0)
row_sorted, row_sorted_idx = torch.sort(row)
indices = col[row_sorted_idx]
return create_from_csr(indptr, indices, val, shape=shape)
return from_csr(indptr, indices, val, shape=shape)
def rand_csc_uncoalesced(shape, nnz, dev):
......@@ -102,7 +97,7 @@ def rand_csc_uncoalesced(shape, nnz, dev):
indptr = torch.cumsum(indptr, 0)
col_sorted, col_sorted_idx = torch.sort(col)
indices = row[col_sorted_idx]
return create_from_csc(indptr, indices, val, shape=shape)
return from_csc(indptr, indices, val, shape=shape)
def sparse_matrix_to_dense(A: SparseMatrix):
......
Markdown is supported
0% loaded — attach a file by dragging and dropping, or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment