Unverified Commit 255ad1b3 authored by Minjie Wang, committed by GitHub

[Sparse] New print for SparseMatrix and DiagMatrix (#5085)

* new print func

* lintrunner
parent 025e4321
@@ -3,7 +3,7 @@ from typing import Optional, Tuple
 
 import torch
 
-from .sparse_matrix import SparseMatrix, create_from_coo
+from .sparse_matrix import create_from_coo, SparseMatrix
 
 
 class DiagMatrix:
@@ -62,7 +62,7 @@ class DiagMatrix:
         return self._shape
 
     def __repr__(self):
-        return f"DiagMatrix(val={self.val}, \nshape={self.shape})"
+        return _diag_matrix_str(self)
 
     @property
     def nnz(self) -> int:
@@ -284,3 +284,28 @@ def identity(
     val_shape = (len_val, d)
     val = torch.ones(val_shape, dtype=dtype, device=device)
     return diag(val, shape)
+
+
+def _diag_matrix_str(spmat: DiagMatrix) -> str:
+    """Internal function for converting a diagonal matrix to string representation."""
+    values_str = str(spmat.val)
+    meta_str = f"size={spmat.shape}"
+    if spmat.val.dim() > 1:
+        val_size = tuple(spmat.val.shape[1:])
+        meta_str += f", val_size={val_size}"
+    prefix = f"{type(spmat).__name__}("
+
+    def _add_indent(_str, indent):
+        lines = _str.split("\n")
+        lines = [lines[0]] + [" " * indent + line for line in lines[1:]]
+        return "\n".join(lines)
+
+    final_str = (
+        "values="
+        + _add_indent(values_str, len("values="))
+        + ",\n"
+        + meta_str
+        + ")"
+    )
+    final_str = prefix + _add_indent(final_str, len(prefix))
+    return final_str
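
For context, here is a rough sketch of the output this formatter should produce for a plain diagonal matrix, assuming PyTorch's default tensor printing; the exact tensor rendering can vary with print options and device/dtype suffixes, so treat it as illustrative rather than authoritative:

    >>> import torch
    >>> from dgl.mock_sparse2 import diag
    >>> print(diag(torch.tensor([1.0, 1.0, 2.0])))
    DiagMatrix(values=tensor([1., 1., 2.]),
               size=(3, 3))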
@@ -112,8 +112,7 @@ class SparseMatrix:
         raise NotImplementedError
 
     def __repr__(self):
-        return f'SparseMatrix(indices={self.indices("COO")}, \
-\nvalues={self.val}, \nshape={self.shape}, nnz={self.nnz})'
+        return _sparse_matrix_str(self)
 
     def coo(self) -> Tuple[torch.Tensor, ...]:
         """Get the coordinate (COO) representation of the sparse matrix.
@@ -521,3 +520,32 @@ def val_like(mat: SparseMatrix, val: torch.Tensor) -> SparseMatrix:
     shape=(3, 5), nnz=3)
     """
     return SparseMatrix(torch.ops.dgl_sparse.val_like(mat.c_sparse_matrix, val))
+
+
+def _sparse_matrix_str(spmat: SparseMatrix) -> str:
+    """Internal function for converting a sparse matrix to string representation."""
+    indices_str = str(spmat.indices("COO"))
+    values_str = str(spmat.val)
+    meta_str = f"size={spmat.shape}, nnz={spmat.nnz}"
+    if spmat.val.dim() > 1:
+        val_size = tuple(spmat.val.shape[1:])
+        meta_str += f", val_size={val_size}"
+    prefix = f"{type(spmat).__name__}("
+
+    def _add_indent(_str, indent):
+        lines = _str.split("\n")
+        lines = [lines[0]] + [" " * indent + line for line in lines[1:]]
+        return "\n".join(lines)
+
+    final_str = (
+        "indices="
+        + _add_indent(indices_str, len("indices="))
+        + ",\n"
+        + "values="
+        + _add_indent(values_str, len("values="))
+        + ",\n"
+        + meta_str
+        + ")"
+    )
+    final_str = prefix + _add_indent(final_str, len(prefix))
+    return final_str
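
Similarly, a sketch of what printing a small COO matrix should look like under the new __repr__, assuming indices("COO") renders as a single (2, nnz) index tensor and PyTorch's default tensor formatting (device/dtype suffixes would shift the alignment slightly):

    >>> import torch
    >>> from dgl.mock_sparse2 import create_from_coo
    >>> row = torch.tensor([1, 1, 3])
    >>> col = torch.tensor([2, 1, 3])
    >>> val = torch.tensor([1.0, 1.0, 2.0])
    >>> print(create_from_coo(row, col, val))
    SparseMatrix(indices=tensor([[1, 1, 3],
                                 [2, 1, 3]]),
                 values=tensor([1., 1., 2.]),
                 size=(4, 4), nnz=3)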
+import sys
+
+import backend as F
 import pytest
 import torch
-import sys
-from dgl.mock_sparse2 import diag, identity, DiagMatrix
+from dgl.mock_sparse2 import diag, DiagMatrix, identity
 # TODO(#4818): Skipping tests on win.
 if not sys.platform.startswith("linux"):
@@ -12,8 +14,9 @@ if not sys.platform.startswith("linux"):
 @pytest.mark.parametrize("val_shape", [(3,), (3, 2)])
 @pytest.mark.parametrize("mat_shape", [None, (3, 5), (5, 3)])
 def test_diag(val_shape, mat_shape):
+    ctx = F.ctx()
     # creation
-    val = torch.randn(val_shape)
+    val = torch.randn(val_shape).to(ctx)
     mat = diag(val, mat_shape)
     # val, shape attributes
@@ -22,8 +25,7 @@ def test_diag(val_shape, mat_shape):
         mat_shape = (val_shape[0], val_shape[0])
     assert mat.shape == mat_shape
-    # __call__
-    val = torch.randn(val_shape)
+    val = torch.randn(val_shape).to(ctx)
     # nnz
     assert mat.nnz == val.shape[0]
@@ -54,6 +56,7 @@ def test_diag(val_shape, mat_shape):
 @pytest.mark.parametrize("shape", [(3, 3), (3, 5), (5, 3)])
 @pytest.mark.parametrize("d", [None, 2])
 def test_identity(shape, d):
+    ctx = F.ctx()
     # creation
     mat = identity(shape, d)
     # type
@@ -68,3 +71,17 @@ def test_identity(shape, d):
         val_shape = (len_val, d)
     val = torch.ones(val_shape)
     assert torch.allclose(val, mat.val)
+
+
+def test_print():
+    ctx = F.ctx()
+    # basic
+    val = torch.tensor([1.0, 1.0, 2.0]).to(ctx)
+    A = diag(val)
+    print(A)
+
+    # vector-shape non zero
+    val = torch.randn(3, 2).to(ctx)
+    A = diag(val)
+    print(A)
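
The vector-shape case above prints nondeterministic torch.randn values; with a fixed value tensor instead, the metadata line should additionally report val_size, roughly as follows (illustrative only, with the same PyTorch-formatting caveat as the earlier sketches):

    >>> print(diag(torch.ones(3, 2)))
    DiagMatrix(values=tensor([[1., 1.],
                              [1., 1.],
                              [1., 1.]]),
               size=(3, 3), val_size=(2,))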
@@ -418,3 +418,21 @@ def test_has_duplicate():
     indptr, indices, _ = coo_A.csc()
     csc_A = create_from_csc(indptr, indices, val, shape)
     assert csc_A.has_duplicate()
+
+
+def test_print():
+    ctx = F.ctx()
+    # basic
+    row = torch.tensor([1, 1, 3]).to(ctx)
+    col = torch.tensor([2, 1, 3]).to(ctx)
+    val = torch.tensor([1.0, 1.0, 2.0]).to(ctx)
+    A = create_from_coo(row, col, val)
+    print(A)
+
+    # vector-shape non zero
+    row = torch.tensor([1, 1, 3]).to(ctx)
+    col = torch.tensor([2, 1, 3]).to(ctx)
+    val = torch.randn(3, 2).to(ctx)
+    A = create_from_coo(row, col, val)
+    print(A)