"llmdeploy/serve/vscode:/vscode.git/clone" did not exist on "ee9627849e8fbeecaf735e1b22fd4e2fa3154ba8"
Unverified commit b3308ae7 authored by Hongzhi (Steve) Chen, committed by GitHub

Cleanup mock_sparse related code. (#5230)



* remove_mock_sparse_example

* mock_sparse_test

* remove_mock_sparse

---------
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 829ce109
import operator
import numpy as np
import pytest
import torch
import dgl
from dgl.mock_sparse import SparseMatrix, diag
parametrize_idtype = pytest.mark.parametrize(
"idtype", [torch.int32, torch.int64]
)
parametrize_dtype = pytest.mark.parametrize(
"dtype", [torch.float32, torch.float64]
)
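# Helper: assert two torch sparse tensors agree in indices, values, and shape.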
def all_close_sparse(A, B):
assert torch.allclose(A.indices(), B.indices())
assert torch.allclose(A.values(), B.values())
assert A.shape == B.shape
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_sparse_op_sparse(idtype, dtype, op):
rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7])
valA = torch.rand(len(rowA))
A = SparseMatrix(rowA, colA, valA, shape=(10, 50))
w = torch.rand(len(rowA))
A1 = SparseMatrix(rowA, colA, w, shape=(10, 50))
rowB = torch.tensor([1, 9, 2, 7, 1, 1, 0])
colB = torch.tensor([0, 1, 2, 1, 7, 11, 15])
valB = torch.rand(len(rowB))
B = SparseMatrix(rowB, colB, valB, shape=(10, 50))
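# A and A1 share the same sparsity pattern (rowA/colA), so elementwise division can be verified value-by-value below.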
def _test():
if op is not operator.truediv:
all_close_sparse(op(A.adj, A1.adj), op(A, A1).adj)
all_close_sparse(op(A.adj, B.adj), op(A, B).adj)
else:
# sparse div is not supported in PyTorch
assert np.allclose(
op(A, A1).val, op(A.val, A1.val), rtol=1e-4, atol=1e-4
)
_test()
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_sparse_op_diag(idtype, dtype, op):
rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7])
valA = torch.rand(len(rowA))
A = SparseMatrix(rowA, colA, valA, shape=(10, 50))
D = diag(torch.arange(2, 12), shape=A.shape)
D_sp = D.as_sparse()
def _test():
if op is not operator.truediv:
all_close_sparse(op(A.adj, D_sp.adj), op(A, D).adj)
else:
# NOTE (Israt): Matrices must have the same sparsity pattern for div
D2 = diag(torch.arange(12, 22), shape=A.shape)
A_sp2 = D2.as_sparse()
assert np.allclose(
op(A_sp2, D).val, op(A_sp2.val, D_sp.val), rtol=1e-4, atol=1e-4
)
_test()
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize("v_scalar", [2, 2.5])
def test_sparse_op_scalar(idtype, dtype, v_scalar):
row = torch.randint(1, 500, (100,))
col = torch.randint(1, 500, (100,))
val = torch.rand(100)
A = SparseMatrix(row, col, val)
all_close_sparse(A.adj * v_scalar, (A * v_scalar).adj)
all_close_sparse(A.adj / v_scalar, (A / v_scalar).adj)
all_close_sparse(pow(A.adj, v_scalar), pow(A, v_scalar).adj)
@parametrize_idtype
@parametrize_dtype
@pytest.mark.parametrize("v_scalar", [2, 2.5])
def test_scalar_op_sparse(idtype, dtype, v_scalar):
row = torch.randint(1, 500, (100,))
col = torch.randint(1, 500, (100,))
val = torch.rand(100)
A = SparseMatrix(row, col, val)
all_close_sparse(v_scalar * A.adj, (v_scalar * A).adj)
def test_expose_op():
rowA = torch.tensor([1, 0, 2, 7, 1])
colA = torch.tensor([0, 49, 2, 1, 7])
A = dgl.mock_sparse.SparseMatrix(rowA, colA, shape=(10, 50))
dgl.mock_sparse.add(A, A)
dgl.mock_sparse.sub(A, A)
dgl.mock_sparse.mul(A, A)
dgl.mock_sparse.div(A, A)
import torch
import backend as F
from dgl.mock_sparse import create_from_coo, diag, bspmm, bspspmm
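# Helper: densify a mock_sparse matrix via a torch COO tensor; a trailing value dimension (e.g. heads) becomes an extra dense dimension.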
def get_adj(A):
edge_index = torch.cat((A.row.unsqueeze(0), A.col.unsqueeze(0)), 0)
shape = A.shape
if len(A.val.shape) > 1:
shape += (A.val.shape[-1],)
return torch.sparse_coo_tensor(edge_index, A.val, shape).coalesce().to_dense()
def test_sparse_dense_mm():
dev = F.ctx()
# A: shape (N, M), X: shape (M, F)
row = torch.tensor([0, 1, 1]).to(dev)
col = torch.tensor([1, 0, 1]).to(dev)
val = torch.randn(len(row)).to(dev)
A = create_from_coo(row, col, val)
X = torch.randn(2, 3).to(dev)
sparse_result = A @ X
adj = get_adj(A)
dense_result = adj @ X
assert torch.allclose(sparse_result, dense_result)
# X: shape (M)
X = torch.randn(2).to(dev)
sparse_result = A @ X
dense_result = adj @ X
assert torch.allclose(sparse_result, dense_result)
def test_sparse_sparse_mm():
dev = F.ctx()
row1 = torch.tensor([0, 1, 1]).to(dev)
col1 = torch.tensor([1, 0, 1]).to(dev)
val1 = torch.randn(len(row1)).to(dev)
A1 = create_from_coo(row1, col1, val1)
row2 = torch.tensor([0, 1, 1]).to(dev)
col2 = torch.tensor([0, 2, 1]).to(dev)
val2 = torch.randn(len(row2)).to(dev)
A2 = create_from_coo(row2, col2, val2)
sparse_result = get_adj(A1 @ A2)
dense_result = get_adj(A1) @ get_adj(A2)
assert torch.allclose(sparse_result, dense_result)
def test_sparse_diag_mm():
dev = F.ctx()
row = torch.tensor([0, 1, 1]).to(dev)
col = torch.tensor([1, 0, 1]).to(dev)
val1 = torch.randn(len(row)).to(dev)
A = create_from_coo(row, col, val1)
val2 = torch.randn(2).to(dev)
D = diag(val2, (2, 3))
M1 = get_adj(A @ D)
M2 = get_adj(A @ D.as_sparse())
assert torch.allclose(M1, M2)
def test_diag_dense_mm():
dev = F.ctx()
# D: shape (N, N), X: shape (N, F)
val = torch.randn(3).to(dev)
D = diag(val)
X = torch.randn(3, 2).to(dev)
sparse_result = D @ X
dense_result = get_adj(D.as_sparse()) @ X
assert torch.allclose(sparse_result, dense_result)
# D: shape (N, M), N > M, X: shape (M, F)
val = torch.randn(3).to(dev)
D = diag(val, shape=(4, 3))
sparse_result = D @ X
dense_result = get_adj(D.as_sparse()) @ X
assert torch.allclose(sparse_result, dense_result)
# D: shape (N, M), N < M, X: shape (M, F)
val = torch.randn(2).to(dev)
D = diag(val, shape=(2, 3))
sparse_result = D @ X
dense_result = get_adj(D.as_sparse()) @ X
assert torch.allclose(sparse_result, dense_result)
# D: shape (N, M), X: shape (M)
val = torch.randn(3).to(dev)
D = diag(val)
X = torch.randn(3).to(dev)
sparse_result = D @ X
dense_result = get_adj(D.as_sparse()) @ X
assert torch.allclose(sparse_result, dense_result)
def test_diag_sparse_mm():
dev = F.ctx()
row = torch.tensor([0, 1, 1]).to(dev)
col = torch.tensor([1, 0, 1]).to(dev)
val1 = torch.randn(len(row)).to(dev)
A = create_from_coo(row, col, val1)
val2 = torch.randn(2).to(dev)
D = diag(val2, (3, 2))
M1 = get_adj(D @ A)
M2 = get_adj(D.as_sparse() @ A)
assert torch.allclose(M1, M2)
def test_diag_diag_mm():
dev = F.ctx()
# D1, D2: shape (N, N)
val1 = torch.randn(3).to(dev)
D1 = diag(val1)
val2 = torch.randn(3).to(dev)
D2 = diag(val2)
sparse_result = D1 @ D2
assert torch.allclose(sparse_result.val, D1.val * D2.val)
# D1: shape (N, M), D2: shape (M, P)
N = 3
M = 4
P = 2
val1 = torch.randn(N).to(dev)
D1 = diag(val1, (N, M))
val2 = torch.randn(P).to(dev)
D2 = diag(val2, (M, P))
M1 = get_adj((D1 @ D2).as_sparse())
M2 = get_adj(D1.as_sparse() @ D2.as_sparse())
assert torch.allclose(M1, M2)
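# Batched tests below: values carry a trailing head dimension H, and the dense reference multiplies one head slice at a time.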
def test_batch_sparse_dense_mm():
dev = F.ctx()
# A: shape (N, M), val shape (nnz, H)
# X: shape (M, F, H)
H = 4
row = torch.tensor([0, 1, 1]).to(dev)
col = torch.tensor([1, 0, 1]).to(dev)
val = torch.randn(len(row), H).to(dev)
A = create_from_coo(row, col, val)
X = torch.randn(2, 3, H).to(dev)
sparse_result = bspmm(A, X)
dense_A = get_adj(A)
dense_result = torch.stack([
dense_A[:, :, i] @ X[..., i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
# X: shape (M, H)
X = torch.randn(2, H).to(dev)
sparse_result = bspmm(A, X)
dense_A = get_adj(A)
dense_result = torch.stack([
dense_A[:, :, i] @ X[..., i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
def test_batch_sparse_sparse_mm():
H = 4
dev = F.ctx()
row1 = torch.tensor([0, 1, 1]).to(dev)
col1 = torch.tensor([1, 0, 1]).to(dev)
val1 = torch.randn(len(row1), H).to(dev)
A1 = create_from_coo(row1, col1, val1)
row2 = torch.tensor([0, 1, 1]).to(dev)
col2 = torch.tensor([0, 2, 1]).to(dev)
val2 = torch.randn(len(row2), H).to(dev)
A2 = create_from_coo(row2, col2, val2)
sparse_result = get_adj(bspspmm(A1, A2))
dense_A1 = get_adj(A1)
dense_A2 = get_adj(A2)
dense_result = torch.stack([
dense_A1[:, :, i] @ dense_A2[:, :, i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
def test_batch_sparse_diag_mm():
H = 4
dev = F.ctx()
row = torch.tensor([0, 1, 1]).to(dev)
col = torch.tensor([1, 0, 1]).to(dev)
val1 = torch.randn(len(row), H).to(dev)
A = create_from_coo(row, col, val1)
val2 = torch.randn(2, H).to(dev)
D = diag(val2, (2, 3))
sparse_result = get_adj(bspspmm(A, D))
dense_A = get_adj(A)
dense_D = get_adj(D.as_sparse())
dense_result = torch.stack([
dense_A[:, :, i] @ dense_D[:, :, i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
def test_batch_diag_dense_mm():
dev = F.ctx()
H = 4
# X: shape (N, F, H)
val = torch.randn(3, H).to(dev)
D = diag(val)
X = torch.randn(3, 2, H).to(dev)
sparse_result = bspmm(D, X)
dense_D = get_adj(D.as_sparse())
dense_result = torch.stack([
dense_D[:, :, i] @ X[..., i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
# X: shape (N, H)
X = torch.randn(3, H).to(dev)
sparse_result = bspmm(D, X)
dense_D = get_adj(D.as_sparse())
dense_result = torch.stack([
dense_D[:, :, i] @ X[..., i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
def test_batch_diag_sparse_mm():
dev = F.ctx()
H = 4
row = torch.tensor([0, 1, 1]).to(dev)
col = torch.tensor([1, 0, 1]).to(dev)
val1 = torch.randn(len(row), H).to(dev)
A = create_from_coo(row, col, val1)
val2 = torch.randn(2, H).to(dev)
D = diag(val2, (3, 2))
sparse_result = get_adj(bspspmm(D, A))
dense_A = get_adj(A)
dense_D = get_adj(D.as_sparse())
dense_result = torch.stack([
dense_D[:, :, i] @ dense_A[:, :, i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
def test_batch_diag_diag_mm():
dev = F.ctx()
H = 4
# D1, D2: shape (N, N)
val1 = torch.randn(3, H).to(dev)
D1 = diag(val1)
val2 = torch.randn(3, H).to(dev)
D2 = diag(val2)
M1 = bspspmm(D1, D2)
assert M1.shape == (3, 3)
assert torch.allclose(M1.val, val1 * val2)
# D1: shape (N, M), D2: shape (M, P)
N = 3
M = 4
P = 2
val1 = torch.randn(N, H).to(dev)
D1 = diag(val1, (N, M))
val2 = torch.randn(P, H).to(dev)
D2 = diag(val2, (M, P))
sparse_result = get_adj(bspspmm(D1, D2).as_sparse())
dense_D1 = get_adj(D1.as_sparse())
dense_D2 = get_adj(D2.as_sparse())
dense_result = torch.stack([
dense_D1[:, :, i] @ dense_D2[:, :, i] for i in range(H)
], dim=-1)
assert torch.allclose(sparse_result, dense_result)
import pytest
import torch
import numpy
from dgl.mock_sparse import create_from_coo
@pytest.mark.skip(reason="no way of currently testing this")
@pytest.mark.parametrize("dense_dim", [None, 2])
@pytest.mark.parametrize("row", [[0, 0, 1, 2], (0, 1, 2, 4)])
@pytest.mark.parametrize("col", [(0, 1, 2, 2), (1, 3, 3, 4)])
@pytest.mark.parametrize("extra_shape", [(0, 1), (2, 1)])
@pytest.mark.parametrize("reduce_type", ['sum', 'smax', 'smin', 'smean'])
@pytest.mark.parametrize("dim", [None, 0, 1])
def test_reduction(dense_dim, row, col, extra_shape, reduce_type, dim):
mat_shape = (max(row) + 1 + extra_shape[0], max(col) + 1 + extra_shape[1])
val_shape = (len(row),)
if dense_dim is not None:
val_shape += (dense_dim,)
val = torch.randn(val_shape)
row = torch.tensor(row)
col = torch.tensor(col)
mat = create_from_coo(row, col, val, mat_shape)
reduce_func = getattr(mat, reduce_type)
reduced = reduce_func(dim)
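# Reference implementation in numpy: accumulate stored values globally or per row/column.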
def calc_expected(row, col, val, mat_shape, reduce_type, dim):
def reduce_func(reduce_type, lhs, rhs):
if lhs is None:
return rhs
if reduce_type == 'sum' or reduce_type == 'smean':
return lhs + rhs
if reduce_type == 'smax':
return numpy.maximum(lhs, rhs)
if reduce_type == 'smin':
return numpy.minimum(lhs, rhs)
val = val.numpy()
row = row.numpy()
col = col.numpy()
if dim is None:
reduced = None
for i in range(val.shape[0]):
reduced = reduce_func(reduce_type, reduced, val[i])
if reduced is None:
reduced = numpy.zeros(val.shape[1:])
if reduce_type == 'smean':
reduced = reduced / val.shape[0]
return reduced
reduced_shape = (mat_shape[0] if dim == 1 else mat_shape[1])
reduced = [None] * reduced_shape
count = [0] * reduced_shape
for i, (r, c) in enumerate(zip(row, col)):
axis = r if dim == 1 else c
reduced[axis] = reduce_func(reduce_type, reduced[axis], val[i])
count[axis] += 1
for i in range(reduced_shape):
if count[i] == 0:
reduced[i] = numpy.zeros(val.shape[1:])
else:
if reduce_type == 'smean':
reduced[i] /= count[i]
return numpy.stack(reduced, axis=0)
expected = calc_expected(row, col, val, mat_shape, reduce_type, dim)
assert torch.allclose(reduced, torch.tensor(expected).float())
import unittest
import backend as F
import dgl
import pytest
import torch
from dgl.mock_sparse import SparseMatrix
parametrize_idtype = pytest.mark.parametrize(
"idtype", [torch.int32, torch.int64]
)
parametrize_dtype = pytest.mark.parametrize(
"dtype", [torch.float32, torch.float64]
)
def all_close_sparse(A, B):
assert torch.allclose(A.indices(), B.indices())
assert torch.allclose(A.values(), B.values())
assert A.shape == B.shape
# TODO (Israt): Implement sddmm. Do not rely on PyTorch.
@unittest.skipIf(
F._default_context_str == "cpu",
reason="sddmm uses sampled_addmm from pytorch which supports only CUDA",
)
@unittest.skipIf(
F._default_context_str == "gpu",
reason="sddmm uses sampled_addmm from pytorch which requires pytorch "
"1.12 or higher. Current CI doesn't support that.",
)
@parametrize_idtype
@parametrize_dtype
def test_sddmm(idtype, dtype):
row = torch.tensor([1, 0, 2, 9, 1])
col = torch.tensor([0, 49, 2, 1, 7])
val = torch.arange(1, 6).float()
A = SparseMatrix(row, col, val, shape=(10, 50))
matB = torch.rand(10, 5)
matC = torch.rand(5, 50)
dgl_result = dgl.mock_sparse.sddmm(A, matB, matC)
th_result = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), matB, matC)
all_close_sparse(dgl_result.adj, th_result.to_sparse_coo())
import pytest
import torch
from dgl.mock_sparse import create_from_coo, create_from_csr, create_from_csc
@pytest.mark.parametrize("dense_dim", [None, 4])
@pytest.mark.parametrize("row", [[0, 0, 1, 2], (0, 1, 2, 4)])
@pytest.mark.parametrize("col", [(0, 1, 2, 2), (1, 3, 3, 4)])
@pytest.mark.parametrize("mat_shape", [None, (3, 5), (5, 3)])
def test_create_from_coo(dense_dim, row, col, mat_shape):
# Skip invalid matrices
if mat_shape is not None and (
max(row) >= mat_shape[0] or max(col) >= mat_shape[1]
):
return
val_shape = (len(row),)
if dense_dim is not None:
val_shape += (dense_dim,)
val = torch.randn(val_shape)
row = torch.tensor(row)
col = torch.tensor(col)
mat = create_from_coo(row, col, val, mat_shape)
if mat_shape is None:
mat_shape = (torch.max(row).item() + 1, torch.max(col).item() + 1)
assert mat.shape == mat_shape
assert mat.nnz == row.numel()
assert mat.dtype == val.dtype
assert torch.allclose(mat.val, val)
assert torch.allclose(mat.row, row)
assert torch.allclose(mat.col, col)
@pytest.mark.skip(reason="no way of currently testing this")
@pytest.mark.parametrize("dense_dim", [None, 4])
@pytest.mark.parametrize("indptr", [[0, 0, 1, 4], (0, 1, 2, 4)])
@pytest.mark.parametrize("indices", [(0, 1, 2, 3), (1, 2, 3, 4)])
@pytest.mark.parametrize("mat_shape", [None, (3, 5)])
def test_create_from_csr(dense_dim, indptr, indices, mat_shape):
val_shape = (len(indices),)
if dense_dim is not None:
val_shape += (dense_dim,)
val = torch.randn(val_shape)
indptr = torch.tensor(indptr)
indices = torch.tensor(indices)
mat = create_from_csr(indptr, indices, val, mat_shape)
if mat_shape is None:
mat_shape = (indptr.numel() - 1, torch.max(indices).item() + 1)
assert mat.device == val.device
assert mat.shape == mat_shape
assert mat.nnz == indices.numel()
assert mat.dtype == val.dtype
assert torch.allclose(mat.val, val)
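# Recover COO rows from CSR indptr: row i is repeated deg[i] = indptr[i + 1] - indptr[i] times.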
deg = torch.diff(indptr)
row = torch.repeat_interleave(torch.arange(deg.numel()), deg)
assert torch.allclose(mat.row, row)
col = indices
assert torch.allclose(mat.col, col)
@pytest.mark.skip(reason="no way of currently testing this")
@pytest.mark.parametrize("dense_dim", [None, 4])
@pytest.mark.parametrize("indptr", [[0, 0, 1, 4], (0, 1, 2, 4)])
@pytest.mark.parametrize("indices", [(0, 1, 2, 3), (1, 2, 3, 4)])
@pytest.mark.parametrize("mat_shape", [None, (5, 3)])
def test_create_from_csc(dense_dim, indptr, indices, mat_shape):
val_shape = (len(indices),)
if dense_dim is not None:
val_shape += (dense_dim,)
val = torch.randn(val_shape)
indptr = torch.tensor(indptr)
indices = torch.tensor(indices)
mat = create_from_csc(indptr, indices, val, mat_shape)
if mat_shape is None:
mat_shape = (torch.max(indices).item() + 1, indptr.numel() - 1)
assert mat.device == val.device
assert mat.shape == mat_shape
assert mat.nnz == indices.numel()
assert mat.dtype == val.dtype
assert torch.allclose(mat.val, val)
row = indices
assert torch.allclose(mat.row, row)
deg = torch.diff(indptr)
col = torch.repeat_interleave(torch.arange(deg.numel()), deg)
assert torch.allclose(mat.col, col)
import pytest
import torch
from dgl.mock_sparse import diag, create_from_coo
@pytest.mark.parametrize("val_shape", [(3,), (3, 2)])
@pytest.mark.parametrize("mat_shape", [None, (3, 5), (5, 3)])
def test_diag_matrix_transpose(val_shape, mat_shape):
val = torch.randn(val_shape)
mat = diag(val, mat_shape).transpose()
assert torch.allclose(mat.val, val)
if mat_shape is None:
mat_shape = (val_shape[0], val_shape[0])
assert mat.shape == mat_shape[::-1]
@pytest.mark.parametrize("dense_dim", [None, 2])
@pytest.mark.parametrize("row", [[0, 0, 1, 2], (0, 1, 2, 4)])
@pytest.mark.parametrize("col", [(0, 1, 2, 2), (1, 3, 3, 4)])
@pytest.mark.parametrize("extra_shape", [(0, 1), (2, 1)])
def test_sparse_matrix_transpose(dense_dim, row, col, extra_shape):
mat_shape = (max(row) + 1 + extra_shape[0], max(col) + 1 + extra_shape[1])
val_shape = (len(row),)
if dense_dim is not None:
val_shape += (dense_dim,)
val = torch.randn(val_shape)
row = torch.tensor(row)
col = torch.tensor(col)
mat = create_from_coo(row, col, val, mat_shape).transpose()
assert mat.shape == mat_shape[::-1]
assert torch.allclose(mat.val, val)
assert torch.allclose(mat.row, col)
assert torch.allclose(mat.col, row)
import pytest
import torch
import backend as F
from dgl.convert import graph
from dgl.mock_sparse import diag, create_from_coo
from dgl.ops import edge_softmax
@pytest.mark.parametrize('val_shape', [(3,), (3, 2)])
@pytest.mark.parametrize('mat_shape', [(3, 3), (5, 3)])
def test_neg_diag(val_shape, mat_shape):
val = torch.randn(val_shape).to(F.ctx())
mat = diag(val, mat_shape)
neg_mat = -mat
assert neg_mat.shape == mat.shape
assert torch.allclose(-mat.val, neg_mat.val)
def test_inv_diag():
val = torch.randn(3).to(F.ctx())
mat = diag(val, (3, 3))
inv_mat = mat.inv()
assert inv_mat.shape == mat.shape
assert torch.allclose(1. / mat.val, inv_mat.val)
@pytest.mark.parametrize('val_shape', [(3,), (3, 2)])
@pytest.mark.parametrize('mat_shape', [(3, 3), (5, 3)])
def test_softmax_diag(val_shape, mat_shape):
val = torch.randn(val_shape).to(F.ctx())
mat = diag(val, mat_shape)
softmax_mat = mat.softmax()
assert softmax_mat.shape == mat.shape
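# Each row of a diagonal matrix holds a single stored entry, so its row-wise softmax is 1 everywhere.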
assert torch.allclose(softmax_mat.val, torch.ones_like(mat.val))
@pytest.mark.parametrize('val_shape', [(3,), (3, 2)])
@pytest.mark.parametrize('mat_shape', [(4, 4), (5, 4)])
def test_neg_sp(val_shape, mat_shape):
device = F.ctx()
row = torch.tensor([1, 1, 3]).to(device)
col = torch.tensor([1, 2, 3]).to(device)
val = torch.randn(val_shape).to(device)
mat = create_from_coo(row, col, val, mat_shape)
neg_mat = -mat
assert neg_mat.shape == mat.shape
assert torch.allclose(-mat.val, neg_mat.val)
def test_inv_sp():
device = F.ctx()
row = torch.tensor([0, 1, 1]).to(device)
col = torch.tensor([0, 0, 1]).to(device)
val = torch.tensor([1., 1., 2.]).to(device)
mat = create_from_coo(row, col, val)
inv_mat = mat.inv()
assert inv_mat.shape == mat.shape
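# mat is the lower-triangular matrix [[1, 0], [1, 2]]; its inverse is [[1, 0], [-0.5, 0.5]], matching the expected values below.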
assert torch.allclose(torch.tensor([1., -0.5, 0.5]).to(device), inv_mat.val)
@pytest.mark.parametrize('val_shape', [(4,), (4, 2)])
def test_softmax_sp(val_shape):
device = F.ctx()
row = torch.tensor([0, 0, 1, 2]).to(device)
col = torch.tensor([1, 2, 2, 0]).to(device)
val = torch.randn(val_shape).to(device)
mat = create_from_coo(row, col, val)
result = mat.softmax()
assert result.shape == mat.shape
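# Reference: edge_softmax normalizes edge values per destination node; with dst = mat.row this is a row-wise softmax.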
g = graph((mat.col, mat.row))
assert torch.allclose(result.val, edge_softmax(g, mat.val))