Unverified Commit 9088c635 authored by Mufei Li's avatar Mufei Li Committed by GitHub
Browse files

[Feature] Change the Default Value of transpose in DGLGraph.adj (#2046)

* Update adj

* Fix
parent edfbee2c
......@@ -67,7 +67,7 @@ def gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, d
# initialize graph
dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx)
adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs):
start = time.time()
if distributed:
......
......@@ -65,7 +65,7 @@ def graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samp
# initialize graph
dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx)
adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs):
start = time.time()
if distributed:
......
......@@ -191,7 +191,7 @@ def gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, d
# initialize graph
dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx)
adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs):
start = time.time()
if distributed:
......
......@@ -229,7 +229,7 @@ def graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samp
# initialize graph
dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx)
adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs):
start = time.time()
if distributed:
......
......@@ -106,7 +106,7 @@ class AdaptGenerator(object):
def __init__(self, graph, num_blocks, node_feature=None, sampler=None, num_workers=0, coalesce=False,
sampler_weights=None, layer_nodes=None):
self.node_feature = node_feature
adj = graph.adjacency_matrix_scipy()
adj = graph.adjacency_matrix_scipy(transpose=False)
adj.data = np.ones(adj.nnz)
self.norm_adj = normalize_adj(adj).tocsr()
self.layer_nodes = layer_nodes
......@@ -370,7 +370,7 @@ class AdaptGraphSAGENet(nn.Module):
])
self.sample_weights = sample_weights
self.node_feature = node_feature
self.norm_adj = normalize_adj(trainG.adjacency_matrix_scipy())
self.norm_adj = normalize_adj(trainG.adjacency_matrix_scipy(transpose=False))
def forward(self, nf, h, is_test=False):
for i, layer in enumerate(self.layers):
......
......@@ -123,7 +123,7 @@ class DiffPoolBatchedGraphLayer(nn.Module):
assign_tensor = masked_softmax(assign_tensor, mask,
memory_efficient=False)
h = torch.matmul(torch.t(assign_tensor), feat)
adj = g.adjacency_matrix(ctx=device)
adj = g.adjacency_matrix(transpose=False, ctx=device)
adj_new = torch.sparse.mm(adj, assign_tensor)
adj_new = torch.mm(torch.t(assign_tensor), adj_new)
......
......@@ -3329,11 +3329,11 @@ class DGLHeteroGraph(object):
else:
return deg
def adjacency_matrix(self, transpose=None, ctx=F.cpu(), scipy_fmt=None, etype=None):
def adjacency_matrix(self, transpose=True, ctx=F.cpu(), scipy_fmt=None, etype=None):
"""Alias of :func:`adj`"""
return self.adj(transpose, ctx, scipy_fmt, etype)
def adj(self, transpose=None, ctx=F.cpu(), scipy_fmt=None, etype=None):
def adj(self, transpose=True, ctx=F.cpu(), scipy_fmt=None, etype=None):
"""Return the adjacency matrix of edges of the given edge type.
By default, a row of returned adjacency matrix represents the
......@@ -3345,7 +3345,7 @@ class DGLHeteroGraph(object):
Parameters
----------
transpose : bool, optional
A flag to transpose the returned adjacency matrix. (Default: False)
A flag to transpose the returned adjacency matrix. (Default: True)
ctx : context, optional
The context of returned adjacency matrix. (Default: cpu)
scipy_fmt : str, optional
......@@ -3381,31 +3381,24 @@ class DGLHeteroGraph(object):
Get a backend dependent sparse tensor. Here we use PyTorch for example.
>>> g.adj(etype='develops')
tensor(indices=tensor([[0, 2],
[0, 1]]),
tensor(indices=tensor([[0, 1],
[0, 2]]),
values=tensor([1., 1.]),
size=(3, 2), nnz=2, layout=torch.sparse_coo)
size=(2, 3), nnz=2, layout=torch.sparse_coo)
Get a scipy coo sparse matrix.
>>> g.adj(scipy_fmt='coo', etype='develops')
<3x2 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in COOrdinate format>
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in COOrdinate format>
"""
if transpose is None:
dgl_warning(
"Currently adjacency_matrix() returns a matrix with destination as rows"
" by default.\n\tIn 0.5 the result will have source as rows"
" (i.e. transpose=True)")
transpose = False
etid = self.get_etype_id(etype)
if scipy_fmt is None:
return self._graph.adjacency_matrix(etid, transpose, ctx)[0]
else:
return self._graph.adjacency_matrix_scipy(etid, transpose, scipy_fmt, False)
def adjacency_matrix_scipy(self, transpose=None, fmt='csr', return_edge_ids=None):
def adjacency_matrix_scipy(self, transpose=True, fmt='csr', return_edge_ids=None):
"""DEPRECATED: please use ``dgl.adjacency_matrix(transpose, scipy_fmt=fmt)``.
"""
dgl_warning('DGLGraph.adjacency_matrix_scipy is deprecated. '
......
......@@ -453,7 +453,7 @@ def test_edge_ids():
@parametrize_dtype
def test_adj(idtype):
g = create_test_heterograph(idtype)
adj = F.sparse_to_numpy(g.adj(etype='follows'))
adj = F.sparse_to_numpy(g.adj(transpose=False, etype='follows'))
assert np.allclose(
adj,
np.array([[0., 0., 0.],
......@@ -465,7 +465,7 @@ def test_adj(idtype):
np.array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]]))
adj = F.sparse_to_numpy(g.adj(etype='plays'))
adj = F.sparse_to_numpy(g.adj(transpose=False, etype='plays'))
assert np.allclose(
adj,
np.array([[1., 1., 0.],
......@@ -477,29 +477,29 @@ def test_adj(idtype):
[1., 1.],
[0., 1.]]))
adj = g.adj(scipy_fmt='csr', etype='follows')
adj = g.adj(transpose=False, scipy_fmt='csr', etype='follows')
assert np.allclose(
adj.todense(),
np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]]))
adj = g.adj(scipy_fmt='coo', etype='follows')
adj = g.adj(transpose=False, scipy_fmt='coo', etype='follows')
assert np.allclose(
adj.todense(),
np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]]))
adj = g.adj(scipy_fmt='csr', etype='plays')
adj = g.adj(transpose=False, scipy_fmt='csr', etype='plays')
assert np.allclose(
adj.todense(),
np.array([[1., 1., 0.],
[0., 1., 1.]]))
adj = g.adj(scipy_fmt='coo', etype='plays')
adj = g.adj(transpose=False, scipy_fmt='coo', etype='plays')
assert np.allclose(
adj.todense(),
np.array([[1., 1., 0.],
[0., 1., 1.]]))
adj = F.sparse_to_numpy(g['follows'].adj())
adj = F.sparse_to_numpy(g['follows'].adj(transpose=False))
assert np.allclose(
adj,
np.array([[0., 0., 0.],
......
......@@ -69,7 +69,7 @@ def test_topological_nodes(idtype, n=100):
layers_dgl = dgl.topological_nodes_generator(g)
adjmat = g.adjacency_matrix()
adjmat = g.adjacency_matrix(transpose=False)
def tensor_topo_traverse():
n = g.number_of_nodes()
mask = F.copy_to(F.ones((n, 1)), F.cpu())
......
......@@ -24,7 +24,7 @@ def test_graph_conv(idtype):
g = dgl.from_networkx(nx.path_graph(3))
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx)
adj = g.adjacency_matrix(transpose=False, ctx=ctx)
conv = nn.GraphConv(5, 2, norm='none', bias=True)
conv.initialize(ctx=ctx)
......@@ -131,7 +131,7 @@ def _S2AXWb(A, N, X, W, b):
def test_tagconv():
g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx)
adj = g.adjacency_matrix(transpose=False, ctx=ctx)
norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5)
conv = nn.TAGConv(5, 2, bias=True)
......@@ -294,7 +294,7 @@ def test_dense_cheb_conv():
for k in range(1, 4):
ctx = F.ctx()
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
adj = g.adjacency_matrix(ctx=ctx).tostype('default')
adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
cheb = nn.ChebConv(5, 2, k)
dense_cheb = nn.DenseChebConv(5, 2, k)
cheb.initialize(ctx=ctx)
......@@ -318,7 +318,7 @@ def test_dense_cheb_conv():
def test_dense_graph_conv(idtype, g, norm_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx).tostype('default')
adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
conv.initialize(ctx=ctx)
......@@ -337,7 +337,7 @@ def test_dense_graph_conv(idtype, g, norm_type):
def test_dense_sage_conv(idtype, g):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx).tostype('default')
adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2)
sage.initialize(ctx=ctx)
......
......@@ -20,7 +20,7 @@ def _AXWb(A, X, W, b):
def test_graph_conv0():
g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx)
adj = g.adjacency_matrix(transpose=False, ctx=ctx)
conv = nn.GraphConv(5, 2, norm='none', bias=True)
conv = conv.to(ctx)
......@@ -125,7 +125,7 @@ def test_tagconv():
g = dgl.DGLGraph(nx.path_graph(3))
g = g.to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx)
adj = g.adjacency_matrix(transpose=False, ctx=ctx)
norm = th.pow(g.in_degrees().float(), -0.5)
conv = nn.TAGConv(5, 2, bias=True)
......@@ -599,7 +599,7 @@ def test_dense_graph_conv(norm_type, g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
# TODO(minjie): enable the following option after #1385
adj = g.adjacency_matrix(ctx=ctx).to_dense()
adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
dense_conv.weight.data = conv.weight.data
......@@ -616,7 +616,7 @@ def test_dense_graph_conv(norm_type, g, idtype):
def test_dense_sage_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx).to_dense()
adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2)
dense_sage.fc.weight.data = sage.fc_neigh.weight.data
......@@ -662,7 +662,7 @@ def test_dense_cheb_conv():
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
g = g.to(F.ctx())
adj = g.adjacency_matrix(ctx=ctx).to_dense()
adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
cheb = nn.ChebConv(5, 2, k, None)
dense_cheb = nn.DenseChebConv(5, 2, k)
#for i in range(len(cheb.fc)):
......
......@@ -21,7 +21,7 @@ def _AXWb(A, X, W, b):
def test_graph_conv():
g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx()
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(ctx=ctx)))
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=False, ctx=ctx)))
conv = nn.GraphConv(5, 2, norm='none', bias=True)
# conv = conv
......@@ -488,7 +488,7 @@ def test_dense_cheb_conv():
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1, random_state=42))
g = g.to(ctx)
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(ctx=ctx)))
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=False, ctx=ctx)))
cheb = nn.ChebConv(5, 2, k, None, bias=True)
dense_cheb = nn.DenseChebConv(5, 2, k, bias=True)
......
Markdown is supported
0% — or attach files by dragging & dropping, or selecting them.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment