Unverified Commit 9088c635 authored by Mufei Li's avatar Mufei Li Committed by GitHub
Browse files

[Feature] Change the Default Value of transpose in DGLGraph.adj (#2046)

* Update adj

* Fix
parent edfbee2c
...@@ -67,7 +67,7 @@ def gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, d ...@@ -67,7 +67,7 @@ def gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, d
# initialize graph # initialize graph
dur = [] dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx) adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs): for epoch in range(args.n_epochs):
start = time.time() start = time.time()
if distributed: if distributed:
......
...@@ -65,7 +65,7 @@ def graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samp ...@@ -65,7 +65,7 @@ def graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samp
# initialize graph # initialize graph
dur = [] dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx) adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs): for epoch in range(args.n_epochs):
start = time.time() start = time.time()
if distributed: if distributed:
......
...@@ -191,7 +191,7 @@ def gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, d ...@@ -191,7 +191,7 @@ def gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, d
# initialize graph # initialize graph
dur = [] dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx) adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs): for epoch in range(args.n_epochs):
start = time.time() start = time.time()
if distributed: if distributed:
......
...@@ -229,7 +229,7 @@ def graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samp ...@@ -229,7 +229,7 @@ def graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samp
# initialize graph # initialize graph
dur = [] dur = []
adj = g.adjacency_matrix().as_in_context(g_ctx) adj = g.adjacency_matrix(transpose=False).as_in_context(g_ctx)
for epoch in range(args.n_epochs): for epoch in range(args.n_epochs):
start = time.time() start = time.time()
if distributed: if distributed:
......
...@@ -106,7 +106,7 @@ class AdaptGenerator(object): ...@@ -106,7 +106,7 @@ class AdaptGenerator(object):
def __init__(self, graph, num_blocks, node_feature=None, sampler=None, num_workers=0, coalesce=False, def __init__(self, graph, num_blocks, node_feature=None, sampler=None, num_workers=0, coalesce=False,
sampler_weights=None, layer_nodes=None): sampler_weights=None, layer_nodes=None):
self.node_feature = node_feature self.node_feature = node_feature
adj = graph.adjacency_matrix_scipy() adj = graph.adjacency_matrix_scipy(transpose=False)
adj.data = np.ones(adj.nnz) adj.data = np.ones(adj.nnz)
self.norm_adj = normalize_adj(adj).tocsr() self.norm_adj = normalize_adj(adj).tocsr()
self.layer_nodes = layer_nodes self.layer_nodes = layer_nodes
...@@ -370,7 +370,7 @@ class AdaptGraphSAGENet(nn.Module): ...@@ -370,7 +370,7 @@ class AdaptGraphSAGENet(nn.Module):
]) ])
self.sample_weights = sample_weights self.sample_weights = sample_weights
self.node_feature = node_feature self.node_feature = node_feature
self.norm_adj = normalize_adj(trainG.adjacency_matrix_scipy()) self.norm_adj = normalize_adj(trainG.adjacency_matrix_scipy(transpose=False))
def forward(self, nf, h, is_test=False): def forward(self, nf, h, is_test=False):
for i, layer in enumerate(self.layers): for i, layer in enumerate(self.layers):
......
...@@ -123,7 +123,7 @@ class DiffPoolBatchedGraphLayer(nn.Module): ...@@ -123,7 +123,7 @@ class DiffPoolBatchedGraphLayer(nn.Module):
assign_tensor = masked_softmax(assign_tensor, mask, assign_tensor = masked_softmax(assign_tensor, mask,
memory_efficient=False) memory_efficient=False)
h = torch.matmul(torch.t(assign_tensor), feat) h = torch.matmul(torch.t(assign_tensor), feat)
adj = g.adjacency_matrix(ctx=device) adj = g.adjacency_matrix(transpose=False, ctx=device)
adj_new = torch.sparse.mm(adj, assign_tensor) adj_new = torch.sparse.mm(adj, assign_tensor)
adj_new = torch.mm(torch.t(assign_tensor), adj_new) adj_new = torch.mm(torch.t(assign_tensor), adj_new)
......
...@@ -3329,11 +3329,11 @@ class DGLHeteroGraph(object): ...@@ -3329,11 +3329,11 @@ class DGLHeteroGraph(object):
else: else:
return deg return deg
def adjacency_matrix(self, transpose=None, ctx=F.cpu(), scipy_fmt=None, etype=None): def adjacency_matrix(self, transpose=True, ctx=F.cpu(), scipy_fmt=None, etype=None):
"""Alias of :func:`adj`""" """Alias of :func:`adj`"""
return self.adj(transpose, ctx, scipy_fmt, etype) return self.adj(transpose, ctx, scipy_fmt, etype)
def adj(self, transpose=None, ctx=F.cpu(), scipy_fmt=None, etype=None): def adj(self, transpose=True, ctx=F.cpu(), scipy_fmt=None, etype=None):
"""Return the adjacency matrix of edges of the given edge type. """Return the adjacency matrix of edges of the given edge type.
By default, a row of returned adjacency matrix represents the By default, a row of returned adjacency matrix represents the
...@@ -3345,7 +3345,7 @@ class DGLHeteroGraph(object): ...@@ -3345,7 +3345,7 @@ class DGLHeteroGraph(object):
Parameters Parameters
---------- ----------
transpose : bool, optional transpose : bool, optional
A flag to transpose the returned adjacency matrix. (Default: False) A flag to transpose the returned adjacency matrix. (Default: True)
ctx : context, optional ctx : context, optional
The context of returned adjacency matrix. (Default: cpu) The context of returned adjacency matrix. (Default: cpu)
scipy_fmt : str, optional scipy_fmt : str, optional
...@@ -3381,31 +3381,24 @@ class DGLHeteroGraph(object): ...@@ -3381,31 +3381,24 @@ class DGLHeteroGraph(object):
Get a backend dependent sparse tensor. Here we use PyTorch for example. Get a backend dependent sparse tensor. Here we use PyTorch for example.
>>> g.adj(etype='develops') >>> g.adj(etype='develops')
tensor(indices=tensor([[0, 2], tensor(indices=tensor([[0, 1],
[0, 1]]), [0, 2]]),
values=tensor([1., 1.]), values=tensor([1., 1.]),
size=(3, 2), nnz=2, layout=torch.sparse_coo) size=(2, 3), nnz=2, layout=torch.sparse_coo)
Get a scipy coo sparse matrix. Get a scipy coo sparse matrix.
>>> g.adj(scipy_fmt='coo', etype='develops') >>> g.adj(scipy_fmt='coo', etype='develops')
<3x2 sparse matrix of type '<class 'numpy.int64'>' <2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in COOrdinate format> with 2 stored elements in COOrdinate format>
""" """
if transpose is None:
dgl_warning(
"Currently adjacency_matrix() returns a matrix with destination as rows"
" by default.\n\tIn 0.5 the result will have source as rows"
" (i.e. transpose=True)")
transpose = False
etid = self.get_etype_id(etype) etid = self.get_etype_id(etype)
if scipy_fmt is None: if scipy_fmt is None:
return self._graph.adjacency_matrix(etid, transpose, ctx)[0] return self._graph.adjacency_matrix(etid, transpose, ctx)[0]
else: else:
return self._graph.adjacency_matrix_scipy(etid, transpose, scipy_fmt, False) return self._graph.adjacency_matrix_scipy(etid, transpose, scipy_fmt, False)
def adjacency_matrix_scipy(self, transpose=None, fmt='csr', return_edge_ids=None): def adjacency_matrix_scipy(self, transpose=True, fmt='csr', return_edge_ids=None):
"""DEPRECATED: please use ``dgl.adjacency_matrix(transpose, scipy_fmt=fmt)``. """DEPRECATED: please use ``dgl.adjacency_matrix(transpose, scipy_fmt=fmt)``.
""" """
dgl_warning('DGLGraph.adjacency_matrix_scipy is deprecated. ' dgl_warning('DGLGraph.adjacency_matrix_scipy is deprecated. '
......
...@@ -453,7 +453,7 @@ def test_edge_ids(): ...@@ -453,7 +453,7 @@ def test_edge_ids():
@parametrize_dtype @parametrize_dtype
def test_adj(idtype): def test_adj(idtype):
g = create_test_heterograph(idtype) g = create_test_heterograph(idtype)
adj = F.sparse_to_numpy(g.adj(etype='follows')) adj = F.sparse_to_numpy(g.adj(transpose=False, etype='follows'))
assert np.allclose( assert np.allclose(
adj, adj,
np.array([[0., 0., 0.], np.array([[0., 0., 0.],
...@@ -465,7 +465,7 @@ def test_adj(idtype): ...@@ -465,7 +465,7 @@ def test_adj(idtype):
np.array([[0., 1., 0.], np.array([[0., 1., 0.],
[0., 0., 1.], [0., 0., 1.],
[0., 0., 0.]])) [0., 0., 0.]]))
adj = F.sparse_to_numpy(g.adj(etype='plays')) adj = F.sparse_to_numpy(g.adj(transpose=False, etype='plays'))
assert np.allclose( assert np.allclose(
adj, adj,
np.array([[1., 1., 0.], np.array([[1., 1., 0.],
...@@ -477,29 +477,29 @@ def test_adj(idtype): ...@@ -477,29 +477,29 @@ def test_adj(idtype):
[1., 1.], [1., 1.],
[0., 1.]])) [0., 1.]]))
adj = g.adj(scipy_fmt='csr', etype='follows') adj = g.adj(transpose=False, scipy_fmt='csr', etype='follows')
assert np.allclose( assert np.allclose(
adj.todense(), adj.todense(),
np.array([[0., 0., 0.], np.array([[0., 0., 0.],
[1., 0., 0.], [1., 0., 0.],
[0., 1., 0.]])) [0., 1., 0.]]))
adj = g.adj(scipy_fmt='coo', etype='follows') adj = g.adj(transpose=False, scipy_fmt='coo', etype='follows')
assert np.allclose( assert np.allclose(
adj.todense(), adj.todense(),
np.array([[0., 0., 0.], np.array([[0., 0., 0.],
[1., 0., 0.], [1., 0., 0.],
[0., 1., 0.]])) [0., 1., 0.]]))
adj = g.adj(scipy_fmt='csr', etype='plays') adj = g.adj(transpose=False, scipy_fmt='csr', etype='plays')
assert np.allclose( assert np.allclose(
adj.todense(), adj.todense(),
np.array([[1., 1., 0.], np.array([[1., 1., 0.],
[0., 1., 1.]])) [0., 1., 1.]]))
adj = g.adj(scipy_fmt='coo', etype='plays') adj = g.adj(transpose=False, scipy_fmt='coo', etype='plays')
assert np.allclose( assert np.allclose(
adj.todense(), adj.todense(),
np.array([[1., 1., 0.], np.array([[1., 1., 0.],
[0., 1., 1.]])) [0., 1., 1.]]))
adj = F.sparse_to_numpy(g['follows'].adj()) adj = F.sparse_to_numpy(g['follows'].adj(transpose=False))
assert np.allclose( assert np.allclose(
adj, adj,
np.array([[0., 0., 0.], np.array([[0., 0., 0.],
......
...@@ -69,7 +69,7 @@ def test_topological_nodes(idtype, n=100): ...@@ -69,7 +69,7 @@ def test_topological_nodes(idtype, n=100):
layers_dgl = dgl.topological_nodes_generator(g) layers_dgl = dgl.topological_nodes_generator(g)
adjmat = g.adjacency_matrix() adjmat = g.adjacency_matrix(transpose=False)
def tensor_topo_traverse(): def tensor_topo_traverse():
n = g.number_of_nodes() n = g.number_of_nodes()
mask = F.copy_to(F.ones((n, 1)), F.cpu()) mask = F.copy_to(F.ones((n, 1)), F.cpu())
......
...@@ -24,7 +24,7 @@ def test_graph_conv(idtype): ...@@ -24,7 +24,7 @@ def test_graph_conv(idtype):
g = dgl.from_networkx(nx.path_graph(3)) g = dgl.from_networkx(nx.path_graph(3))
g = g.astype(idtype).to(F.ctx()) g = g.astype(idtype).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx) adj = g.adjacency_matrix(transpose=False, ctx=ctx)
conv = nn.GraphConv(5, 2, norm='none', bias=True) conv = nn.GraphConv(5, 2, norm='none', bias=True)
conv.initialize(ctx=ctx) conv.initialize(ctx=ctx)
...@@ -131,7 +131,7 @@ def _S2AXWb(A, N, X, W, b): ...@@ -131,7 +131,7 @@ def _S2AXWb(A, N, X, W, b):
def test_tagconv(): def test_tagconv():
g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx()) g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx) adj = g.adjacency_matrix(transpose=False, ctx=ctx)
norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5) norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5)
conv = nn.TAGConv(5, 2, bias=True) conv = nn.TAGConv(5, 2, bias=True)
...@@ -294,7 +294,7 @@ def test_dense_cheb_conv(): ...@@ -294,7 +294,7 @@ def test_dense_cheb_conv():
for k in range(1, 4): for k in range(1, 4):
ctx = F.ctx() ctx = F.ctx()
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx()) g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
adj = g.adjacency_matrix(ctx=ctx).tostype('default') adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
cheb = nn.ChebConv(5, 2, k) cheb = nn.ChebConv(5, 2, k)
dense_cheb = nn.DenseChebConv(5, 2, k) dense_cheb = nn.DenseChebConv(5, 2, k)
cheb.initialize(ctx=ctx) cheb.initialize(ctx=ctx)
...@@ -318,7 +318,7 @@ def test_dense_cheb_conv(): ...@@ -318,7 +318,7 @@ def test_dense_cheb_conv():
def test_dense_graph_conv(idtype, g, norm_type): def test_dense_graph_conv(idtype, g, norm_type):
g = g.astype(idtype).to(F.ctx()) g = g.astype(idtype).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx).tostype('default') adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True) conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
conv.initialize(ctx=ctx) conv.initialize(ctx=ctx)
...@@ -337,7 +337,7 @@ def test_dense_graph_conv(idtype, g, norm_type): ...@@ -337,7 +337,7 @@ def test_dense_graph_conv(idtype, g, norm_type):
def test_dense_sage_conv(idtype, g): def test_dense_sage_conv(idtype, g):
g = g.astype(idtype).to(F.ctx()) g = g.astype(idtype).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx).tostype('default') adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
sage = nn.SAGEConv(5, 2, 'gcn') sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2) dense_sage = nn.DenseSAGEConv(5, 2)
sage.initialize(ctx=ctx) sage.initialize(ctx=ctx)
......
...@@ -20,7 +20,7 @@ def _AXWb(A, X, W, b): ...@@ -20,7 +20,7 @@ def _AXWb(A, X, W, b):
def test_graph_conv0(): def test_graph_conv0():
g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx()) g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx) adj = g.adjacency_matrix(transpose=False, ctx=ctx)
conv = nn.GraphConv(5, 2, norm='none', bias=True) conv = nn.GraphConv(5, 2, norm='none', bias=True)
conv = conv.to(ctx) conv = conv.to(ctx)
...@@ -125,7 +125,7 @@ def test_tagconv(): ...@@ -125,7 +125,7 @@ def test_tagconv():
g = dgl.DGLGraph(nx.path_graph(3)) g = dgl.DGLGraph(nx.path_graph(3))
g = g.to(F.ctx()) g = g.to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx) adj = g.adjacency_matrix(transpose=False, ctx=ctx)
norm = th.pow(g.in_degrees().float(), -0.5) norm = th.pow(g.in_degrees().float(), -0.5)
conv = nn.TAGConv(5, 2, bias=True) conv = nn.TAGConv(5, 2, bias=True)
...@@ -599,7 +599,7 @@ def test_dense_graph_conv(norm_type, g, idtype): ...@@ -599,7 +599,7 @@ def test_dense_graph_conv(norm_type, g, idtype):
g = g.astype(idtype).to(F.ctx()) g = g.astype(idtype).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
# TODO(minjie): enable the following option after #1385 # TODO(minjie): enable the following option after #1385
adj = g.adjacency_matrix(ctx=ctx).to_dense() adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True) conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
dense_conv.weight.data = conv.weight.data dense_conv.weight.data = conv.weight.data
...@@ -616,7 +616,7 @@ def test_dense_graph_conv(norm_type, g, idtype): ...@@ -616,7 +616,7 @@ def test_dense_graph_conv(norm_type, g, idtype):
def test_dense_sage_conv(g, idtype): def test_dense_sage_conv(g, idtype):
g = g.astype(idtype).to(F.ctx()) g = g.astype(idtype).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = g.adjacency_matrix(ctx=ctx).to_dense() adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
sage = nn.SAGEConv(5, 2, 'gcn') sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2) dense_sage = nn.DenseSAGEConv(5, 2)
dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.weight.data = sage.fc_neigh.weight.data
...@@ -662,7 +662,7 @@ def test_dense_cheb_conv(): ...@@ -662,7 +662,7 @@ def test_dense_cheb_conv():
ctx = F.ctx() ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True) g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
g = g.to(F.ctx()) g = g.to(F.ctx())
adj = g.adjacency_matrix(ctx=ctx).to_dense() adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
cheb = nn.ChebConv(5, 2, k, None) cheb = nn.ChebConv(5, 2, k, None)
dense_cheb = nn.DenseChebConv(5, 2, k) dense_cheb = nn.DenseChebConv(5, 2, k)
#for i in range(len(cheb.fc)): #for i in range(len(cheb.fc)):
......
...@@ -21,7 +21,7 @@ def _AXWb(A, X, W, b): ...@@ -21,7 +21,7 @@ def _AXWb(A, X, W, b):
def test_graph_conv(): def test_graph_conv():
g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx()) g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx() ctx = F.ctx()
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(ctx=ctx))) adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=False, ctx=ctx)))
conv = nn.GraphConv(5, 2, norm='none', bias=True) conv = nn.GraphConv(5, 2, norm='none', bias=True)
# conv = conv # conv = conv
...@@ -488,7 +488,7 @@ def test_dense_cheb_conv(): ...@@ -488,7 +488,7 @@ def test_dense_cheb_conv():
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1, random_state=42)) g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1, random_state=42))
g = g.to(ctx) g = g.to(ctx)
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(ctx=ctx))) adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=False, ctx=ctx)))
cheb = nn.ChebConv(5, 2, k, None, bias=True) cheb = nn.ChebConv(5, 2, k, None, bias=True)
dense_cheb = nn.DenseChebConv(5, 2, k, bias=True) dense_cheb = nn.DenseChebConv(5, 2, k, bias=True)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment