Commit e19cd62e authored by Quan (Andy) Gan, committed by Minjie Wang
Browse files

[Test] Unify tests for different backends (#333)

* test basics

* batched graph & filter, mxnet filter fix

* frame and function; bugfix

* test graph adj and inc matrices

* fixing start = 0 for mxnet

* test index

* inplace update & line graph

* multi send recv

* more tests

* oops

* more tests

* removing old test files; readonly graphs for mxnet still kept

* modifying test scripts

* adding a placeholder for pytorch to reserve directory

* torch 0.4.1 compat fixes

* moving backend out of compute to avoid nose detection

* tests guide

* mx sparse-to-dense/sparse-to-numpy is buggy

* oops

* contribution guide for unit tests

* printing incmat

* printing dlpack

* small push

* typo

* fixing duplicate entries that cause undefined behavior

* move equal comparison to backend
parent 3edcaa1e
import torch as th
import numpy as np
import scipy.sparse as sp
import dgl
import dgl.function as fn
import utils as U
import backend as F
D = 5
......@@ -16,19 +15,19 @@ def generate_graph():
g.add_edge(i, 9)
# add a back flow from 9 to 0
g.add_edge(9, 0)
g.ndata['f'] = th.randn(10, D)
g.edata['e'] = th.randn(17, D)
g.ndata['f'] = F.randn((10, D))
g.edata['e'] = F.randn((17, D))
return g
def test_inplace_recv():
u = th.tensor([0, 0, 0, 3, 4, 9])
v = th.tensor([1, 2, 3, 9, 9, 0])
u = F.tensor([0, 0, 0, 3, 4, 9])
v = F.tensor([1, 2, 3, 9, 9, 0])
def message_func(edges):
return {'m' : edges.src['f'] + edges.dst['f']}
def reduce_func(nodes):
return {'f' : th.sum(nodes.mailbox['m'], 1)}
return {'f' : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {'f' : 2 * nodes.data['f']}
......@@ -43,26 +42,26 @@ def test_inplace_recv():
result = g.get_n_repr()['f']
# inplace deg bucket run
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.send((u, v), message_func)
g.recv([0,1,2,3,9], reduce_func, apply_func, inplace=True)
r1 = g.get_n_repr()['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace e2v
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.send((u, v), message_func)
g.recv([0,1,2,3,9], fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# test send_and_recv with apply_func
_test(apply_func)
......@@ -70,14 +69,14 @@ def test_inplace_recv():
_test(None)
def test_inplace_snr():
u = th.tensor([0, 0, 0, 3, 4, 9])
v = th.tensor([1, 2, 3, 9, 9, 0])
u = F.tensor([0, 0, 0, 3, 4, 9])
v = F.tensor([1, 2, 3, 9, 9, 0])
def message_func(edges):
return {'m' : edges.src['f']}
def reduce_func(nodes):
return {'f' : th.sum(nodes.mailbox['m'], 1)}
return {'f' : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {'f' : 2 * nodes.data['f']}
......@@ -92,36 +91,36 @@ def test_inplace_snr():
result = g.ndata['f']
# inplace deg bucket
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.send_and_recv((u, v), message_func, reduce_func, apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace v2v spmv
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.send_and_recv((u, v), fn.copy_src(src='f', out='m'),
fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace e2v spmv
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.send_and_recv((u, v), message_func,
fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# test send_and_recv with apply_func
_test(apply_func)
......@@ -129,13 +128,13 @@ def test_inplace_snr():
_test(None)
def test_inplace_push():
nodes = th.tensor([0, 3, 4, 9])
nodes = F.tensor([0, 3, 4, 9])
def message_func(edges):
return {'m' : edges.src['f']}
def reduce_func(nodes):
return {'f' : th.sum(nodes.mailbox['m'], 1)}
return {'f' : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {'f' : 2 * nodes.data['f']}
......@@ -150,36 +149,36 @@ def test_inplace_push():
result = g.ndata['f']
# inplace deg bucket
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.push(nodes, message_func, reduce_func, apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace v2v spmv
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.push(nodes, fn.copy_src(src='f', out='m'),
fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace e2v spmv
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.push(nodes,
message_func, fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# test send_and_recv with apply_func
_test(apply_func)
......@@ -187,13 +186,13 @@ def test_inplace_push():
_test(None)
def test_inplace_pull():
nodes = th.tensor([1, 2, 3, 9])
nodes = F.tensor([1, 2, 3, 9])
def message_func(edges):
return {'m' : edges.src['f']}
def reduce_func(nodes):
return {'f' : th.sum(nodes.mailbox['m'], 1)}
return {'f' : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {'f' : 2 * nodes.data['f']}
......@@ -208,36 +207,36 @@ def test_inplace_pull():
result = g.ndata['f']
# inplace deg bucket
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.pull(nodes, message_func, reduce_func, apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace v2v spmv
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.pull(nodes, fn.copy_src(src='f', out='m'),
fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# inplace e2v spmv
v1 = f.clone()
v1 = F.clone(f)
g.ndata['f'] = v1
g.pull(nodes,
message_func, fn.sum(msg='m', out='f'), apply_func, inplace=True)
r1 = g.ndata['f']
# check result
assert U.allclose(r1, result)
assert F.allclose(r1, result)
# check inplace
assert U.allclose(v1, r1)
assert F.allclose(v1, r1)
# test send_and_recv with apply_func
_test(apply_func)
......@@ -261,11 +260,11 @@ def test_inplace_apply():
g.ndata['f'] = nf
g.apply_nodes(apply_node_func, nodes, inplace=True)
# check results correct and in place
assert U.allclose(nf, new_nf)
assert F.allclose(nf, new_nf)
# test apply all nodes, should not be done in place
g.ndata['f'] = nf
g.apply_nodes(apply_node_func, inplace=True)
assert U.allclose(nf, g.ndata['f']) == False
assert F.allclose(nf, g.ndata['f']) == False
edges = [3, 5, 7, 10]
ef = g.edata['e']
......@@ -276,11 +275,11 @@ def test_inplace_apply():
g.edata['e'] = ef
g.apply_edges(apply_edge_func, edges, inplace=True)
g.edata['e'] = ef
assert U.allclose(ef, new_ef)
assert F.allclose(ef, new_ef)
# test apply all edges, should not be done in place
g.edata['e'] == ef
g.apply_edges(apply_edge_func, inplace=True)
assert U.allclose(ef, g.edata['e']) == False
assert F.allclose(ef, g.edata['e']) == False
if __name__ == '__main__':
test_inplace_recv()
......
import torch as th
from torch.autograd import Variable
import numpy as np
import dgl
from dgl.graph import DGLGraph
import utils as U
from collections import defaultdict as ddict
import scipy.sparse as sp
import backend as F
D = 5
......@@ -18,7 +16,7 @@ def reduce_func(nodes):
msgs = nodes.mailbox['m']
assert len(msgs.shape) == 3
assert msgs.shape[2] == D
return {'accum' : th.sum(msgs, 1)}
return {'accum' : F.sum(msgs, 1)}
def apply_node_func(nodes):
return {'h' : nodes.data['h'] + nodes.data['accum']}
......@@ -31,8 +29,11 @@ def generate_graph(grad=False):
for i in range(1, 9):
g.add_edge(0, i)
g.add_edge(i, 9)
ncol = Variable(th.randn(10, D), requires_grad=grad)
ecol = Variable(th.randn(16, D), requires_grad=grad)
ncol = F.randn((10, D))
ecol = F.randn((16, D))
if grad:
ncol = F.attach_grad(ncol)
ecol = F.attach_grad(ecol)
g.set_n_initializer(dgl.init.zero_initializer)
g.set_e_initializer(dgl.init.zero_initializer)
g.ndata['h'] = ncol
......@@ -46,24 +47,24 @@ def test_multi_send():
return {'m' : edges.src['h']}
g.register_message_func(_fmsg)
# many-many send
u = th.tensor([0, 0, 0, 0, 0])
v = th.tensor([1, 2, 3, 4, 5])
u = F.tensor([0, 0, 0, 0, 0])
v = F.tensor([1, 2, 3, 4, 5])
g.send((u, v))
# duplicate send
u = th.tensor([0])
v = th.tensor([1, 2, 3, 4, 5])
u = F.tensor([0])
v = F.tensor([1, 2, 3, 4, 5])
g.send((u, v))
# send more
u = th.tensor([1, 2, 3, 4, 5])
v = th.tensor([9])
u = F.tensor([1, 2, 3, 4, 5])
v = F.tensor([9])
g.send((u, v))
# check if message indicator is as expected
expected = th.zeros((g.number_of_edges(),), dtype=th.int64)
expected = F.zeros((g.number_of_edges(),), dtype=F.int64)
eid = g.edge_ids([0, 0, 0, 0, 0, 1, 2, 3, 4, 5],
[1, 2, 3, 4, 5, 9, 9, 9, 9, 9])
expected[eid] = 1
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
def test_multi_recv():
# basic recv test
......@@ -72,53 +73,53 @@ def test_multi_recv():
g.register_message_func(message_func)
g.register_reduce_func(reduce_func)
g.register_apply_node_func(apply_node_func)
expected = th.zeros((g.number_of_edges(),), dtype=th.int64)
expected = F.zeros((g.number_of_edges(),), dtype=F.int64)
# two separate round of send and recv
u = [4, 5, 6]
v = [9]
g.send((u, v))
eid = g.edge_ids(u, v)
expected[eid] = 1
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
g.recv(v)
expected[eid] = 0
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
u = [0]
v = [1, 2, 3]
g.send((u, v))
eid = g.edge_ids(u, v)
expected[eid] = 1
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
g.recv(v)
expected[eid] = 0
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
h1 = g.ndata['h']
# one send, two recv
g.ndata['h'] = h
u = th.tensor([0, 0, 0, 4, 5, 6])
v = th.tensor([1, 2, 3, 9, 9, 9])
u = F.tensor([0, 0, 0, 4, 5, 6])
v = F.tensor([1, 2, 3, 9, 9, 9])
g.send((u, v))
eid = g.edge_ids(u, v)
expected[eid] = 1
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
u = [4, 5, 6]
v = [9]
g.recv(v)
eid = g.edge_ids(u, v)
expected[eid] = 0
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
u = [0]
v = [1, 2, 3]
g.recv(v)
eid = g.edge_ids(u, v)
expected[eid] = 0
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
h2 = g.ndata['h']
assert U.allclose(h1, h2)
assert F.allclose(h1, h2)
def test_multi_recv_0deg():
# test recv with 0deg nodes;
......@@ -130,7 +131,7 @@ def test_multi_recv_0deg():
def _apply(nodes):
return {'h' : nodes.data['h'] * 2}
def _init2(shape, dtype, ctx, ids):
return 2 + th.zeros(shape, dtype=dtype, device=ctx)
return 2 + F.zeros(shape, dtype=dtype, ctx=ctx)
g.register_message_func(_message)
g.register_reduce_func(_reduce)
g.register_apply_node_func(_apply)
......@@ -138,30 +139,30 @@ def test_multi_recv_0deg():
g.add_nodes(2)
g.add_edge(0, 1)
# recv both 0deg and non-0deg nodes
old = th.randn((2, 5))
old = F.randn((2, 5))
g.ndata['h'] = old
g.send((0, 1))
g.recv([0, 1])
new = g.ndata['h']
# 0deg check: initialized with the func and got applied
assert U.allclose(new[0], th.full((5,), 4))
assert F.allclose(new[0], F.full((5,), 4, F.float32))
# non-0deg check
assert U.allclose(new[1], th.sum(old, 0) * 2)
assert F.allclose(new[1], F.sum(old, 0) * 2)
# recv again on zero degree node
g.recv([0])
assert U.allclose(g.nodes[0].data['h'], th.full((5,), 8))
assert F.allclose(g.nodes[0].data['h'], F.full((5,), 8, F.float32))
# recv again on node with no incoming message
g.recv([1])
assert U.allclose(g.nodes[1].data['h'], th.sum(old, 0) * 4)
assert F.allclose(g.nodes[1].data['h'], F.sum(old, 0) * 4)
def test_send_twice_different_shape():
    """Sending twice with message functions that produce differently-shaped
    messages must not crash (the second send overwrites the first).

    NOTE(review): this span contained leftover diff residue — both the old
    ``th.cat`` line and its backend-agnostic ``F.cat`` replacement. Only the
    ``F.cat`` version belongs in the post-commit file; the ``th`` alias is no
    longer imported.
    """
    g = generate_graph()

    def _message_1(edges):
        # message with the plain source feature shape
        return {'h': edges.src['h']}

    def _message_2(edges):
        # message with a wider shape: source feature concatenated with edge data
        return {'h': F.cat((edges.src['h'], edges.data['w']), dim=1)}

    g.send(message_func=_message_1)
    g.send(message_func=_message_2)
......@@ -176,22 +177,22 @@ def test_send_twice_different_msg():
def _message_b(edges):
return {'a': edges.src['a'] * 3}
def _reduce(nodes):
return {'a': nodes.mailbox['a'].max(1)[0]}
return {'a': F.max(nodes.mailbox['a'], 1)}
old_repr = th.randn(3, 5)
old_repr = F.randn((3, 5))
g.ndata['a'] = old_repr
g.send((0, 1), _message_a)
g.send((0, 1), _message_b)
g.recv(1, _reduce)
new_repr = g.ndata['a']
assert U.allclose(new_repr[1], old_repr[0] * 3)
assert F.allclose(new_repr[1], old_repr[0] * 3)
g.ndata['a'] = old_repr
g.send((0, 1), _message_a)
g.send((2, 1), _message_b)
g.recv(1, _reduce)
new_repr = g.ndata['a']
assert U.allclose(new_repr[1], th.stack([old_repr[0], old_repr[2] * 3], 0).max(0)[0])
assert F.allclose(new_repr[1], F.max(F.stack([old_repr[0], old_repr[2] * 3], 0), 0))
def test_send_twice_different_field():
g = DGLGraph()
......@@ -203,16 +204,16 @@ def test_send_twice_different_field():
def _message_b(edges):
return {'b': edges.src['b']}
def _reduce(nodes):
return {'a': nodes.mailbox['a'].sum(1), 'b': nodes.mailbox['b'].sum(1)}
old_a = th.randn(2, 5)
old_b = th.randn(2, 5)
return {'a': F.sum(nodes.mailbox['a'], 1), 'b': F.sum(nodes.mailbox['b'], 1)}
old_a = F.randn((2, 5))
old_b = F.randn((2, 5))
g.set_n_repr({'a': old_a, 'b': old_b})
g.send((0, 1), _message_a)
g.send((0, 1), _message_b)
g.recv([1], _reduce)
new_repr = g.get_n_repr()
assert th.allclose(new_repr['a'][1], old_a[0])
assert th.allclose(new_repr['b'][1], old_b[0])
assert F.allclose(new_repr['a'][1], old_a[0])
assert F.allclose(new_repr['b'][1], old_b[0])
def test_dynamic_addition():
N = 3
......@@ -220,7 +221,7 @@ def test_dynamic_addition():
g = DGLGraph()
def _init(shape, dtype, ctx, ids):
return th.randn(shape, dtype=dtype, device=ctx)
return F.copy_to(F.astype(F.randn(shape), dtype), ctx)
g.set_n_initializer(_init)
g.set_e_initializer(_init)
......@@ -228,7 +229,7 @@ def test_dynamic_addition():
return {'m' : edges.src['h1'] + edges.dst['h2'] + edges.data['h1'] +
edges.data['h2']}
def _reduce(nodes):
return {'h' : nodes.mailbox['m'].sum(1)}
return {'h' : F.sum(nodes.mailbox['m'], 1)}
def _apply(nodes):
return {'h' : nodes.data['h']}
......@@ -240,24 +241,24 @@ def test_dynamic_addition():
# add nodes and edges
g.add_nodes(N)
g.ndata.update({'h1': th.randn(N, D),
'h2': th.randn(N, D)})
g.ndata.update({'h1': F.randn((N, D)),
'h2': F.randn((N, D))})
g.add_nodes(3)
g.add_edge(0, 1)
g.add_edge(1, 0)
g.edata.update({'h1': th.randn(2, D),
'h2': th.randn(2, D)})
g.edata.update({'h1': F.randn((2, D)),
'h2': F.randn((2, D))})
g.send()
expected = th.ones((g.number_of_edges(),), dtype=th.int64)
assert th.equal(g._msg_index.tousertensor(), expected)
expected = F.ones((g.number_of_edges(),), dtype=F.int64)
assert F.array_equal(g._msg_index.tousertensor(), expected)
# add more edges
g.add_edges([0, 2], [2, 0], {'h1': th.randn(2, D)})
g.add_edges([0, 2], [2, 0], {'h1': F.randn((2, D))})
g.send(([0, 2], [2, 0]))
g.recv(0)
g.add_edge(1, 2)
g.edges[4].data['h1'] = th.randn(1, D)
g.edges[4].data['h1'] = F.randn((1, D))
g.send((1, 2))
g.recv([1, 2])
......@@ -266,7 +267,7 @@ def test_dynamic_addition():
# a complete round of send and recv
g.send()
g.recv()
assert U.allclose(h, g.ndata['h'])
assert F.allclose(h, g.ndata['h'])
def test_recv_no_send():
g = generate_graph()
......@@ -276,14 +277,14 @@ def test_recv_no_send():
g.add_nodes(3)
g.add_edges([0, 1], [1, 2])
g.set_n_initializer(dgl.init.zero_initializer)
g.ndata['h'] = th.randn(3, D)
g.ndata['h'] = F.randn((3, D))
g.send((1, 2), message_func)
expected = th.zeros((2,), dtype=th.int64)
expected = F.zeros((2,), dtype=F.int64)
expected[1] = 1
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
g.recv(2, reduce_func)
expected[1] = 0
assert th.equal(g._msg_index.tousertensor(), expected)
assert F.array_equal(g._msg_index.tousertensor(), expected)
def test_send_recv_after_conversion():
# test send and recv after converting from a graph with edges
......@@ -303,7 +304,9 @@ def test_send_recv_after_conversion():
row, col= g.all_edges()
data = range(len(row))
n = g.number_of_nodes()
a = sp.coo_matrix((data, (row, col)), shape=(n, n))
a = sp.coo_matrix(
(data, (F.zerocopy_to_numpy(row), F.zerocopy_to_numpy(col))),
shape=(n, n))
g2 = DGLGraph()
# some random node and edges
g2.add_nodes(5)
......@@ -333,8 +336,8 @@ def test_send_recv_after_conversion():
g2.recv([0, 2, 4, 8], reduce_func=reduce_func,
apply_node_func=apply_node_func)
assert U.allclose(g.ndata['h'], g1.ndata['h'])
assert U.allclose(g.ndata['h'], g2.ndata['h'])
assert F.allclose(g.ndata['h'], g1.ndata['h'])
assert F.allclose(g.ndata['h'], g2.ndata['h'])
if __name__ == '__main__':
......
......@@ -2,10 +2,8 @@ import dgl
from dgl.frame import Frame, FrameRef, Column
from dgl.graph_index import create_graph_index
from dgl.utils import toindex
import dgl.backend as backend
import dgl.function as F
import utils as U
import torch
import backend as F
import dgl.function as fn
import pickle
import io
......@@ -25,7 +23,7 @@ def test_pickling_index():
i2 = _reconstruct_pickle(i)
assert torch.equal(i2.tousertensor(), i.tousertensor())
assert F.array_equal(i2.tousertensor(), i.tousertensor())
def test_pickling_graph_index():
......@@ -39,24 +37,24 @@ def test_pickling_graph_index():
assert gi2.number_of_nodes() == gi.number_of_nodes()
src_idx2, dst_idx2, _ = gi2.edges()
assert torch.equal(src_idx.tousertensor(), src_idx2.tousertensor())
assert torch.equal(dst_idx.tousertensor(), dst_idx2.tousertensor())
assert F.array_equal(src_idx.tousertensor(), src_idx2.tousertensor())
assert F.array_equal(dst_idx.tousertensor(), dst_idx2.tousertensor())
def test_pickling_frame():
x = torch.randn(3, 7)
y = torch.randn(3, 5)
x = F.randn((3, 7))
y = F.randn((3, 5))
c = Column(x)
c2 = _reconstruct_pickle(c)
assert U.allclose(c.data, c2.data)
assert F.allclose(c.data, c2.data)
fr = Frame({'x': x, 'y': y})
fr2 = _reconstruct_pickle(fr)
assert U.allclose(fr2['x'].data, x)
assert U.allclose(fr2['y'].data, y)
assert F.allclose(fr2['x'].data, x)
assert F.allclose(fr2['y'].data, y)
fr = Frame()
......@@ -65,15 +63,15 @@ def _assert_is_identical(g, g2):
assert g.number_of_nodes() == g2.number_of_nodes()
src, dst = g.all_edges()
src2, dst2 = g2.all_edges()
assert torch.equal(src, src2)
assert torch.equal(dst, dst2)
assert F.array_equal(src, src2)
assert F.array_equal(dst, dst2)
assert len(g.ndata) == len(g2.ndata)
assert len(g.edata) == len(g2.edata)
for k in g.ndata:
assert U.allclose(g.ndata[k], g2.ndata[k])
assert F.allclose(g.ndata[k], g2.ndata[k])
for k in g.edata:
assert U.allclose(g.edata[k], g2.edata[k])
assert F.allclose(g.edata[k], g2.edata[k])
def _global_message_func(nodes):
......@@ -83,14 +81,14 @@ def test_pickling_graph():
# graph structures and frames are pickled
g = dgl.DGLGraph()
g.add_nodes(3)
src = torch.LongTensor([0, 0])
dst = torch.LongTensor([1, 2])
src = F.tensor([0, 0])
dst = F.tensor([1, 2])
g.add_edges(src, dst)
x = torch.randn(3, 7)
y = torch.randn(3, 5)
a = torch.randn(2, 6)
b = torch.randn(2, 4)
x = F.randn((3, 7))
y = F.randn((3, 5))
a = F.randn((2, 6))
b = F.randn((2, 4))
g.ndata['x'] = x
g.ndata['y'] = y
......@@ -99,7 +97,7 @@ def test_pickling_graph():
# registered functions are pickled
g.register_message_func(_global_message_func)
reduce_func = F.sum('x', 'x')
reduce_func = fn.sum('x', 'x')
g.register_reduce_func(reduce_func)
# custom attributes should be pickled
......@@ -112,21 +110,21 @@ def test_pickling_graph():
assert new_g._message_func == _global_message_func
assert isinstance(new_g._reduce_func, type(reduce_func))
assert new_g._reduce_func._name == 'sum'
assert new_g._reduce_func.reduce_op == backend.sum
assert new_g._reduce_func.reduce_op == F.sum
assert new_g._reduce_func.msg_field == 'x'
assert new_g._reduce_func.out_field == 'x'
# test batched graph with partial set case
g2 = dgl.DGLGraph()
g2.add_nodes(4)
src2 = torch.LongTensor([0, 1])
dst2 = torch.LongTensor([2, 3])
src2 = F.tensor([0, 1])
dst2 = F.tensor([2, 3])
g2.add_edges(src2, dst2)
x2 = torch.randn(4, 7)
y2 = torch.randn(3, 5)
a2 = torch.randn(2, 6)
b2 = torch.randn(2, 4)
x2 = F.randn((4, 7))
y2 = F.randn((3, 5))
a2 = F.randn((2, 6))
b2 = F.randn((2, 4))
g2.ndata['x'] = x2
g2.nodes[[0, 1, 3]].data['y'] = y2
......
import dgl
import networkx as nx
import torch as th
import backend as F
import utils as U
def mfunc(edges):
    """Message function: forward each source node's 'x' feature as message 'm'."""
    src_feature = edges.src['x']
    return {'m': src_feature}
def rfunc(nodes):
    """Reduce function: sum incoming 'm' messages over the message axis and
    accumulate the result into the node feature 'x'.

    NOTE(review): this span contained diff residue — ``msg`` was assigned
    twice, once via the removed ``th.sum`` call and once via the backend
    wrapper ``F.sum``. Only the ``F.sum`` version belongs in the final file.
    """
    msg = F.sum(nodes.mailbox['m'], 1)
    return {'x': nodes.data['x'] + msg}
def test_prop_nodes_bfs():
    """Propagate messages along BFS node frontiers on a 5-node path graph.

    NOTE(review): this span contained diff residue — the old ``th.ones`` /
    ``th.tensor`` lines appeared next to their backend-agnostic ``F.*``
    replacements. Only the ``F.*`` versions belong in the post-commit file.
    """
    g = dgl.DGLGraph(nx.path_graph(5))
    g.ndata['x'] = F.ones((5, 2))
    g.register_message_func(mfunc)
    g.register_reduce_func(rfunc)
    dgl.prop_nodes_bfs(g, 0)
    # pull nodes using bfs order will result in a cumsum[i] + data[i] + data[i+1]
    assert F.allclose(g.ndata['x'],
                      F.tensor([[2., 2.], [4., 4.], [6., 6.], [8., 8.], [9., 9.]]))
def test_prop_edges_dfs():
    """Propagate messages along DFS edge orderings on a 5-node path graph,
    exercising the plain, reverse-edge, and nontree-edge traversal variants.

    NOTE(review): this span contained diff residue — each old ``th.*`` line
    appeared next to its backend-agnostic ``F.*`` replacement. Only the
    ``F.*`` versions belong in the post-commit file.
    """
    g = dgl.DGLGraph(nx.path_graph(5))
    g.register_message_func(mfunc)
    g.register_reduce_func(rfunc)

    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_edges_dfs(g, 0)
    # snr using dfs results in a cumsum
    assert F.allclose(g.ndata['x'],
                      F.tensor([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]]))

    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_edges_dfs(g, 0, has_reverse_edge=True)
    # result is cumsum[i] + cumsum[i-1]
    assert F.allclose(g.ndata['x'],
                      F.tensor([[1., 1.], [3., 3.], [5., 5.], [7., 7.], [9., 9.]]))

    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_edges_dfs(g, 0, has_nontree_edge=True)
    # result is cumsum[i] + cumsum[i+1]
    assert F.allclose(g.ndata['x'],
                      F.tensor([[3., 3.], [5., 5.], [7., 7.], [9., 9.], [5., 5.]]))
def test_prop_nodes_topo():
# bi-directional chain
......@@ -59,12 +59,12 @@ def test_prop_nodes_topo():
tree.register_message_func(mfunc)
tree.register_reduce_func(rfunc)
# init node feature data
tree.ndata['x'] = th.zeros((5, 2))
tree.ndata['x'] = F.zeros((5, 2))
# set all leaf nodes to be ones
tree.nodes[[1, 3, 4]].data['x'] = th.ones((3, 2))
tree.nodes[[1, 3, 4]].data['x'] = F.ones((3, 2))
dgl.prop_nodes_topo(tree)
# root node get the sum
assert U.allclose(tree.nodes[0].data['x'], th.tensor([[3., 3.]]))
assert F.allclose(tree.nodes[0].data['x'], F.tensor([[3., 3.]]))
if __name__ == '__main__':
test_prop_nodes_bfs()
......
import torch as th
import dgl
import utils as U
import backend as F
def test_simple_readout():
g1 = dgl.DGLGraph()
......@@ -9,47 +8,47 @@ def test_simple_readout():
g2.add_nodes(4) # no edges
g1.add_edges([0, 1, 2], [2, 0, 1])
n1 = th.randn(3, 5)
n2 = th.randn(4, 5)
e1 = th.randn(3, 5)
s1 = n1.sum(0) # node sums
s2 = n2.sum(0)
se1 = e1.sum(0) # edge sums
m1 = n1.mean(0) # node means
m2 = n2.mean(0)
me1 = e1.mean(0) # edge means
w1 = th.randn(3)
w2 = th.randn(4)
ws1 = (n1 * w1[:, None]).sum(0) # weighted node sums
ws2 = (n2 * w2[:, None]).sum(0)
wm1 = (n1 * w1[:, None]).sum(0) / w1[:, None].sum(0) # weighted node means
wm2 = (n2 * w2[:, None]).sum(0) / w2[:, None].sum(0)
n1 = F.randn((3, 5))
n2 = F.randn((4, 5))
e1 = F.randn((3, 5))
s1 = F.sum(n1, 0) # node sums
s2 = F.sum(n2, 0)
se1 = F.sum(e1, 0) # edge sums
m1 = F.mean(n1, 0) # node means
m2 = F.mean(n2, 0)
me1 = F.mean(e1, 0) # edge means
w1 = F.randn((3,))
w2 = F.randn((4,))
ws1 = F.sum(n1 * F.unsqueeze(w1, 1), 0)
ws2 = F.sum(n2 * F.unsqueeze(w2, 1), 0)
wm1 = F.sum(n1 * F.unsqueeze(w1, 1), 0) / F.sum(F.unsqueeze(w1, 1), 0)
wm2 = F.sum(n2 * F.unsqueeze(w2, 1), 0) / F.sum(F.unsqueeze(w2, 1), 0)
g1.ndata['x'] = n1
g2.ndata['x'] = n2
g1.ndata['w'] = w1
g2.ndata['w'] = w2
g1.edata['x'] = e1
assert U.allclose(dgl.sum_nodes(g1, 'x'), s1)
assert U.allclose(dgl.sum_nodes(g1, 'x', 'w'), ws1)
assert U.allclose(dgl.sum_edges(g1, 'x'), se1)
assert U.allclose(dgl.mean_nodes(g1, 'x'), m1)
assert U.allclose(dgl.mean_nodes(g1, 'x', 'w'), wm1)
assert U.allclose(dgl.mean_edges(g1, 'x'), me1)
assert F.allclose(dgl.sum_nodes(g1, 'x'), s1)
assert F.allclose(dgl.sum_nodes(g1, 'x', 'w'), ws1)
assert F.allclose(dgl.sum_edges(g1, 'x'), se1)
assert F.allclose(dgl.mean_nodes(g1, 'x'), m1)
assert F.allclose(dgl.mean_nodes(g1, 'x', 'w'), wm1)
assert F.allclose(dgl.mean_edges(g1, 'x'), me1)
g = dgl.batch([g1, g2])
s = dgl.sum_nodes(g, 'x')
m = dgl.mean_nodes(g, 'x')
assert U.allclose(s, th.stack([s1, s2], 0))
assert U.allclose(m, th.stack([m1, m2], 0))
assert F.allclose(s, F.stack([s1, s2], 0))
assert F.allclose(m, F.stack([m1, m2], 0))
ws = dgl.sum_nodes(g, 'x', 'w')
wm = dgl.mean_nodes(g, 'x', 'w')
assert U.allclose(ws, th.stack([ws1, ws2], 0))
assert U.allclose(wm, th.stack([wm1, wm2], 0))
assert F.allclose(ws, F.stack([ws1, ws2], 0))
assert F.allclose(wm, F.stack([wm1, wm2], 0))
s = dgl.sum_edges(g, 'x')
m = dgl.mean_edges(g, 'x')
assert U.allclose(s, th.stack([se1, th.zeros(5)], 0))
assert U.allclose(m, th.stack([me1, th.zeros(5)], 0))
assert F.allclose(s, F.stack([se1, F.zeros(5)], 0))
assert F.allclose(m, F.stack([me1, F.zeros(5)], 0))
if __name__ == '__main__':
......
import torch as th
import numpy as np
import scipy.sparse as sp
import dgl
import dgl.function as fn
import utils as U
import backend as F
D = 5
......@@ -16,9 +15,9 @@ def generate_graph():
g.add_edge(i, 9)
# add a back flow from 9 to 0
g.add_edge(9, 0)
g.set_n_repr({'f1' : th.randn(10,), 'f2' : th.randn(10, D)})
weights = th.randn(17,)
g.set_e_repr({'e1': weights, 'e2': th.unsqueeze(weights, 1)})
g.set_n_repr({'f1' : F.randn((10,)), 'f2' : F.randn((10, D))})
weights = F.randn((17,))
g.set_e_repr({'e1': weights, 'e2': F.unsqueeze(weights, 1)})
return g
def test_v2v_update_all():
......@@ -33,7 +32,7 @@ def test_v2v_update_all():
return {'m' : edges.src[fld] * edges.data['e2']}
def reduce_func(nodes):
return {fld : th.sum(nodes.mailbox['m'], 1)}
return {fld : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {fld : 2 * nodes.data[fld]}
......@@ -45,7 +44,7 @@ def test_v2v_update_all():
g.set_n_repr({fld : v1})
g.update_all(message_func, reduce_func, apply_func)
v3 = g.ndata[fld]
assert U.allclose(v2, v3)
assert F.allclose(v2, v3)
# update all with edge weights
v1 = g.ndata[fld]
g.update_all(fn.src_mul_edge(src=fld, edge='e1', out='m'),
......@@ -58,16 +57,16 @@ def test_v2v_update_all():
g.set_n_repr({fld : v1})
g.update_all(message_func_edge, reduce_func, apply_func)
v4 = g.ndata[fld]
assert U.allclose(v2, v3)
assert U.allclose(v3, v4)
assert F.allclose(v2, v3)
assert F.allclose(v3, v4)
# test 1d node features
_test('f1')
# test 2d node features
_test('f2')
def test_v2v_snr():
u = th.tensor([0, 0, 0, 3, 4, 9])
v = th.tensor([1, 2, 3, 9, 9, 0])
u = F.tensor([0, 0, 0, 3, 4, 9])
v = F.tensor([1, 2, 3, 9, 9, 0])
def _test(fld):
def message_func(edges):
return {'m' : edges.src[fld]}
......@@ -79,7 +78,7 @@ def test_v2v_snr():
return {'m' : edges.src[fld] * edges.data['e2']}
def reduce_func(nodes):
return {fld : th.sum(nodes.mailbox['m'], 1)}
return {fld : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {fld : 2 * nodes.data[fld]}
......@@ -92,7 +91,7 @@ def test_v2v_snr():
g.set_n_repr({fld : v1})
g.send_and_recv((u, v), message_func, reduce_func, apply_func)
v3 = g.ndata[fld]
assert U.allclose(v2, v3)
assert F.allclose(v2, v3)
# send and recv with edge weights
v1 = g.ndata[fld]
g.send_and_recv((u, v), fn.src_mul_edge(src=fld, edge='e1', out='m'),
......@@ -105,8 +104,8 @@ def test_v2v_snr():
g.set_n_repr({fld : v1})
g.send_and_recv((u, v), message_func_edge, reduce_func, apply_func)
v4 = g.ndata[fld]
assert U.allclose(v2, v3)
assert U.allclose(v3, v4)
assert F.allclose(v2, v3)
assert F.allclose(v3, v4)
# test 1d node features
_test('f1')
# test 2d node features
......@@ -114,7 +113,7 @@ def test_v2v_snr():
def test_v2v_pull():
nodes = th.tensor([1, 2, 3, 9])
nodes = F.tensor([1, 2, 3, 9])
def _test(fld):
def message_func(edges):
return {'m' : edges.src[fld]}
......@@ -126,7 +125,7 @@ def test_v2v_pull():
return {'m' : edges.src[fld] * edges.data['e2']}
def reduce_func(nodes):
return {fld : th.sum(nodes.mailbox['m'], 1)}
return {fld : F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {fld : 2 * nodes.data[fld]}
......@@ -138,7 +137,7 @@ def test_v2v_pull():
g.ndata[fld] = v1
g.pull(nodes, message_func, reduce_func, apply_func)
v3 = g.ndata[fld]
assert U.allclose(v2, v3)
assert F.allclose(v2, v3)
# send and recv with edge weights
v1 = g.ndata[fld]
g.pull(nodes, fn.src_mul_edge(src=fld, edge='e1', out='m'),
......@@ -151,8 +150,8 @@ def test_v2v_pull():
g.ndata[fld] = v1
g.pull(nodes, message_func_edge, reduce_func, apply_func)
v4 = g.ndata[fld]
assert U.allclose(v2, v3)
assert U.allclose(v3, v4)
assert F.allclose(v2, v3)
assert F.allclose(v3, v4)
# test 1d node features
_test('f1')
# test 2d node features
......@@ -166,10 +165,10 @@ def test_v2v_update_all_multi_fn():
return {'m2': edges.src['f2'] * edges.data['e2']}
def reduce_func(nodes):
return {'v1': th.sum(nodes.mailbox['m2'], 1)}
return {'v1': F.sum(nodes.mailbox['m2'], 1)}
g = generate_graph()
g.set_n_repr({'v1' : th.zeros((10,)), 'v2' : th.zeros((10,))})
g.set_n_repr({'v1' : F.zeros((10,)), 'v2' : F.zeros((10,))})
fld = 'f2'
g.update_all(message_func, reduce_func)
......@@ -179,8 +178,8 @@ def test_v2v_update_all_multi_fn():
g.update_all(fn.copy_src(src=fld, out='m'), [fn.sum(msg='m', out='v2'), fn.sum(msg='m', out='v3')])
v2 = g.ndata['v2']
v3 = g.ndata['v3']
assert U.allclose(v1, v2)
assert U.allclose(v1, v3)
assert F.allclose(v1, v2)
assert F.allclose(v1, v3)
# update all with edge weights, 2 message, 3 reduces
g.update_all([fn.src_mul_edge(src=fld, edge='e1', out='m1'), fn.src_mul_edge(src=fld, edge='e2', out='m2')],
......@@ -189,17 +188,17 @@ def test_v2v_update_all_multi_fn():
v1 = g.ndata['v1']
v2 = g.ndata['v2']
v3 = g.ndata['v3']
assert U.allclose(v1, v2)
assert U.allclose(v1, v3)
assert F.allclose(v1, v2)
assert F.allclose(v1, v3)
# run UDF with single message and reduce
g.update_all(message_func_edge, reduce_func, None)
v2 = g.ndata['v2']
assert U.allclose(v1, v2)
assert F.allclose(v1, v2)
def test_v2v_snr_multi_fn():
u = th.tensor([0, 0, 0, 3, 4, 9])
v = th.tensor([1, 2, 3, 9, 9, 0])
u = F.tensor([0, 0, 0, 3, 4, 9])
v = F.tensor([1, 2, 3, 9, 9, 0])
def message_func(edges):
return {'m2': edges.src['f2']}
......@@ -208,11 +207,11 @@ def test_v2v_snr_multi_fn():
return {'m2': edges.src['f2'] * edges.data['e2']}
def reduce_func(nodes):
return {'v1' : th.sum(nodes.mailbox['m2'], 1)}
return {'v1' : F.sum(nodes.mailbox['m2'], 1)}
g = generate_graph()
g.set_n_repr({'v1' : th.zeros((10, D)), 'v2' : th.zeros((10, D)),
'v3' : th.zeros((10, D))})
g.set_n_repr({'v1' : F.zeros((10, D)), 'v2' : F.zeros((10, D)),
'v3' : F.zeros((10, D))})
fld = 'f2'
g.send_and_recv((u, v), message_func, reduce_func)
......@@ -225,8 +224,8 @@ def test_v2v_snr_multi_fn():
None)
v2 = g.ndata['v2']
v3 = g.ndata['v3']
assert U.allclose(v1, v2)
assert U.allclose(v1, v3)
assert F.allclose(v1, v2)
assert F.allclose(v1, v3)
# send and recv with edge weights, 2 message, 3 reduces
g.send_and_recv((u, v),
......@@ -236,14 +235,14 @@ def test_v2v_snr_multi_fn():
v1 = g.ndata['v1']
v2 = g.ndata['v2']
v3 = g.ndata['v3']
assert U.allclose(v1, v2)
assert U.allclose(v1, v3)
assert F.allclose(v1, v2)
assert F.allclose(v1, v3)
# run UDF with single message and reduce
g.send_and_recv((u, v), message_func_edge,
reduce_func, None)
v2 = g.ndata['v2']
assert U.allclose(v1, v2)
assert F.allclose(v1, v2)
def test_e2v_update_all_multi_fn():
def _test(fld):
......@@ -252,7 +251,7 @@ def test_e2v_update_all_multi_fn():
'm2' : edges.src[fld] * edges.dst[fld]}
def reduce_func(nodes):
return {fld : th.sum(nodes.mailbox['m1'] + nodes.mailbox['m2'], 1)}
return {fld : F.sum(nodes.mailbox['m1'] + nodes.mailbox['m2'], 1)}
def apply_func(nodes):
return {fld : 2 * nodes.data[fld]}
......@@ -274,7 +273,7 @@ def test_e2v_update_all_multi_fn():
apply_func_2)
v3 = g.get_n_repr()[fld]
assert U.allclose(v2, v3)
assert F.allclose(v2, v3)
# test 1d node features
_test('f1')
......@@ -282,15 +281,15 @@ def test_e2v_update_all_multi_fn():
_test('f2')
def test_e2v_snr_multi_fn():
u = th.tensor([0, 0, 0, 3, 4, 9])
v = th.tensor([1, 2, 3, 9, 9, 0])
u = F.tensor([0, 0, 0, 3, 4, 9])
v = F.tensor([1, 2, 3, 9, 9, 0])
def _test(fld):
def message_func(edges):
return {'m1' : edges.src[fld] + edges.dst[fld],
'm2' : edges.src[fld] * edges.dst[fld]}
def reduce_func(nodes):
return {fld : th.sum(nodes.mailbox['m1'] + nodes.mailbox['m2'], 1)}
return {fld : F.sum(nodes.mailbox['m1'] + nodes.mailbox['m2'], 1)}
def apply_func(nodes):
return {fld : 2 * nodes.data[fld]}
......@@ -312,7 +311,7 @@ def test_e2v_snr_multi_fn():
apply_func_2)
v3 = g.get_n_repr()[fld]
assert U.allclose(v2, v3)
assert F.allclose(v2, v3)
# test 1d node features
_test('f1')
......@@ -320,15 +319,15 @@ def test_e2v_snr_multi_fn():
_test('f2')
def test_e2v_recv_multi_fn():
u = th.tensor([0, 0, 0, 3, 4, 9])
v = th.tensor([1, 2, 3, 9, 9, 0])
u = F.tensor([0, 0, 0, 3, 4, 9])
v = F.tensor([1, 2, 3, 9, 9, 0])
def _test(fld):
def message_func(edges):
return {'m1' : edges.src[fld] + edges.dst[fld],
'm2' : edges.src[fld] * edges.dst[fld]}
def reduce_func(nodes):
return {fld : th.sum(nodes.mailbox['m1'] + nodes.mailbox['m2'], 1)}
return {fld : F.sum(nodes.mailbox['m1'] + nodes.mailbox['m2'], 1)}
def apply_func(nodes):
return {fld : 2 * nodes.data[fld]}
......@@ -352,7 +351,7 @@ def test_e2v_recv_multi_fn():
apply_func_2)
v3 = g.get_n_repr()[fld]
assert U.allclose(v2, v3)
assert F.allclose(v2, v3)
# test 1d node features
_test('f1')
......@@ -366,19 +365,19 @@ def test_update_all_multi_fallback():
for i in range(1, 9):
g.add_edge(0, i)
g.add_edge(i, 9)
g.ndata['h'] = th.randn(10, D)
g.edata['w1'] = th.randn(16,)
g.edata['w2'] = th.randn(16, D)
g.ndata['h'] = F.randn((10, D))
g.edata['w1'] = F.randn((16,))
g.edata['w2'] = F.randn((16, D))
def _mfunc_hxw1(edges):
return {'m1' : edges.src['h'] * th.unsqueeze(edges.data['w1'], 1)}
return {'m1' : edges.src['h'] * F.unsqueeze(edges.data['w1'], 1)}
def _mfunc_hxw2(edges):
return {'m2' : edges.src['h'] * edges.data['w2']}
def _rfunc_m1(nodes):
return {'o1' : th.sum(nodes.mailbox['m1'], 1)}
return {'o1' : F.sum(nodes.mailbox['m1'], 1)}
def _rfunc_m2(nodes):
return {'o2' : th.sum(nodes.mailbox['m2'], 1)}
return {'o2' : F.sum(nodes.mailbox['m2'], 1)}
def _rfunc_m1max(nodes):
return {'o3' : th.max(nodes.mailbox['m1'], 1)[0]}
return {'o3' : F.max(nodes.mailbox['m1'], 1)}
def _afunc(nodes):
ret = {}
for k, v in nodes.data.items():
......@@ -396,29 +395,29 @@ def test_update_all_multi_fallback():
g.update_all(fn.src_mul_edge(src='h', edge='w1', out='m1'),
fn.sum(msg='m1', out='o1'),
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o1, g.ndata.pop('o1'))
# v2v fallback to e2v
g.update_all(fn.src_mul_edge(src='h', edge='w2', out='m2'),
fn.sum(msg='m2', out='o2'),
_afunc)
assert U.allclose(o2, g.ndata.pop('o2'))
assert F.allclose(o2, g.ndata.pop('o2'))
# v2v fallback to degree bucketing
g.update_all(fn.src_mul_edge(src='h', edge='w1', out='m1'),
fn.max(msg='m1', out='o3'),
_afunc)
assert U.allclose(o3, g.ndata.pop('o3'))
assert F.allclose(o3, g.ndata.pop('o3'))
# multi builtins, both v2v spmv
g.update_all([fn.src_mul_edge(src='h', edge='w1', out='m1'), fn.src_mul_edge(src='h', edge='w1', out='m2')],
[fn.sum(msg='m1', out='o1'), fn.sum(msg='m2', out='o2')],
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert U.allclose(o1, g.ndata.pop('o2'))
assert F.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o1, g.ndata.pop('o2'))
# multi builtins, one v2v spmv, one fallback to e2v
g.update_all([fn.src_mul_edge(src='h', edge='w1', out='m1'), fn.src_mul_edge(src='h', edge='w2', out='m2')],
[fn.sum(msg='m1', out='o1'), fn.sum(msg='m2', out='o2')],
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert U.allclose(o2, g.ndata.pop('o2'))
assert F.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o2, g.ndata.pop('o2'))
# multi builtins, one v2v spmv, one fallback to e2v, one fallback to degree-bucketing
g.update_all([fn.src_mul_edge(src='h', edge='w1', out='m1'),
fn.src_mul_edge(src='h', edge='w2', out='m2'),
......@@ -427,9 +426,9 @@ def test_update_all_multi_fallback():
fn.sum(msg='m2', out='o2'),
fn.max(msg='m3', out='o3')],
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert U.allclose(o2, g.ndata.pop('o2'))
assert U.allclose(o3, g.ndata.pop('o3'))
assert F.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o2, g.ndata.pop('o2'))
assert F.allclose(o3, g.ndata.pop('o3'))
def test_pull_multi_fallback():
......@@ -439,19 +438,19 @@ def test_pull_multi_fallback():
for i in range(1, 9):
g.add_edge(0, i)
g.add_edge(i, 9)
g.ndata['h'] = th.randn(10, D)
g.edata['w1'] = th.randn(16,)
g.edata['w2'] = th.randn(16, D)
g.ndata['h'] = F.randn((10, D))
g.edata['w1'] = F.randn((16,))
g.edata['w2'] = F.randn((16, D))
def _mfunc_hxw1(edges):
return {'m1' : edges.src['h'] * th.unsqueeze(edges.data['w1'], 1)}
return {'m1' : edges.src['h'] * F.unsqueeze(edges.data['w1'], 1)}
def _mfunc_hxw2(edges):
return {'m2' : edges.src['h'] * edges.data['w2']}
def _rfunc_m1(nodes):
return {'o1' : th.sum(nodes.mailbox['m1'], 1)}
return {'o1' : F.sum(nodes.mailbox['m1'], 1)}
def _rfunc_m2(nodes):
return {'o2' : th.sum(nodes.mailbox['m2'], 1)}
return {'o2' : F.sum(nodes.mailbox['m2'], 1)}
def _rfunc_m1max(nodes):
return {'o3' : th.max(nodes.mailbox['m1'], 1)[0]}
return {'o3' : F.max(nodes.mailbox['m1'], 1)}
def _afunc(nodes):
ret = {}
for k, v in nodes.data.items():
......@@ -471,31 +470,31 @@ def test_pull_multi_fallback():
g.pull(nodes, fn.src_mul_edge(src='h', edge='w1', out='m1'),
fn.sum(msg='m1', out='o1'),
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o1, g.ndata.pop('o1'))
# v2v fallback to e2v
g.pull(nodes, fn.src_mul_edge(src='h', edge='w2', out='m2'),
fn.sum(msg='m2', out='o2'),
_afunc)
assert U.allclose(o2, g.ndata.pop('o2'))
assert F.allclose(o2, g.ndata.pop('o2'))
# v2v fallback to degree bucketing
g.pull(nodes, fn.src_mul_edge(src='h', edge='w1', out='m1'),
fn.max(msg='m1', out='o3'),
_afunc)
assert U.allclose(o3, g.ndata.pop('o3'))
assert F.allclose(o3, g.ndata.pop('o3'))
# multi builtins, both v2v spmv
g.pull(nodes,
[fn.src_mul_edge(src='h', edge='w1', out='m1'), fn.src_mul_edge(src='h', edge='w1', out='m2')],
[fn.sum(msg='m1', out='o1'), fn.sum(msg='m2', out='o2')],
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert U.allclose(o1, g.ndata.pop('o2'))
assert F.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o1, g.ndata.pop('o2'))
# multi builtins, one v2v spmv, one fallback to e2v
g.pull(nodes,
[fn.src_mul_edge(src='h', edge='w1', out='m1'), fn.src_mul_edge(src='h', edge='w2', out='m2')],
[fn.sum(msg='m1', out='o1'), fn.sum(msg='m2', out='o2')],
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert U.allclose(o2, g.ndata.pop('o2'))
assert F.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o2, g.ndata.pop('o2'))
# multi builtins, one v2v spmv, one fallback to e2v, one fallback to degree-bucketing
g.pull(nodes,
[fn.src_mul_edge(src='h', edge='w1', out='m1'),
......@@ -505,9 +504,9 @@ def test_pull_multi_fallback():
fn.sum(msg='m2', out='o2'),
fn.max(msg='m3', out='o3')],
_afunc)
assert U.allclose(o1, g.ndata.pop('o1'))
assert U.allclose(o2, g.ndata.pop('o2'))
assert U.allclose(o3, g.ndata.pop('o3'))
assert F.allclose(o1, g.ndata.pop('o1'))
assert F.allclose(o2, g.ndata.pop('o2'))
assert F.allclose(o3, g.ndata.pop('o3'))
# test#1: non-0deg nodes
nodes = [1, 2, 9]
_pull_nodes(nodes)
......@@ -517,10 +516,10 @@ def test_pull_multi_fallback():
def test_spmv_3d_feat():
def src_mul_edge_udf(edges):
return {'sum': edges.src['h'] * edges.data['h'].unsqueeze(1).unsqueeze(1)}
return {'sum': edges.src['h'] * F.unsqueeze(F.unsqueeze(edges.data['h'], 1), 1)}
def sum_udf(nodes):
return {'h': nodes.mailbox['sum'].sum(1)}
return {'h': F.sum(nodes.mailbox['sum'], 1)}
n = 100
p = 0.1
......@@ -529,8 +528,8 @@ def test_spmv_3d_feat():
m = g.number_of_edges()
# test#1: v2v with adj data
h = th.randn((n, 5, 5))
e = th.randn((m,))
h = F.randn((n, 5, 5))
e = F.randn((m,))
g.ndata['h'] = h
g.edata['h'] = e
......@@ -540,19 +539,19 @@ def test_spmv_3d_feat():
g.ndata['h'] = h
g.edata['h'] = e
g.update_all(message_func=src_mul_edge_udf, reduce_func=fn.sum('sum', 'h')) # 2
assert U.allclose(g.ndata['h'], ans)
assert F.allclose(g.ndata['h'], ans)
g.ndata['h'] = h
g.edata['h'] = e
g.update_all(message_func=src_mul_edge_udf, reduce_func=sum_udf) # 3
assert U.allclose(g.ndata['h'], ans)
assert F.allclose(g.ndata['h'], ans)
# test#2: e2v
def src_mul_edge_udf(edges):
return {'sum': edges.src['h'] * edges.data['h']}
h = th.randn((n, 5, 5))
e = th.randn((m, 5, 5))
h = F.randn((n, 5, 5))
e = F.randn((m, 5, 5))
g.ndata['h'] = h
g.edata['h'] = e
......@@ -562,12 +561,12 @@ def test_spmv_3d_feat():
g.ndata['h'] = h
g.edata['h'] = e
g.update_all(message_func=src_mul_edge_udf, reduce_func=fn.sum('sum', 'h')) # 2
assert U.allclose(g.ndata['h'], ans)
assert F.allclose(g.ndata['h'], ans)
g.ndata['h'] = h
g.edata['h'] = e
g.update_all(message_func=src_mul_edge_udf, reduce_func=sum_udf) # 3
assert U.allclose(g.ndata['h'], ans)
assert F.allclose(g.ndata['h'], ans)
if __name__ == '__main__':
test_v2v_update_all()
......
import torch as th
from torch.autograd import Variable
import numpy as np
from dgl.graph import DGLGraph
import utils as U
import backend as F
D = 5
......@@ -15,8 +13,11 @@ def generate_graph(grad=False):
g.add_edge(i, 9)
# add a back flow from 9 to 0
g.add_edge(9, 0)
ncol = Variable(th.randn(10, D), requires_grad=grad)
ecol = Variable(th.randn(17, D), requires_grad=grad)
ncol = F.randn((10, D))
ecol = F.randn((17, D))
if grad:
ncol = F.attach_grad(ncol)
ecol = F.attach_grad(ecol)
g.ndata['h'] = ncol
g.edata['l'] = ecol
return g
......@@ -28,7 +29,7 @@ def test_basics():
nid = [0, 2, 3, 6, 7, 9]
sg = g.subgraph(nid)
eid = {2, 3, 4, 5, 10, 11, 12, 13, 16}
assert set(sg.parent_eid.numpy()) == eid
assert set(F.zerocopy_to_numpy(sg.parent_eid)) == eid
eid = sg.parent_eid
# the subgraph is empty initially
assert len(sg.ndata) == 0
......@@ -38,7 +39,7 @@ def test_basics():
assert len(sg.ndata) == 1
assert len(sg.edata) == 1
sh = sg.ndata['h']
assert U.allclose(h[nid], sh)
assert F.allclose(h[nid], sh)
'''
s, d, eid
0, 1, 0
......@@ -59,11 +60,11 @@ def test_basics():
8, 9, 15 3
9, 0, 16 1
'''
assert U.allclose(l[eid], sg.edata['l'])
assert F.allclose(F.gather_row(l, eid), sg.edata['l'])
# update the node/edge features on the subgraph should NOT
# reflect to the parent graph.
sg.ndata['h'] = th.zeros((6, D))
assert U.allclose(h, g.ndata['h'])
sg.ndata['h'] = F.zeros((6, D))
assert F.allclose(h, g.ndata['h'])
def test_merge():
# FIXME: current impl cannot handle this case!!!
......
import torch as th
import networkx as nx
import numpy as np
import dgl
import dgl.function as fn
import utils as U
import backend as F
D = 5
......@@ -11,24 +10,24 @@ D = 5
def test_line_graph():
N = 5
G = dgl.DGLGraph(nx.star_graph(N))
G.edata['h'] = th.randn((2 * N, D))
G.edata['h'] = F.randn((2 * N, D))
n_edges = G.number_of_edges()
L = G.line_graph(shared=True)
assert L.number_of_nodes() == 2 * N
L.ndata['h'] = th.randn((2 * N, D))
L.ndata['h'] = F.randn((2 * N, D))
# update node features on line graph should reflect to edge features on
# original graph.
u = [0, 0, 2, 3]
v = [1, 2, 0, 0]
eid = G.edge_ids(u, v)
L.nodes[eid].data['h'] = th.zeros((4, D))
assert U.allclose(G.edges[u, v].data['h'], th.zeros((4, D)))
L.nodes[eid].data['h'] = F.zeros((4, D))
assert F.allclose(G.edges[u, v].data['h'], F.zeros((4, D)))
# adding a new node feature on line graph should also reflect to a new
# edge feature on original graph
data = th.randn(n_edges, D)
data = F.randn((n_edges, D))
L.ndata['w'] = data
assert U.allclose(G.edata['w'], data)
assert F.allclose(G.edata['w'], data)
def test_no_backtracking():
N = 5
......@@ -47,15 +46,15 @@ def test_reverse():
g.add_nodes(5)
# The graph need not to be completely connected.
g.add_edges([0, 1, 2], [1, 2, 1])
g.ndata['h'] = th.tensor([[0.], [1.], [2.], [3.], [4.]])
g.edata['h'] = th.tensor([[5.], [6.], [7.]])
g.ndata['h'] = F.tensor([[0.], [1.], [2.], [3.], [4.]])
g.edata['h'] = F.tensor([[5.], [6.], [7.]])
rg = g.reverse()
assert g.is_multigraph == rg.is_multigraph
assert g.number_of_nodes() == rg.number_of_nodes()
assert g.number_of_edges() == rg.number_of_edges()
assert U.allclose(rg.has_edges_between([1, 2, 1], [0, 1, 2]).float(), th.ones(3))
assert F.allclose(F.astype(rg.has_edges_between([1, 2, 1], [0, 1, 2]), F.float32), F.ones((3,)))
assert g.edge_id(0, 1) == rg.edge_id(1, 0)
assert g.edge_id(1, 2) == rg.edge_id(2, 1)
assert g.edge_id(2, 1) == rg.edge_id(1, 2)
......@@ -64,35 +63,27 @@ def test_reverse_shared_frames():
g = dgl.DGLGraph()
g.add_nodes(3)
g.add_edges([0, 1, 2], [1, 2, 1])
g.ndata['h'] = th.tensor([[0.], [1.], [2.]], requires_grad=True)
g.edata['h'] = th.tensor([[3.], [4.], [5.]], requires_grad=True)
g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
g.edata['h'] = F.tensor([[3.], [4.], [5.]])
rg = g.reverse(share_ndata=True, share_edata=True)
assert U.allclose(g.ndata['h'], rg.ndata['h'])
assert U.allclose(g.edata['h'], rg.edata['h'])
assert U.allclose(g.edges[[0, 2], [1, 1]].data['h'],
assert F.allclose(g.ndata['h'], rg.ndata['h'])
assert F.allclose(g.edata['h'], rg.edata['h'])
assert F.allclose(g.edges[[0, 2], [1, 1]].data['h'],
rg.edges[[1, 1], [0, 2]].data['h'])
rg.ndata['h'] = rg.ndata['h'] + 1
assert U.allclose(rg.ndata['h'], g.ndata['h'])
assert F.allclose(rg.ndata['h'], g.ndata['h'])
g.edata['h'] = g.edata['h'] - 1
assert U.allclose(rg.edata['h'], g.edata['h'])
assert F.allclose(rg.edata['h'], g.edata['h'])
src_msg = fn.copy_src(src='h', out='m')
sum_reduce = fn.sum(msg='m', out='h')
rg.update_all(src_msg, sum_reduce)
assert U.allclose(g.ndata['h'], rg.ndata['h'])
assert F.allclose(g.ndata['h'], rg.ndata['h'])
# Grad check
g.ndata['h'].retain_grad()
rg.ndata['h'].retain_grad()
loss_func = th.nn.MSELoss()
target = th.zeros(3, 1)
loss = loss_func(rg.ndata['h'], target)
loss.backward()
assert U.allclose(g.ndata['h'].grad, rg.ndata['h'].grad)
if __name__ == '__main__':
test_line_graph()
......
......@@ -6,15 +6,14 @@ import dgl
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch as th
import utils as U
import backend as F
import itertools
np.random.seed(42)
def toset(x):
return set(x.tolist())
return set(F.zerocopy_to_numpy(x).tolist())
def test_bfs(n=1000):
def _bfs_nx(g_nx, src):
......@@ -66,15 +65,15 @@ def test_topological_nodes(n=1000):
adjmat = g.adjacency_matrix()
def tensor_topo_traverse():
n = g.number_of_nodes()
mask = th.ones((n, 1))
degree = th.spmm(adjmat, mask)
while th.sum(mask) != 0.:
v = (degree == 0.).float()
mask = F.ones((n, 1))
degree = F.spmm(adjmat, mask)
while F.reduce_sum(mask) != 0.:
v = F.astype((degree == 0.), F.float32)
v = v * mask
mask = mask - v
frontier = th.squeeze(th.squeeze(v).nonzero(), 1)
frontier = F.nonzero_1d(F.squeeze(v, 1))
yield frontier
degree -= th.spmm(adjmat, v)
degree -= F.spmm(adjmat, v)
layers_spmv = list(tensor_topo_traverse())
......
import torch as th
def allclose(a, b):
    """Elementwise closeness check with tolerances relaxed for unit tests.

    Looser than torch defaults so small cross-backend numeric noise passes.
    """
    tolerances = {'rtol': 1e-4, 'atol': 1e-4}
    return th.allclose(a, b, **tolerances)
def check_fail(fn, *args, **kwargs):
try:
......
import os
os.environ['DGLBACKEND'] = 'mxnet'
import mxnet as mx
import numpy as np
import dgl
import dgl.function as fn
def generate_graph():
    """Build the 10-node test graph used by the builtin-function tests.

    Node 0 fans out to nodes 1..8, each of which feeds into sink node 9,
    plus one back edge 9 -> 0 (17 edges total). Node feature 'h' is
    1..10; edge feature 'h' is a fixed 17-element vector.
    """
    g = dgl.DGLGraph()
    g.add_nodes(10)  # 10 nodes.
    g.ndata['h'] = mx.nd.arange(1, 11, dtype=np.float32)
    # node 0 is the source and node 9 is the sink
    for i in range(1, 9):
        g.add_edge(0, i)
        g.add_edge(i, 9)
    # back flow from 9 to 0
    g.add_edge(9, 0)
    edge_feat = mx.nd.array([1., 2., 1., 3., 1., 4., 1., 5., 1., 6.,
                             1., 7., 1., 8., 1., 9., 10.])
    g.edata['h'] = edge_feat
    return g
def reducer_both(nodes):
    """Reduce: sum incoming messages over the neighbor axis into 'h'."""
    summed = mx.nd.sum(nodes.mailbox['m'], 1)
    return {'h': summed}
def test_copy_src():
    """copy_src builtin: each node gets the sum of its in-neighbors' 'h'."""
    g = generate_graph()
    g.register_message_func(fn.copy_src(src='h', out='m'))
    g.register_reduce_func(reducer_both)
    g.update_all()
    expected = np.array([10., 1., 1., 1., 1., 1., 1., 1., 1., 44.])
    assert np.allclose(g.ndata['h'].asnumpy(), expected)
def test_copy_edge():
    """copy_edge builtin: each node gets the sum of its in-edges' 'h'."""
    g = generate_graph()
    g.register_message_func(fn.copy_edge(edge='h', out='m'))
    g.register_reduce_func(reducer_both)
    g.update_all()
    expected = np.array([10., 1., 1., 1., 1., 1., 1., 1., 1., 44.])
    assert np.allclose(g.ndata['h'].asnumpy(), expected)
def test_src_mul_edge():
    """src_mul_edge builtin: messages are src feature times edge feature."""
    g = generate_graph()
    g.register_message_func(fn.src_mul_edge(src='h', edge='h', out='m'))
    g.register_reduce_func(reducer_both)
    g.update_all()
    expected = np.array([100., 1., 1., 1., 1., 1., 1., 1., 1., 284.])
    assert np.allclose(g.ndata['h'].asnumpy(), expected)
if __name__ == '__main__':
    # Smoke-run all builtin message/reduce-function tests when executed directly.
    test_copy_src()
    test_copy_edge()
    test_src_mul_edge()
import os
os.environ['DGLBACKEND'] = 'mxnet'
import mxnet as mx
import numpy as np
import scipy.sparse as sp
import dgl
import dgl.function as fn
D = 5
def generate_graph():
    """Return the 10-node graph used by the inplace-update tests.

    Topology: 0 -> 1..8 -> 9 plus a back edge 9 -> 0 (17 edges).
    Node feature 'f' and edge feature 'e' are random (10, D) / (17, D).
    """
    g = dgl.DGLGraph()
    g.add_nodes(10)
    # Build edge lists in the same order as repeated add_edge calls so
    # edge ids stay identical: (0, i) then (i, 9) for i in 1..8, then (9, 0).
    srcs, dsts = [], []
    for i in range(1, 9):
        srcs += [0, i]
        dsts += [i, 9]
    srcs.append(9)
    dsts.append(0)
    g.add_edges(srcs, dsts)
    g.ndata['f'] = mx.nd.random.normal(shape=(10, D))
    g.edata['e'] = mx.nd.random.normal(shape=(17, D))
    return g
def test_inplace_recv():
    """recv(..., inplace=True) must write results into the existing tensor.

    Runs an out-of-place send/recv to get the reference result, then
    repeats with inplace=True via two execution paths (UDF degree
    bucketing, and builtin e2v reduce) and checks that (a) the result
    matches and (b) the pre-existing tensor v1 was mutated in place.
    """
    u = mx.nd.array([0, 0, 0, 3, 4, 9], dtype=np.int64)
    v = mx.nd.array([1, 2, 3, 9, 9, 0], dtype=np.int64)

    def message_func(edges):
        return {'m' : edges.src['f'] + edges.dst['f']}

    def reduce_func(nodes):
        return {'f' : mx.nd.sum(nodes.mailbox['m'], 1)}

    def apply_func(nodes):
        return {'f' : 2 * nodes.data['f']}

    def _test(apply_func):
        g = generate_graph()
        f = g.ndata['f']

        # one out place run to get result
        g.send((u, v), message_func)
        g.recv(mx.nd.array([0,1,2,3,9], dtype=np.int64),
               reduce_func, apply_func)
        result = g.get_n_repr()['f']

        # inplace deg bucket run
        v1 = f.copy()
        g.ndata['f'] = v1
        g.send((u, v), message_func)
        g.recv(mx.nd.array([0,1,2,3,9], dtype=np.int64),
               reduce_func, apply_func, inplace=True)
        r1 = g.get_n_repr()['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace: v1 itself must have received the new values
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace e2v (builtin sum reduce instead of the UDF)
        v1 = f.copy()
        g.ndata['f'] = v1
        g.send((u, v), message_func)
        g.recv(mx.nd.array([0,1,2,3,9], dtype=np.int64),
               fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

    # test send_and_recv with apply_func
    _test(apply_func)
    # test send_and_recv without apply_func
    _test(None)
def test_inplace_snr():
    """send_and_recv(..., inplace=True) must mutate the existing tensor.

    Computes the reference result out of place, then re-runs with
    inplace=True through three execution paths (UDF degree bucketing,
    builtin v2v spmv, and mixed e2v spmv) and checks both correctness
    and that v1 was written in place.
    """
    u = mx.nd.array([0, 0, 0, 3, 4, 9], dtype=np.int64)
    v = mx.nd.array([1, 2, 3, 9, 9, 0], dtype=np.int64)

    def message_func(edges):
        return {'m' : edges.src['f']}

    def reduce_func(nodes):
        return {'f' : mx.nd.sum(nodes.mailbox['m'], 1)}

    def apply_func(nodes):
        return {'f' : 2 * nodes.data['f']}

    def _test(apply_func):
        g = generate_graph()
        f = g.ndata['f']

        # an out place run to get result
        g.send_and_recv((u, v), fn.copy_src(src='f', out='m'),
                        fn.sum(msg='m', out='f'), apply_func)
        result = g.ndata['f']

        # inplace deg bucket (both message and reduce are UDFs)
        v1 = f.copy()
        g.ndata['f'] = v1
        g.send_and_recv((u, v), message_func, reduce_func, apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace v2v spmv (both builtins)
        v1 = f.copy()
        g.ndata['f'] = v1
        g.send_and_recv((u, v), fn.copy_src(src='f', out='m'),
                        fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace e2v spmv (UDF message, builtin reduce)
        v1 = f.copy()
        g.ndata['f'] = v1
        g.send_and_recv((u, v), message_func,
                        fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

    # test send_and_recv with apply_func
    _test(apply_func)
    # test send_and_recv without apply_func
    _test(None)
def test_inplace_push():
    """push(..., inplace=True) must mutate the existing node-feature tensor.

    Same structure as test_inplace_snr but driven by g.push on a fixed
    set of source nodes: reference out-of-place run, then inplace runs
    through degree-bucketing, v2v spmv, and e2v spmv paths.
    """
    nodes = mx.nd.array([0, 3, 4, 9], dtype=np.int64)

    def message_func(edges):
        return {'m' : edges.src['f']}

    def reduce_func(nodes):
        return {'f' : mx.nd.sum(nodes.mailbox['m'], 1)}

    def apply_func(nodes):
        return {'f' : 2 * nodes.data['f']}

    def _test(apply_func):
        g = generate_graph()
        f = g.ndata['f']

        # an out place run to get result
        g.push(nodes,
               fn.copy_src(src='f', out='m'), fn.sum(msg='m', out='f'), apply_func)
        result = g.ndata['f']

        # inplace deg bucket
        v1 = f.copy()
        g.ndata['f'] = v1
        g.push(nodes, message_func, reduce_func, apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace v2v spmv
        v1 = f.copy()
        g.ndata['f'] = v1
        g.push(nodes, fn.copy_src(src='f', out='m'),
               fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace e2v spmv
        v1 = f.copy()
        g.ndata['f'] = v1
        g.push(nodes,
               message_func, fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

    # test send_and_recv with apply_func
    _test(apply_func)
    # test send_and_recv without apply_func
    _test(None)
def test_inplace_pull():
    """pull(..., inplace=True) must mutate the existing node-feature tensor.

    Mirror of test_inplace_push driven by g.pull on a fixed set of
    destination nodes, exercising degree-bucketing, v2v spmv, and e2v
    spmv inplace paths against an out-of-place reference run.
    """
    nodes = mx.nd.array([1, 2, 3, 9], dtype=np.int64)

    def message_func(edges):
        return {'m' : edges.src['f']}

    def reduce_func(nodes):
        return {'f' : mx.nd.sum(nodes.mailbox['m'], 1)}

    def apply_func(nodes):
        return {'f' : 2 * nodes.data['f']}

    def _test(apply_func):
        g = generate_graph()
        f = g.ndata['f']

        # an out place run to get result
        g.pull(nodes,
               fn.copy_src(src='f', out='m'), fn.sum(msg='m', out='f'), apply_func)
        result = g.ndata['f']

        # inplace deg bucket
        v1 = f.copy()
        g.ndata['f'] = v1
        g.pull(nodes, message_func, reduce_func, apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace v2v spmv
        v1 = f.copy()
        g.ndata['f'] = v1
        g.pull(nodes, fn.copy_src(src='f', out='m'),
               fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

        # inplace e2v spmv
        v1 = f.copy()
        g.ndata['f'] = v1
        g.pull(nodes,
               message_func, fn.sum(msg='m', out='f'), apply_func, inplace=True)
        r1 = g.ndata['f']
        # check result
        assert np.allclose(r1.asnumpy(), result.asnumpy())
        # check inplace
        assert np.allclose(v1.asnumpy(), r1.asnumpy())

    # test send_and_recv with apply_func
    _test(apply_func)
    # test send_and_recv without apply_func
    _test(None)
def test_inplace_apply():
    """apply_nodes/apply_edges with inplace=True.

    Applying to an explicit subset of nodes/edges in place must write the
    results into the existing feature tensor; applying to *all*
    nodes/edges must allocate a fresh tensor even when inplace=True is
    requested.
    """
    def apply_node_func(nodes):
        return {'f': nodes.data['f'] * 2}

    def apply_edge_func(edges):
        return {'e': edges.data['e'] * 2}

    g = generate_graph()

    # ---- node features ----
    nodes = [1, 2, 3, 9]
    nf = g.ndata['f']
    # out-of-place run to obtain the expected result
    g.apply_nodes(apply_node_func, nodes)
    new_nf = g.ndata['f']
    # in-place run: must mutate nf itself
    g.ndata['f'] = nf
    g.apply_nodes(apply_node_func, nodes, inplace=True)
    # results are correct and were written in place (nf holds them)
    assert np.allclose(nf.asnumpy(), new_nf.asnumpy())
    # applying to ALL nodes must not be done in place
    g.ndata['f'] = nf
    g.apply_nodes(apply_node_func, inplace=True)
    assert not np.allclose(nf.asnumpy(), g.ndata['f'].asnumpy())

    # ---- edge features ----
    edges = [3, 5, 7, 10]
    ef = g.edata['e']
    # out-of-place run to obtain the expected result
    g.apply_edges(apply_edge_func, edges)
    new_ef = g.edata['e']
    # in-place run: must mutate ef itself
    g.edata['e'] = ef
    g.apply_edges(apply_edge_func, edges, inplace=True)
    assert np.allclose(ef.asnumpy(), new_ef.asnumpy())
    # applying to ALL edges must not be done in place
    # (bugfix: this line was `g.edata['e'] == ef`, a no-op comparison,
    # so the reset before the apply-all run never happened)
    g.edata['e'] = ef
    g.apply_edges(apply_edge_func, inplace=True)
    assert not np.allclose(ef.asnumpy(), g.edata['e'].asnumpy())
if __name__ == '__main__':
    # Run every inplace-update test when executed as a script.
    test_inplace_recv()
    test_inplace_snr()
    test_inplace_push()
    test_inplace_pull()
    test_inplace_apply()
import os
os.environ['DGLBACKEND'] = 'mxnet'
import dgl
import networkx as nx
import numpy as np
import mxnet as mx
def mfunc(edges):
    """Message: forward the source node feature 'x' as message 'm'."""
    return {'m': edges.src['x']}
def rfunc(nodes):
    """Reduce: accumulate the summed incoming messages onto 'x'."""
    incoming = mx.nd.sum(nodes.mailbox['m'], 1)
    return {'x': nodes.data['x'] + incoming}
def test_prop_nodes_bfs():
    """Propagate node updates in BFS order on a 5-node (undirected) path graph."""
    g = dgl.DGLGraph(nx.path_graph(5))
    g.ndata['x'] = mx.nd.ones(shape=(5, 2))
    g.register_message_func(mfunc)
    g.register_reduce_func(rfunc)

    dgl.prop_nodes_bfs(g, 0)
    # pull nodes using bfs order will result in a cumsum[i] + data[i] + data[i+1]
    assert np.allclose(g.ndata['x'].asnumpy(),
                       np.array([[2., 2.], [4., 4.], [6., 6.], [8., 8.], [9., 9.]]))
def test_prop_edges_dfs():
    """Propagate along edges in DFS order with various edge-type options."""
    g = dgl.DGLGraph(nx.path_graph(5))
    g.register_message_func(mfunc)
    g.register_reduce_func(rfunc)

    g.ndata['x'] = mx.nd.ones(shape=(5, 2))
    dgl.prop_edges_dfs(g, 0)
    # snr using dfs results in a cumsum
    assert np.allclose(g.ndata['x'].asnumpy(),
                       np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]]))

    # reset features and include reverse edges as well
    g.ndata['x'] = mx.nd.ones(shape=(5, 2))
    dgl.prop_edges_dfs(g, 0, has_reverse_edge=True)
    # result is cumsum[i] + cumsum[i-1]
    assert np.allclose(g.ndata['x'].asnumpy(),
                       np.array([[1., 1.], [3., 3.], [5., 5.], [7., 7.], [9., 9.]]))

    # reset features and include non-tree edges
    g.ndata['x'] = mx.nd.ones(shape=(5, 2))
    dgl.prop_edges_dfs(g, 0, has_nontree_edge=True)
    # result is cumsum[i] + cumsum[i+1]
    assert np.allclose(g.ndata['x'].asnumpy(),
                       np.array([[3., 3.], [5., 5.], [7., 7.], [9., 9.], [5., 5.]]))
def test_prop_nodes_topo():
    """Propagate node updates in topological order over a small tree."""
    # bi-directional chain
    g = dgl.DGLGraph(nx.path_graph(5))

    # tree (edges point child -> parent so messages flow to the root)
    tree = dgl.DGLGraph()
    tree.add_nodes(5)
    tree.add_edge(1, 0)
    tree.add_edge(2, 0)
    tree.add_edge(3, 2)
    tree.add_edge(4, 2)
    tree.register_message_func(mfunc)
    tree.register_reduce_func(rfunc)
    # init node feature data
    tree.ndata['x'] = mx.nd.zeros(shape=(5, 2))
    # set all leaf nodes to be ones
    tree.nodes[[1, 3, 4]].data['x'] = mx.nd.ones(shape=(3, 2))
    dgl.prop_nodes_topo(tree)
    # root node get the sum
    assert np.allclose(tree.nodes[0].data['x'].asnumpy(), np.array([[3., 3.]]))
if __name__ == '__main__':
    # Run the propagation-order tests when executed directly.
    test_prop_nodes_bfs()
    test_prop_edges_dfs()
    test_prop_nodes_topo()
import os
os.environ['DGLBACKEND'] = 'mxnet'
import random
import sys
import time
import dgl
import networkx as nx
import numpy as np
import scipy.sparse as sp
import mxnet as mx
import itertools
np.random.seed(42)
def toset(x):
    """Convert a backend NDArray to a plain Python set of its values."""
    values = x.asnumpy().tolist()
    return set(values)
def test_bfs(n=1000):
    """Compare DGL's BFS node/edge generators against a networkx reference.

    n: number of nodes in the random test graphs.
    """
    def _bfs_nx(g_nx, src):
        # Reference BFS: group networkx's bfs_edges stream into
        # per-level node frontiers and edge frontiers.
        # NOTE: closes over the outer `g` (defined below) for edge ids;
        # only valid because it is called after `g` is built.
        edges = nx.bfs_edges(g_nx, src)
        layers_nx = [set([src])]
        edges_nx = []
        frontier = set()
        edge_frontier = set()
        for u, v in edges:
            if u in layers_nx[-1]:
                frontier.add(v)
                edge_frontier.add(g.edge_id(u, v))
            else:
                layers_nx.append(frontier)
                edges_nx.append(edge_frontier)
                frontier = set([v])
                edge_frontier = set([g.edge_id(u, v)])
        layers_nx.append(frontier)
        edges_nx.append(edge_frontier)
        return layers_nx, edges_nx

    # node frontiers on a random sparse graph
    g = dgl.DGLGraph()
    a = sp.random(n, n, 10 / n, data_rvs=lambda n: np.ones(n))
    g.from_scipy_sparse_matrix(a)
    g_nx = g.to_networkx()
    src = random.choice(range(n))
    layers_nx, _ = _bfs_nx(g_nx, src)
    layers_dgl = dgl.bfs_nodes_generator(g, src)
    assert len(layers_dgl) == len(layers_nx)
    assert all(toset(x) == y for x, y in zip(layers_dgl, layers_nx))

    # edge frontiers on a random tree (unique BFS edge order)
    g_nx = nx.random_tree(n, seed=42)
    g = dgl.DGLGraph()
    g.from_networkx(g_nx)
    src = 0
    _, edges_nx = _bfs_nx(g_nx, src)
    edges_dgl = dgl.bfs_edges_generator(g, src)
    assert len(edges_dgl) == len(edges_nx)
    assert all(toset(x) == y for x, y in zip(edges_dgl, edges_nx))
def test_topological_nodes(n=1000):
    """Compare DGL's topological node generator against an SpMV reference.

    Builds a random DAG (strictly lower-triangular adjacency) and checks
    that dgl.topological_nodes_generator produces the same frontiers as a
    hand-rolled in-degree peeling loop implemented with dense matvecs.
    """
    g = dgl.DGLGraph()
    a = sp.random(n, n, 10 / n, data_rvs=lambda n: np.ones(n))
    # keep only the strict lower triangle so the graph is acyclic
    b = sp.tril(a, -1).tocoo()
    g.from_scipy_sparse_matrix(b)
    layers_dgl = dgl.topological_nodes_generator(g)

    adjmat = g.adjacency_matrix()

    def tensor_topo_traverse():
        # Repeatedly emit the nodes whose remaining in-degree is zero,
        # then remove their out-edges' contribution from `degree`.
        n = g.number_of_nodes()
        mask = mx.nd.ones(shape=(n, 1))
        degree = mx.nd.dot(adjmat, mask)
        while mx.nd.sum(mask) != 0.:
            v = (degree == 0.).astype(np.float32)
            v = v * mask
            mask = mask - v
            tmp = np.nonzero(mx.nd.squeeze(v).asnumpy())[0]
            frontier = mx.nd.array(tmp, dtype=tmp.dtype)
            yield frontier
            degree -= mx.nd.dot(adjmat, v)

    layers_spmv = list(tensor_topo_traverse())
    assert len(layers_dgl) == len(layers_spmv)
    assert all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv))
DFS_LABEL_NAMES = ['forward', 'reverse', 'nontree']
def test_dfs_labeled_edges(n=1000, example=False):
    """Check dfs_labeled_edges_generator against enumerated valid traversals.

    Two disjoint components are traversed from sources 0 and 3; since DFS
    order is not unique, every valid interleaving of the per-component
    solutions is generated and at least one must match DGL's output.
    """
    dgl_g = dgl.DGLGraph()
    dgl_g.add_nodes(6)
    dgl_g.add_edges([0, 1, 0, 3, 3], [1, 2, 2, 4, 5])
    dgl_edges, dgl_labels = dgl.dfs_labeled_edges_generator(
        dgl_g, [0, 3], has_reverse_edge=True, has_nontree_edge=True)
    dgl_edges = [toset(t) for t in dgl_edges]
    dgl_labels = [toset(t) for t in dgl_labels]
    g1_solutions = [
        # edges           labels
        [[0, 1, 1, 0, 2], [0, 0, 1, 1, 2]],
        [[2, 2, 0, 1, 0], [0, 1, 0, 2, 1]],
    ]
    g2_solutions = [
        # edges        labels
        [[3, 3, 4, 4], [0, 1, 0, 1]],
        [[4, 4, 3, 3], [0, 1, 0, 1]],
    ]

    def combine_frontiers(sol):
        # Merge per-component edge/label sequences position-wise into
        # frontier sets, padding the shorter sequence with None.
        es, ls = zip(*sol)
        es = [set(i for i in t if i is not None)
              for t in itertools.zip_longest(*es)]
        ls = [set(i for i in t if i is not None)
              for t in itertools.zip_longest(*ls)]
        return es, ls

    # accept any combination of valid per-component traversals
    for sol_set in itertools.product(g1_solutions, g2_solutions):
        es, ls = combine_frontiers(sol_set)
        if es == dgl_edges and ls == dgl_labels:
            break
    else:
        # no enumerated solution matched DGL's traversal
        assert False
if __name__ == '__main__':
    # Run the traversal tests when executed directly.
    test_bfs()
    test_topological_nodes()
    test_dfs_labeled_edges()
"""
Placeholder file for framework-specific test
"""
......@@ -8,7 +8,9 @@ IF x%1x==xx (
SET BACKEND=%1
)
python -m nose -v --with-xunit tests || EXIT /B 1
SET PYTHONPATH=tests;!PYTHONPATH!
python -m nose -v --with-xunit tests\!BACKEND! || EXIT /B 1
python -m nose -v --with-xunit tests\graph_index || EXIT /B 1
python -m nose -v --with-xunit tests\compute || EXIT /B 1
EXIT /B
......@@ -15,7 +15,8 @@ if [ $# -ne 1 ]; then
fi
BACKEND=$1
export PYTHONPATH=tests:$PYTHONPATH
python3 -m nose -v --with-xunit tests || fail "tests"
python3 -m nose -v --with-xunit tests/$BACKEND || fail "backend"
python3 -m nose -v --with-xunit tests/$BACKEND || fail "backend-specific"
python3 -m nose -v --with-xunit tests/graph_index || fail "graph_index"
python3 -m nose -v --with-xunit tests/compute || fail "compute"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment