import unittest

import backend as F
import networkx as nx
import utils as U
from test_utils import parametrize_idtype

import dgl

def create_graph(idtype):
    """Build a 5-node path graph as a DGLGraph on the test device."""
    path = nx.path_graph(5)
    return dgl.from_networkx(path, idtype=idtype, device=F.ctx())
def mfunc(edges):
    """Message function: forward each edge's source-node 'x' feature as 'm'."""
    source_feature = edges.src["x"]
    return {"m": source_feature}

19
20

def rfunc(nodes):
    """Reduce function: add the sum of incoming messages to the node's 'x'."""
    incoming = F.sum(nodes.mailbox["m"], 1)
    return {"x": nodes.data["x"] + incoming}
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_nodes_bfs(idtype):
    """BFS node propagation on a 5-node path graph starting from node 0."""
    g = create_graph(idtype)
    g.ndata["x"] = F.ones((5, 2))
    dgl.prop_nodes_bfs(
        g, 0, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # pull nodes using bfs order will result in a cumsum[i] + data[i] + data[i+1]
    expected = F.tensor(
        [[2.0, 2.0], [4.0, 4.0], [6.0, 6.0], [8.0, 8.0], [9.0, 9.0]]
    )
    assert F.allclose(g.ndata["x"], expected)
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_edges_dfs(idtype):
    """DFS edge propagation on a 5-node path, with and without reverse/nontree edges."""
    g = create_graph(idtype)

    def run_and_check(expected, **dfs_kwargs):
        # Reset features, propagate along DFS edges from node 0, verify result.
        g.ndata["x"] = F.ones((5, 2))
        dgl.prop_edges_dfs(
            g,
            0,
            message_func=mfunc,
            reduce_func=rfunc,
            apply_node_func=None,
            **dfs_kwargs,
        )
        assert F.allclose(g.ndata["x"], F.tensor(expected))

    # snr using dfs results in a cumsum
    run_and_check([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]])
    # result is cumsum[i] + cumsum[i-1]
    run_and_check(
        [[1.0, 1.0], [3.0, 3.0], [5.0, 5.0], [7.0, 7.0], [9.0, 9.0]],
        has_reverse_edge=True,
    )
    # result is cumsum[i] + cumsum[i+1]
    run_and_check(
        [[3.0, 3.0], [5.0, 5.0], [7.0, 7.0], [9.0, 9.0], [5.0, 5.0]],
        has_nontree_edge=True,
    )
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_nodes_topo(idtype):
    """Topological propagation: cyclic graphs fail; tree leaves sum into the root."""
    # A bi-directional chain contains cycles, so topological order is undefined.
    chain = create_graph(idtype)
    assert U.check_fail(dgl.prop_nodes_topo, chain)  # has loop

    # Build a small tree whose edges point child -> parent.
    tree = dgl.DGLGraph()
    tree.add_nodes(5)
    for child, parent in ((1, 0), (2, 0), (3, 2), (4, 2)):
        tree.add_edges(child, parent)
    tree = dgl.graph(tree.edges())
    # init node feature data
    tree.ndata["x"] = F.zeros((5, 2))
    # set all leaf nodes to be ones
    tree.nodes[[1, 3, 4]].data["x"] = F.ones((3, 2))
    dgl.prop_nodes_topo(
        tree, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # root node get the sum
    assert F.allclose(tree.nodes[0].data["x"], F.tensor([[3.0, 3.0]]))

110

111
if __name__ == "__main__":
    # These tests are parametrized over the index dtype by @parametrize_idtype;
    # pytest supplies `idtype` automatically, but a direct script invocation
    # must pass a concrete dtype or each call raises TypeError.
    test_prop_nodes_bfs(F.int32)
    test_prop_edges_dfs(F.int32)
    test_prop_nodes_topo(F.int32)