test_propagate.py 2.97 KB
Newer Older
1
import unittest
2
3

import backend as F
4
5

import dgl
6
import networkx as nx
7
from utils import check_fail, parametrize_idtype
8

9

10
11
12
13
def create_graph(idtype):
    """Build a 5-node undirected path graph with the given id dtype on the
    current backend device."""
    path = nx.path_graph(5)
    return dgl.from_networkx(path, idtype=idtype, device=F.ctx())

14

15
def mfunc(edges):
    """Message function: forward each edge's source-node feature ``x`` as
    message ``m``."""
    src_feat = edges.src["x"]
    return {"m": src_feat}

18
19

def rfunc(nodes):
    """Reduce function: add the sum of incoming ``m`` messages to each
    node's ``x`` feature."""
    incoming = F.sum(nodes.mailbox["m"], 1)
    updated = nodes.data["x"] + incoming
    return {"x": updated}

23

24
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_nodes_bfs(idtype):
    """Propagate along a path graph in BFS order from node 0 and check the
    accumulated features."""
    graph = create_graph(idtype)
    graph.ndata["x"] = F.ones((5, 2))
    dgl.prop_nodes_bfs(
        graph, 0, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # Pulling nodes in BFS order results in cumsum[i] + data[i] + data[i+1].
    expected = F.tensor(
        [[2.0, 2.0], [4.0, 4.0], [6.0, 6.0], [8.0, 8.0], [9.0, 9.0]]
    )
    assert F.allclose(graph.ndata["x"], expected)
37

38
39

@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_edges_dfs(idtype):
    """Propagate along a path graph via DFS edge traversal from node 0,
    covering the plain, reverse-edge, and nontree-edge variants."""
    graph = create_graph(idtype)

    # Plain DFS: send-and-recv along tree edges yields a cumulative sum.
    graph.ndata["x"] = F.ones((5, 2))
    dgl.prop_edges_dfs(
        graph, 0, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    assert F.allclose(
        graph.ndata["x"],
        F.tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]),
    )

    # Including reverse edges: result is cumsum[i] + cumsum[i-1].
    graph.ndata["x"] = F.ones((5, 2))
    dgl.prop_edges_dfs(
        graph,
        0,
        has_reverse_edge=True,
        message_func=mfunc,
        reduce_func=rfunc,
        apply_node_func=None,
    )
    assert F.allclose(
        graph.ndata["x"],
        F.tensor([[1.0, 1.0], [3.0, 3.0], [5.0, 5.0], [7.0, 7.0], [9.0, 9.0]]),
    )

    # Including nontree edges: result is cumsum[i] + cumsum[i+1].
    graph.ndata["x"] = F.ones((5, 2))
    dgl.prop_edges_dfs(
        graph,
        0,
        has_nontree_edge=True,
        message_func=mfunc,
        reduce_func=rfunc,
        apply_node_func=None,
    )
    assert F.allclose(
        graph.ndata["x"],
        F.tensor([[3.0, 3.0], [5.0, 5.0], [7.0, 7.0], [9.0, 9.0], [5.0, 5.0]]),
    )

83

84
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_nodes_topo(idtype):
    """Topological-order propagation: must fail on a cyclic graph and sum
    leaf features into the root of a tree."""
    # A bi-directional chain contains cycles, so topo propagation must fail.
    chain = create_graph(idtype)
    assert check_fail(dgl.prop_nodes_topo, chain)  # has loop

    # Build a small tree with edges pointing child -> parent.
    tree = dgl.DGLGraph()
    tree.add_nodes(5)
    tree.add_edges(1, 0)
    tree.add_edges(2, 0)
    tree.add_edges(3, 2)
    tree.add_edges(4, 2)
    tree = dgl.graph(tree.edges())

    # Initialize node features: zeros everywhere, ones on the leaves.
    tree.ndata["x"] = F.zeros((5, 2))
    tree.nodes[[1, 3, 4]].data["x"] = F.ones((3, 2))
    dgl.prop_nodes_topo(
        tree, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # The root ends up with the sum of the leaf features.
    assert F.allclose(tree.nodes[0].data["x"], F.tensor([[3.0, 3.0]]))

109

110
if __name__ == "__main__":
    # NOTE(review): each test function takes an ``idtype`` parameter, yet is
    # called here with no arguments. This presumably works only if the
    # @parametrize_idtype decorator supplies a default when the function is
    # invoked directly (outside pytest) — verify against the utils module.
    test_prop_nodes_bfs()
    test_prop_edges_dfs()
    test_prop_nodes_topo()