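"""Tests for the shared-memory graph store in dgl.contrib.graph_store.

A server process builds a graph from a random sparse matrix and exposes it in
shared memory; worker processes attach to the same store and check the graph
structure, node/edge features, message passing, and barrier synchronization.
"""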
import dgl
import sys
import random
import time
import numpy as np
from numpy.testing import assert_array_equal
from multiprocessing import Process, Manager
from scipy import sparse as spsp
import backend as F
import unittest
import dgl.function as fn
import traceback
from numpy.testing import assert_almost_equal

num_nodes = 100
num_edges = int(num_nodes * num_nodes * 0.1)
rand_port = random.randint(5000, 8000)
print('run graph store with port ' + str(rand_port), file=sys.stderr)

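# Worker 0 writes a distinct value into the first row of each array; the other
# workers wait at the barrier and then verify they observe the writes, which
# only works if the arrays are actually backed by shared memory.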
def check_array_shared_memory(g, worker_id, arrays):
    if worker_id == 0:
        for i, arr in enumerate(arrays):
            arr[0] = i + 10
        g._sync_barrier(60)
    else:
        g._sync_barrier(60)
        for i, arr in enumerate(arrays):
            assert_almost_equal(F.asnumpy(arr[0]), i + 10)

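# Connect to the graph store server, retrying for up to 10 seconds in case the
# server process has not started listening yet.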
def create_graph_store(graph_name):
    for _ in range(10):
        try:
            g = dgl.contrib.graph_store.create_graph_from_store(graph_name, "shared_mem",
                                                                port=rand_port)
            return g
        except ConnectionError as e:
            traceback.print_exc()
            time.sleep(1)
    return None

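# Worker-side checks for initialization: attach to the store, verify the graph
# structure and the node/edge features set by the server, then create new
# shared ndata/edata and confirm that writes from one worker are visible to
# the others.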
def check_init_func(worker_id, graph_name, return_dict):
    time.sleep(3)
    print("worker starts")
    np.random.seed(0)
    csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)

    # Verify the graph structure and node/edge features loaded from shared memory.
    try:
        g = create_graph_store(graph_name)
        if g is None:
            return_dict[worker_id] = -1
            return

        src, dst = g.all_edges()
        coo = csr.tocoo()
        assert_array_equal(F.asnumpy(dst), coo.row)
        assert_array_equal(F.asnumpy(src), coo.col)
        feat = F.asnumpy(g.nodes[0].data['feat'])
        assert_array_equal(np.squeeze(feat), np.arange(10, dtype=feat.dtype))
        feat = F.asnumpy(g.edges[0].data['feat'])
        assert_array_equal(np.squeeze(feat), np.arange(10, dtype=feat.dtype))
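        # Create new node and edge data in the store and check that all
        # workers see the same shared-memory tensors.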
        g.init_ndata('test4', (g.number_of_nodes(), 10), 'float32')
        g.init_edata('test4', (g.number_of_edges(), 10), 'float32')
        g._sync_barrier(60)
        check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])

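        # A tensor view taken before the update should reflect writes made
        # through set_n_repr/set_e_repr.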
        data = g.nodes[:].data['test4']
        g.set_n_repr({'test4': F.ones((1, 10)) * 10}, u=[0])
        assert_almost_equal(F.asnumpy(data[0]), np.squeeze(F.asnumpy(g.nodes[0].data['test4'])))

        data = g.edges[:].data['test4']
        g.set_e_repr({'test4': F.ones((1, 10)) * 20}, edges=[0])
        assert_almost_equal(F.asnumpy(data[0]), np.squeeze(F.asnumpy(g.edges[0].data['test4'])))

        g.destroy()
        return_dict[worker_id] = 0
    except Exception as e:
        return_dict[worker_id] = -1
        g.destroy()
        print(e, file=sys.stderr)
        traceback.print_exc()

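# Server side: build the graph from a random CSR matrix, expose it as a
# shared-memory graph store with node/edge features, and serve the workers.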
def server_func(num_workers, graph_name):
    print("server starts")
    np.random.seed(0)
    csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)

    g = dgl.contrib.graph_store.create_graph_store_server(csr, graph_name, "shared_mem", num_workers,
                                                          False, edge_dir="in", port=rand_port)
    assert num_nodes == g._graph.number_of_nodes()
    assert num_edges == g._graph.number_of_edges()
    nfeat = np.arange(0, num_nodes * 10).astype('float32').reshape((num_nodes, 10))
    efeat = np.arange(0, num_edges * 10).astype('float32').reshape((num_edges, 10))
    g.ndata['feat'] = F.tensor(nfeat)
    g.edata['feat'] = F.tensor(efeat)
    g.run()

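# Spawn one server and two workers and check that every worker succeeds.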
def test_init():
    manager = Manager()
    return_dict = manager.dict()
    serv_p = Process(target=server_func, args=(2, 'test_graph1'))
    work_p1 = Process(target=check_init_func, args=(0, 'test_graph1', return_dict))
    work_p2 = Process(target=check_init_func, args=(1, 'test_graph1', return_dict))
    serv_p.start()
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id


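# Worker-side checks for message passing on the shared graph: update_all,
# apply_nodes, apply_edges, pull, and send_and_recv.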
def check_compute_func(worker_id, graph_name, return_dict):
    time.sleep(3)
    print("worker starts")
    try:
        g = create_graph_store(graph_name)
        if g is None:
            return_dict[worker_id] = -1
            return

        g._sync_barrier(60)
        in_feats = g.nodes[0].data['feat'].shape[1]

        # Test update all.
        g.update_all(fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='preprocess'))
        adj = g.adjacency_matrix()
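        # With copy_src and sum, update_all should match a sparse matmul of
        # the adjacency matrix with the node features.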
        tmp = F.spmm(adj, g.nodes[:].data['feat'])
        assert_almost_equal(F.asnumpy(g.nodes[:].data['preprocess']), F.asnumpy(tmp))
        g._sync_barrier(60)
        check_array_shared_memory(g, worker_id, [g.nodes[:].data['preprocess']])

        # Test apply nodes.
        data = g.nodes[:].data['feat']
        g.apply_nodes(func=lambda nodes: {'feat': F.ones((1, in_feats)) * 10}, v=0)
        assert_almost_equal(F.asnumpy(data[0]), np.squeeze(F.asnumpy(g.nodes[0].data['feat'])))

        # Test apply edges.
        data = g.edges[:].data['feat']
        g.apply_edges(func=lambda edges: {'feat': F.ones((1, in_feats)) * 10}, edges=0)
        assert_almost_equal(F.asnumpy(data[0]), np.squeeze(F.asnumpy(g.edges[0].data['feat'])))

        g.init_ndata('tmp', (g.number_of_nodes(), 10), 'float32')
        data = g.nodes[:].data['tmp']
        # Test pull.
        g.pull(1, fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='tmp'))
        assert_almost_equal(F.asnumpy(data[1]), np.squeeze(F.asnumpy(g.nodes[1].data['preprocess'])))

        # Test send_and_recv.
        in_edges = g.in_edges(v=2)
        g.send_and_recv(in_edges, fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='tmp'))
        assert_almost_equal(F.asnumpy(data[2]), np.squeeze(F.asnumpy(g.nodes[2].data['preprocess'])))

        g.destroy()
        return_dict[worker_id] = 0
    except Exception as e:
        return_dict[worker_id] = -1
        g.destroy()
        print(e, file=sys.stderr)
        traceback.print_exc()

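# Same process layout as test_init, but exercising message passing.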
def test_compute():
    manager = Manager()
    return_dict = manager.dict()
    serv_p = Process(target=server_func, args=(2, 'test_graph3'))
    work_p1 = Process(target=check_compute_func, args=(0, 'test_graph3', return_dict))
    work_p2 = Process(target=check_compute_func, args=(1, 'test_graph3', return_dict))
    serv_p.start()
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id

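# Worker 1 leaves immediately, so worker 0 can never pass the barrier; the
# test checks that _sync_barrier times out within roughly the requested window
# instead of hanging.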
def check_sync_barrier(worker_id, graph_name, return_dict):
    time.sleep(3)
    print("worker starts")
    try:
        g = create_graph_store(graph_name)
        if g is None:
            return_dict[worker_id] = -1
            return

        if worker_id == 1:
            g.destroy()
            return_dict[worker_id] = 0
            return

        start = time.time()
        try:
            g._sync_barrier(10)
        except TimeoutError as e:
            # This timing check is very loose.
            print("timeout: " + str(abs(time.time() - start)), file=sys.stderr)
            assert 5 < abs(time.time() - start) < 15
        g.destroy()
        return_dict[worker_id] = 0
    except Exception as e:
        return_dict[worker_id] = -1
        g.destroy()
        print(e, file=sys.stderr)
        traceback.print_exc()


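# Same process layout as test_init, but exercising the barrier timeout path.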
def test_sync_barrier():
    manager = Manager()
    return_dict = manager.dict()
    serv_p = Process(target=server_func, args=(2, 'test_graph4'))
    work_p1 = Process(target=check_sync_barrier, args=(0, 'test_graph4', return_dict))
    work_p2 = Process(target=check_sync_barrier, args=(1, 'test_graph4', return_dict))
    serv_p.start()
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id

if __name__ == '__main__':
    test_init()
    test_sync_barrier()
    test_compute()