"docs/source/vscode:/vscode.git/clone" did not exist on "d427f36510798863a3953ba4ebf6ab364717bbbb"
import torch as th
import networkx as nx
import dgl
import dgl.nn.pytorch as nn
import dgl.function as fn
import backend as F

from copy import deepcopy

import numpy as np
import scipy as sp

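# Dense reference implementation: an unnormalized graph convolution is just
# A @ (X W) + b; the GraphConv tests below compare against it.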
def _AXWb(A, X, W, b):
    X = th.matmul(X, W)
    Y = th.matmul(A, X.view(X.shape[0], -1)).view_as(X)
    return Y + b

def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    conv = conv.to(ctx)
    print(conv)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    conv = conv.to(ctx)
    # test#3: basic, with default normalization
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: more-dim, with default normalization
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)

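# Dense reference for a 2-hop TAGConv: stack [X, A_hat X, A_hat^2 X] with
# A_hat = N A N (symmetric degree normalization), then apply the linear layer;
# W.rot90() appears to compensate for how nn.TAGConv lays out its per-hop weights.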
def _S2AXWb(A, N, X, W, b):
    X1 = X * N
    X1 = th.matmul(A, X1.view(X1.shape[0], -1))
    X1 = X1 * N
    X2 = X1 * N
    X2 = th.matmul(A, X2.view(X2.shape[0], -1))
    X2 = X2 * N
    X = th.cat([X, X1, X2], dim=-1)
    Y = th.matmul(X, W.rot90())

    return Y + b

def test_tagconv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)
    norm = th.pow(g.in_degrees().float(), -0.5)

    conv = nn.TAGConv(5, 2, bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1,) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)

    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight, conv.lin.bias))

    conv = nn.TAGConv(5, 2)
    conv = conv.to(ctx)

    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == 2

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)

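# Set2Set readouts are twice the input width (the LSTM query concatenated with
# the attention-weighted sum), hence the expected width 10 below.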
def test_set2set():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    s2s = nn.Set2Set(5, 3, 3) # hidden size 5, 3 iters, 3 layers
    s2s = s2s.to(ctx)
    print(s2s)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = s2s(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(11))
    g2 = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = s2s(bg, h0)
    assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.dim() == 2

def test_glob_att_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10))
    gap = gap.to(ctx)
    print(gap)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = gap(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2

    # test#2: batched graph
    bg = dgl.batch([g, g, g, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = gap(bg, h0)
    assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2

def test_simple_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
    sort_pool = nn.SortPooling(10) # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    sum_pool = sum_pool.to(ctx)
    avg_pool = avg_pool.to(ctx)
    max_pool = max_pool.to(ctx)
    sort_pool = sort_pool.to(ctx)
    h1 = sum_pool(g, h0)
    assert F.allclose(F.squeeze(h1, 0), F.sum(h0, 0))
    h1 = avg_pool(g, h0)
    assert F.allclose(F.squeeze(h1, 0), F.mean(h0, 0))
    h1 = max_pool(g, h0)
    assert F.allclose(F.squeeze(h1, 0), F.max(h0, 0))
    h1 = sort_pool(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 * 5 and h1.dim() == 2

    # test#2: batched graph
    g_ = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = sum_pool(bg, h0)
    truth = th.stack([F.sum(h0[:15], 0),
                      F.sum(h0[15:20], 0),
                      F.sum(h0[20:35], 0),
                      F.sum(h0[35:40], 0),
                      F.sum(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = avg_pool(bg, h0)
    truth = th.stack([F.mean(h0[:15], 0),
                      F.mean(h0[15:20], 0),
                      F.mean(h0[20:35], 0),
                      F.mean(h0[35:40], 0),
                      F.mean(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = max_pool(bg, h0)
    truth = th.stack([F.max(h0[:15], 0),
                      F.max(h0[15:20], 0),
                      F.max(h0[20:35], 0),
                      F.max(h0[35:40], 0),
                      F.max(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = sort_pool(bg, h0)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.dim() == 2

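# Both SetTransformer encoder blocks ('sab' and 'isab') preserve the input
# shape; the decoder pools each graph to k * d_model = 4 * 50 = 200 features.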
def test_set_trans():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    st_enc_0 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'sab')
    st_enc_1 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'isab', 3)
    st_dec = nn.SetTransformerDecoder(50, 5, 10, 100, 2, 4)
    st_enc_0 = st_enc_0.to(ctx)
    st_enc_1 = st_enc_1.to(ctx)
    st_dec = st_dec.to(ctx)
    print(st_enc_0, st_enc_1, st_dec)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 50))
    h1 = st_enc_0(g, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(g, h0)
    assert h1.shape == h0.shape
    h2 = st_dec(g, h1)
    assert h2.shape[0] == 1 and h2.shape[1] == 200 and h2.dim() == 2

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(5))
    g2 = dgl.DGLGraph(nx.path_graph(10))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 50))
    h1 = st_enc_0(bg, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(bg, h0)
    assert h1.shape == h0.shape

    h2 = st_dec(bg, h1)
    assert h2.shape[0] == 3 and h2.shape[1] == 200 and h2.dim() == 2

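# With identical logits on every edge, edge softmax degenerates to uniform
# attention: each edge gets weight 1 / in_degree of its destination node.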
def uniform_attention(g, shape):
    a = th.ones(shape)
    target_shape = (g.number_of_edges(),) + (1,) * (len(shape) - 1)
    return a / g.in_degrees(g.edges()[1]).view(target_shape).float()

def test_edge_softmax():
    # Basic
    g = dgl.DGLGraph(nx.path_graph(3))
    edata = F.ones((g.number_of_edges(), 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test higher dimension case
    edata = F.ones((g.number_of_edges(), 3, 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test both forward and backward with PyTorch built-in softmax.
    g = dgl.DGLGraph()
    g.add_nodes(30)
    # build a complete graph
    for i in range(30):
        for j in range(30):
            g.add_edge(i, j)

    score = F.randn((900, 1))
    score.requires_grad_()
    grad = F.randn((900, 1))
    # edges were added source-major, so view(30, 30) puts one destination per
    # column; softmax over dim=0 normalizes the incoming edges of each node,
    # which is what edge_softmax computes
    y = F.softmax(score.view(30, 30), dim=0).view(-1, 1)
    y.backward(grad)
    # clone, otherwise zero_() below would wipe the saved gradient too
    grad_score = score.grad.clone()
    score.grad.zero_()
    y_dgl = nn.edge_softmax(g, score)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # check forward
    assert F.allclose(y_dgl, y)
    y_dgl.backward(grad)
    # check gradient
    assert F.allclose(score.grad, grad_score)
    print(score.grad[:10], grad_score[:10])

    # Test 2
    def generate_rand_graph(n, m=None, ctor=dgl.DGLGraph):
        if m is None:
            m = n
        arr = (sp.sparse.random(m, n, density=0.1, format='coo') != 0).astype(np.int64)
        return ctor(arr, readonly=True)

    for g in [generate_rand_graph(50),
              generate_rand_graph(50, ctor=dgl.graph),
              generate_rand_graph(100, 50, ctor=dgl.bipartite)]:
        a1 = F.randn((g.number_of_edges(), 1)).requires_grad_()
        a2 = a1.clone().detach().requires_grad_()
        g.edata['s'] = a1
        g.group_apply_edges('dst', lambda edges: {'ss': F.softmax(edges.data['s'], 1)})
        g.edata['ss'].sum().backward()

        builtin_sm = nn.edge_softmax(g, a2)
        builtin_sm.sum().backward()
        print(a1.grad - a2.grad)
        assert len(g.srcdata) == 0
        assert len(g.dstdata) == 0
        assert len(g.edata) == 2
        assert F.allclose(a1.grad, a2.grad, rtol=1e-4, atol=1e-4) # Follow tolerance in unittest backend

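# Softmax over a subset of edge ids should match a full edge softmax on the
# edge-induced subgraph, in both the forward values and the gradients.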
def test_partial_edge_softmax():
    g = dgl.DGLGraph()
    g.add_nodes(30)
    # build a complete graph
    for i in range(30):
        for j in range(30):
            g.add_edge(i, j)

    score = F.randn((300, 1))
    score.requires_grad_()
    grad = F.randn((300, 1))
    eids = np.random.choice(900, 300, replace=False).astype('int64')
    eids = F.zerocopy_from_numpy(eids)
    # compute partial edge softmax
    y_1 = nn.edge_softmax(g, score, eids)
    y_1.backward(grad)
    grad_1 = score.grad
    score.grad.zero_()
    # compute edge softmax on edge subgraph
    subg = g.edge_subgraph(eids)
    y_2 = nn.edge_softmax(subg, score)
    y_2.backward(grad)
    grad_2 = score.grad
    score.grad.zero_()

    assert F.allclose(y_1, y_2)
    assert F.allclose(grad_1, grad_2)

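# Exercise RelGraphConv with both weight regularizers, 'basis' (shared basis
# matrices) and 'bdd' (block-diagonal decomposition), with and without edge
# norms, and with node-id (embedding lookup) input.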
def test_rgcn():
    ctx = F.ctx()
    etype = []
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    # 5 etypes
    R = 5
    for i in range(g.number_of_edges()):
        etype.append(i % 5)
    B = 2
    I = 10
    O = 8

    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]

    rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_bdd(g, h, r)
    assert list(h_new.shape) == [100, O]

    # with norm
    norm = th.zeros((g.number_of_edges(), 1)).to(ctx)

    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r, norm)
    assert list(h_new.shape) == [100, O]

    rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_bdd(g, h, r, norm)
    assert list(h_new.shape) == [100, O]

    # id input
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randint(0, I, (100,)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]

def test_gat_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((100, 5))
    gat = gat.to(ctx)
    h = gat(g, feat)
    # output is (num_nodes, num_heads, out_feats) = (100, 4, 2)
    assert h.shape[-1] == 2 and h.shape[-2] == 4

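# SAGEConv is exercised with every aggregator on a homogeneous DGLGraph, a
# dgl.graph, and a bipartite graph; the bipartite case takes a (src, dst)
# feature pair, and 'gcn' aggregation needs matching src/dst widths.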
def test_sage_conv():
    for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((100, 5))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 10

        g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((100, 5))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 10

        g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
        feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 200

def test_sgc_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    # not cached
    sgc = nn.SGConv(5, 10, 3)
    feat = F.randn((100, 5))
    sgc = sgc.to(ctx)

    h = sgc(g, feat)
    assert h.shape[-1] == 10

    # cached: the propagated features from the first call are reused, so a
    # different input must not change the output
    sgc = nn.SGConv(5, 10, 3, True)
    sgc = sgc.to(ctx)
    h_0 = sgc(g, feat)
    h_1 = sgc(g, feat + 1)
    assert F.allclose(h_0, h_1)
    assert h_0.shape[-1] == 10

def test_appnp_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    appnp = nn.APPNPConv(10, 0.1)
    feat = F.randn((100, 5))
    appnp = appnp.to(ctx)

    # APPNP only propagates, so the feature width is unchanged
    h = appnp(g, feat)
    assert h.shape[-1] == 5

def test_gin_conv():
    for aggregator_type in ['mean', 'max', 'sum']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        gin = nn.GINConv(
            th.nn.Linear(5, 12),
            aggregator_type
        )
        feat = F.randn((100, 5))
        gin = gin.to(ctx)
        h = gin(g, feat)
        assert h.shape[-1] == 12

def test_agnn_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    agnn = nn.AGNNConv(1)
    feat = F.randn((100, 5))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape[-1] == 5

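# GatedGraphConv(in_feats=5, out_feats=10, n_steps=5, n_etypes=3) expects a
# type id in [0, n_etypes) for every edge.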
def test_gated_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    ggconv = nn.GatedGraphConv(5, 10, 5, 3)
    etypes = th.arange(g.number_of_edges()) % 3
    feat = F.randn((100, 5))
    ggconv = ggconv.to(ctx)
    etypes = etypes.to(ctx)

    h = ggconv(g, feat, etypes)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_nn_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((100, 5))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_gmm_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
    feat = F.randn((100, 5))
    pseudo = F.randn((g.number_of_edges(), 3))
    gmmconv = gmmconv.to(ctx)
    h = gmmconv(g, feat, pseudo)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_dense_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    dense_conv = nn.DenseGraphConv(5, 2, norm=False, bias=True)
    # share parameters so the sparse and dense versions compute the same function
    dense_conv.weight.data = conv.weight.data
    dense_conv.bias.data = conv.bias.data
    feat = F.randn((100, 5))
    conv = conv.to(ctx)
    dense_conv = dense_conv.to(ctx)
    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)

def test_dense_sage_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    sage = nn.SAGEConv(5, 2, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, 2)
    dense_sage.fc.weight.data = sage.fc_neigh.weight.data
    dense_sage.fc.bias.data = sage.fc_neigh.bias.data
    feat = F.randn((100, 5))
    sage = sage.to(ctx)
    dense_sage = dense_sage.to(ctx)
    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage)

def test_dense_cheb_conv():
    for k in range(1, 4):
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        adj = g.adjacency_matrix(ctx=ctx).to_dense()
        cheb = nn.ChebConv(5, 2, k)
        dense_cheb = nn.DenseChebConv(5, 2, k)
        for i in range(len(cheb.fc)):
            dense_cheb.W.data[i] = cheb.fc[i].weight.data.t()
        if cheb.bias is not None:
            dense_cheb.bias.data = cheb.bias.data
        feat = F.randn((100, 5))
        cheb = cheb.to(ctx)
        dense_cheb = dense_cheb.to(ctx)
        out_cheb = cheb(g, feat, [2.0])
        out_dense_cheb = dense_cheb(adj, feat, 2.0)
        assert F.allclose(out_cheb, out_dense_cheb)

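# nn.Sequential threads (graph, features) through each layer; given a list of
# graphs, the i-th graph goes to the i-th layer. In the second case below each
# layer also halves the node count, so 32 -> 16 -> 8 -> 4 rows come out.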
def test_sequential():
    ctx = F.ctx()
    # Test single graph
    class ExampleLayer(th.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, graph, n_feat, e_feat):
            graph = graph.local_var()
            graph.ndata['h'] = n_feat
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            n_feat += graph.ndata['h']
            graph.apply_edges(fn.u_add_v('h', 'h', 'e'))
            e_feat += graph.edata['e']
            return n_feat, e_feat

    g = dgl.DGLGraph()
    g.add_nodes(3)
    g.add_edges([0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2])
    net = nn.Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer())
    n_feat = F.randn((3, 4))
    e_feat = F.randn((9, 4))
    net = net.to(ctx)
    n_feat, e_feat = net(g, n_feat, e_feat)
    assert n_feat.shape == (3, 4)
    assert e_feat.shape == (9, 4)

    # Test multiple graph
    class ExampleLayer(th.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, graph, n_feat):
            graph = graph.local_var()
            graph.ndata['h'] = n_feat
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            n_feat += graph.ndata['h']
            return n_feat.view(graph.number_of_nodes() // 2, 2, -1).sum(1)

    g1 = dgl.DGLGraph(nx.erdos_renyi_graph(32, 0.05))
    g2 = dgl.DGLGraph(nx.erdos_renyi_graph(16, 0.2))
    g3 = dgl.DGLGraph(nx.erdos_renyi_graph(8, 0.8))
    net = nn.Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer())
    net = net.to(ctx)
    n_feat = F.randn((32, 4))
    n_feat = net([g1, g2, g3], n_feat)
    assert n_feat.shape == (4, 4)

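# AtomicConv appears to emit one channel per (radial filter, atom type) pair,
# so two interaction cutoffs and two features_to_use entries give width 4.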
def test_atomic_conv():
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    aconv = nn.AtomicConv(interaction_cutoffs=F.tensor([12.0, 12.0]),
                          rbf_kernel_means=F.tensor([0.0, 2.0]),
                          rbf_kernel_scaling=F.tensor([4.0, 4.0]),
                          features_to_use=F.tensor([6.0, 8.0]))

    ctx = F.ctx()
    if F.gpu_ctx():
        aconv = aconv.to(ctx)

    feat = F.randn((100, 1))
    dist = F.randn((g.number_of_edges(), 1))

    h = aconv(g, feat, dist)
    # currently we only do shape check
    assert h.shape[-1] == 4

def test_cf_conv():
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    cfconv = nn.CFConv(node_in_feats=2,
                       edge_in_feats=3,
                       hidden_feats=2,
                       out_feats=3)

    ctx = F.ctx()
    if F.gpu_ctx():
        cfconv = cfconv.to(ctx)

    node_feats = F.randn((100, 2))
    edge_feats = F.randn((g.number_of_edges(), 3))
    h = cfconv(g, node_feats, edge_feats)
    # currently we only do shape check
    assert h.shape[-1] == 3

if __name__ == '__main__':
    test_graph_conv()
    test_edge_softmax()
    test_partial_edge_softmax()
    test_set2set()
    test_glob_att_pool()
    test_simple_pool()
    test_set_trans()
    test_rgcn()
    test_tagconv()
    test_gat_conv()
    test_sage_conv()
    test_sgc_conv()
    test_appnp_conv()
    test_gin_conv()
    test_agnn_conv()
    test_gated_graph_conv()
    test_nn_conv()
    test_gmm_conv()
    test_dense_graph_conv()
    test_dense_sage_conv()
    test_dense_cheb_conv()
    test_sequential()
    test_atomic_conv()
    test_cf_conv()