import torch as th
import networkx as nx
import dgl
import dgl.nn.pytorch as nn
import backend as F
from copy import deepcopy

import numpy as np
import scipy as sp

def _AXWb(A, X, W, b):
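    """Dense reference for un-normalized GraphConv: Y = A (X W) + b."""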
    X = th.matmul(X, W)
    Y = th.matmul(A, X.view(X.shape[0], -1)).view_as(X)
    return Y + b

def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    print(conv)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)

def _S2AXWb(A, N, X, W, b):
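    """Dense reference for a 2-hop TAGConv: with N = D^{-1/2}, stacks
    [X, (NAN)X, (NAN)^2 X] and applies the linear layer; rot90 is how this
    test lines the weight up with TAGConv's lin.weight layout."""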
    X1 = X * N
    X1 = th.matmul(A, X1.view(X1.shape[0], -1))
    X1 = X1 * N
    X2 = X1 * N
    X2 = th.matmul(A, X2.view(X2.shape[0], -1))
    X2 = X2 * N
    X = th.cat([X, X1, X2], dim=-1)
    Y = th.matmul(X, W.rot90())

    return Y + b

def test_tagconv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)
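    # TAGConv uses symmetric normalization; norm holds D^{-1/2} per node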
    norm = th.pow(g.in_degrees().float(), -0.5)

    conv = nn.TAGConv(5, 2, bias=True)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    print(conv)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1,) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)

    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight, conv.lin.bias))

    conv = nn.TAGConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == 2

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)

def test_set2set():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    s2s = nn.Set2Set(5, 3, 3) # hidden size 5, 3 iters, 3 layers
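    # Set2Set concatenates the LSTM query with the attention readout, so
    # the output size is 2 * input size = 10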
    if F.gpu_ctx():
        s2s = s2s.to(ctx)
    print(s2s)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = s2s(g, h0)
    assert h1.shape[0] == 10 and h1.dim() == 1

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(11))
    g2 = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = s2s(bg, h0)
    assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.dim() == 2

def test_glob_att_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10))
    if F.gpu_ctx():
        gap = gap.to(ctx)
    print(gap)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = gap(g, h0)
    assert h1.shape[0] == 10 and h1.dim() == 1

    # test#2: batched graph
    bg = dgl.batch([g, g, g, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = gap(bg, h0)
    assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2

def test_simple_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
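    # SortPooling sorts nodes by their last feature channel and keeps the
    # top k (zero-padding graphs with fewer than k nodes), giving a
    # k * feat_dim vector per graph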
    sort_pool = nn.SortPooling(10) # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    if F.gpu_ctx():
        sum_pool = sum_pool.to(ctx)
        avg_pool = avg_pool.to(ctx)
        max_pool = max_pool.to(ctx)
        sort_pool = sort_pool.to(ctx)
        h0 = h0.to(ctx)
    h1 = sum_pool(g, h0)
    assert F.allclose(h1, F.sum(h0, 0))
    h1 = avg_pool(g, h0)
    assert F.allclose(h1, F.mean(h0, 0))
    h1 = max_pool(g, h0)
    assert F.allclose(h1, F.max(h0, 0))
    h1 = sort_pool(g, h0)
    assert h1.shape[0] == 10 * 5 and h1.dim() == 1

    # test#2: batched graph
    g_ = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    if F.gpu_ctx():
        h0 = h0.to(ctx)

    h1 = sum_pool(bg, h0)
    truth = th.stack([F.sum(h0[:15], 0),
                      F.sum(h0[15:20], 0),
                      F.sum(h0[20:35], 0),
                      F.sum(h0[35:40], 0),
                      F.sum(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = avg_pool(bg, h0)
    truth = th.stack([F.mean(h0[:15], 0),
                      F.mean(h0[15:20], 0),
                      F.mean(h0[20:35], 0),
                      F.mean(h0[35:40], 0),
                      F.mean(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = max_pool(bg, h0)
    truth = th.stack([F.max(h0[:15], 0),
                      F.max(h0[15:20], 0),
                      F.max(h0[20:35], 0),
                      F.max(h0[35:40], 0),
                      F.max(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = sort_pool(bg, h0)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.dim() == 2

def test_set_trans():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    st_enc_0 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'sab')
    st_enc_1 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'isab', 3)
    st_dec = nn.SetTransformerDecoder(50, 5, 10, 100, 2, 4)
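    # the decoder reads out k = 4 seed vectors of size d = 50, flattened
    # to 200 per graph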
    if F.gpu_ctx():
        st_enc_0 = st_enc_0.to(ctx)
        st_enc_1 = st_enc_1.to(ctx)
        st_dec = st_dec.to(ctx)
    print(st_enc_0, st_enc_1, st_dec)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 50))
    h1 = st_enc_0(g, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(g, h0)
    assert h1.shape == h0.shape
    h2 = st_dec(g, h1)
    assert h2.shape[0] == 200 and h2.dim() == 1

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(5))
    g2 = dgl.DGLGraph(nx.path_graph(10))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 50))
    h1 = st_enc_0(bg, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(bg, h0)
    assert h1.shape == h0.shape

    h2 = st_dec(bg, h1)
    assert h2.shape[0] == 3 and h2.shape[1] == 200 and h2.dim() == 2

def uniform_attention(g, shape):
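    """Expected edge_softmax of all-equal logits: softmax over each node's
    in-edges is uniform, i.e. 1 / in-degree of the edge's destination."""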
    a = th.ones(shape)
    target_shape = (g.number_of_edges(),) + (1,) * (len(shape) - 1)
    return a / g.in_degrees(g.edges()[1]).view(target_shape).float()

def test_edge_softmax():
    # Basic
    g = dgl.DGLGraph(nx.path_graph(3))
    edata = F.ones((g.number_of_edges(), 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test higher dimension case
    edata = F.ones((g.number_of_edges(), 3, 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test both forward and backward with PyTorch built-in softmax.
    g = dgl.DGLGraph()
    g.add_nodes(30)
    # build a fully-connected graph (including self-loops)
    for i in range(30):
        for j in range(30):
            g.add_edge(i, j)
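    # edge (i, j) gets id i * 30 + j, so score.view(30, 30) puts all
    # in-edges of node j in column j; the softmax over dim 0 below
    # therefore matches edge_softmax's per-destination normalization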

    score = F.randn((900, 1))
    score.requires_grad_()
    grad = F.randn((900, 1))
    y = F.softmax(score.view(30, 30), dim=0).view(-1, 1)
    y.backward(grad)
    # clone: .grad is accumulated into the same tensor in place, so keep a
    # copy before zeroing it
    grad_score = score.grad.clone()
    score.grad.zero_()
    y_dgl = nn.edge_softmax(g, score)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # check forward
    assert F.allclose(y_dgl, y)
    y_dgl.backward(grad)
    # check gradient
    assert F.allclose(score.grad, grad_score)
    print(score.grad[:10], grad_score[:10])
    
    # Test 2
    def generate_rand_graph(n):
        arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
        return dgl.DGLGraph(arr, readonly=True)

    g = generate_rand_graph(50)
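    # reference implementation: group edges by destination and softmax
    # within each group; edge_softmax on the same scores should match in
    # both value and gradient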
    a1 = F.randn((g.number_of_edges(), 1)).requires_grad_()
    a2 = a1.clone().detach().requires_grad_()
    g.edata['s'] = a1
    g.group_apply_edges('dst', lambda edges: {'ss':F.softmax(edges.data['s'], 1)})
    g.edata['ss'].sum().backward()
    
    builtin_sm = nn.edge_softmax(g, a2)
    builtin_sm.sum().backward()
    print(a1.grad - a2.grad)
    assert len(g.ndata) == 0
    assert len(g.edata) == 2
    assert F.allclose(a1.grad, a2.grad, rtol=1e-4, atol=1e-4) # Follow tolerance in unittest backend
    
def test_rgcn():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    # R = 5 edge types, assigned round-robin over the edges
    R = 5
    etype = [i % R for i in range(g.number_of_edges())]
    B = 2   # number of bases / block-diagonal blocks
    I = 10  # input feature size
    O = 8   # output feature size

    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]

    rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_bdd(g, h, r)
    assert list(h_new.shape) == [100, O]

    # with norm
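    # an all-zero norm just exercises the edge-norm code path; only output
    # shapes are checked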
    norm = th.zeros((g.number_of_edges(), 1)).to(ctx)

    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r, norm)
    assert list(h_new.shape) == [100, O]

    rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_bdd(g, h, r, norm)
    assert list(h_new.shape) == [100, O]

    # integer node-ID input (treated as embedding indices)
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randint(0, I, (100,)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]

def test_gat_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
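    # GATConv(in_feats=5, out_feats=2, num_heads=4): per-node output of
    # shape (4, 2)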
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        gat = gat.to(ctx)
        feat = feat.to(ctx)

    h = gat(g, feat)
    assert h.shape[-1] == 2 and h.shape[-2] == 4

def test_sage_conv():
    for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((100, 5))

        if F.gpu_ctx():
            sage = sage.to(ctx)
            feat = feat.to(ctx)

        h = sage(g, feat)
        assert h.shape[-1] == 10

def test_sgc_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    # not cached
    sgc = nn.SGConv(5, 10, 3)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        sgc = sgc.to(ctx)
        feat = feat.to(ctx)

    h = sgc(g, feat)
    assert h.shape[-1] == 10

    # cached
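    # cached=True reuses the propagated features from the first call, so a
    # second call with a different input must return the same output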
    sgc = nn.SGConv(5, 10, 3, True)

    if F.gpu_ctx():
        sgc = sgc.to(ctx)

    h_0 = sgc(g, feat)
    h_1 = sgc(g, feat + 1)
    assert F.allclose(h_0, h_1)
    assert h_0.shape[-1] == 10

def test_appnp_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
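    # APPNPConv(k=10, alpha=0.1): 10 propagation steps with teleport
    # probability 0.1; the feature size is preserved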
    appnp = nn.APPNPConv(10, 0.1)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        appnp = appnp.to(ctx)
        feat = feat.to(ctx)

    h = appnp(g, feat)
    assert h.shape[-1] == 5

def test_gin_conv():
    for aggregator_type in ['mean', 'max', 'sum']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        gin = nn.GINConv(
            th.nn.Linear(5, 12),
            aggregator_type
        )
        feat = F.randn((100, 5))

        if F.gpu_ctx():
            gin = gin.to(ctx)
            feat = feat.to(ctx)

        h = gin(g, feat)
        assert h.shape[-1] == 12

def test_agnn_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
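    # AGNNConv(init_beta=1) applies attention only, so the feature size is
    # preserved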
    agnn = nn.AGNNConv(1)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        agnn = agnn.to(ctx)
        feat = feat.to(ctx)

    h = agnn(g, feat)
    assert h.shape[-1] == 5

def test_gated_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
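    # GatedGraphConv(in_feats=5, out_feats=10, n_steps=5, n_etypes=3)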
    ggconv = nn.GatedGraphConv(5, 10, 5, 3)
    etypes = th.arange(g.number_of_edges()) % 3
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        ggconv = ggconv.to(ctx)
        feat = feat.to(ctx)
        etypes = etypes.to(ctx)

    h = ggconv(g, feat, etypes)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_nn_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
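    # the edge network maps each 4-d edge feature to a 5 x 10 weight
    # matrix, as in MPNN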
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((100, 5))
    efeat = F.randn((g.number_of_edges(), 4))

    if F.gpu_ctx():
        nnconv = nnconv.to(ctx)
        feat = feat.to(ctx)
        efeat = efeat.to(ctx)

    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_gmm_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
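    # GMMConv with 3-d pseudo-coordinates and 4 Gaussian kernels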
    gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
    feat = F.randn((100, 5))
    pseudo = F.randn((g.number_of_edges(), 3))

    if F.gpu_ctx():
        gmmconv = gmmconv.to(ctx)
        feat = feat.to(ctx)
        pseudo = pseudo.to(ctx)

    h = gmmconv(g, feat, pseudo)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_dense_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    dense_conv = nn.DenseGraphConv(5, 2, norm=False, bias=True)
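    # share parameters so the sparse and dense implementations must agree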
    dense_conv.weight.data = conv.weight.data
    dense_conv.bias.data = conv.bias.data
    feat = F.randn((100, 5))
    if F.gpu_ctx():
        conv = conv.to(ctx)
        dense_conv = dense_conv.to(ctx)
        feat = feat.to(ctx)

    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)

def test_dense_sage_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    sage = nn.SAGEConv(5, 2, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, 2)
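    # with the 'gcn' aggregator SAGEConv only uses fc_neigh, so sharing it
    # should make both implementations agree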
    dense_sage.fc.weight.data = sage.fc_neigh.weight.data
    dense_sage.fc.bias.data = sage.fc_neigh.bias.data
    feat = F.randn((100, 5))
    if F.gpu_ctx():
        sage = sage.to(ctx)
        dense_sage = dense_sage.to(ctx)
        feat = feat.to(ctx)

    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage)

def test_dense_cheb_conv():
    for k in range(1, 4):
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        adj = g.adjacency_matrix(ctx=ctx).to_dense()
        cheb = nn.ChebConv(5, 2, k)
        dense_cheb = nn.DenseChebConv(5, 2, k)
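        # ChebConv keeps one Linear per Chebyshev order; DenseChebConv
        # stacks the transposed weights in W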
        for i in range(len(cheb.fc)):
            dense_cheb.W.data[i] = cheb.fc[i].weight.data.t()
        if cheb.bias is not None:
            dense_cheb.bias.data = cheb.bias.data
        feat = F.randn((100, 5))
        if F.gpu_ctx():
            cheb = cheb.to(ctx)
            dense_cheb = dense_cheb.to(ctx)
            feat = feat.to(ctx)

        out_cheb = cheb(g, feat, [2.0])
        out_dense_cheb = dense_cheb(adj, feat, 2.0)
        assert F.allclose(out_cheb, out_dense_cheb)

if __name__ == '__main__':
    test_graph_conv()
    test_edge_softmax()
    test_set2set()
    test_glob_att_pool()
    test_simple_pool()
    test_set_trans()
    test_rgcn()
    test_tagconv()
    test_gat_conv()
    test_sage_conv()
    test_sgc_conv()
    test_appnp_conv()
    test_gin_conv()
    test_agnn_conv()
    test_gated_graph_conv()
    test_nn_conv()
    test_gmm_conv()
    test_dense_graph_conv()
    test_dense_sage_conv()
    test_dense_cheb_conv()