import torch as th
import networkx as nx
import dgl
import dgl.nn.pytorch as nn
import backend as F
from copy import deepcopy

import numpy as np
import scipy as sp
import scipy.sparse  # ensure sp.sparse is loaded for the random graphs below

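# Dense reference for GraphConv: Y = A @ (X @ W) + b.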
def _AXWb(A, X, W, b):
    X = th.matmul(X, W)
    Y = th.matmul(A, X.view(X.shape[0], -1)).view_as(X)
    return Y + b

def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    print(conv)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#5: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#6: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)

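# Dense reference for TAGConv with k = 2: stacks [X, S X, S^2 X] for the
# normalized adjacency S = N A N (N = D^{-1/2}), then applies the linear layer.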
def _S2AXWb(A, N, X, W, b):
    X1 = X * N
    X1 = th.matmul(A, X1.view(X1.shape[0], -1))
    X1 = X1 * N
    X2 = X1 * N
    X2 = th.matmul(A, X2.view(X2.shape[0], -1))
    X2 = X2 * N
    X = th.cat([X, X1, X2], dim=-1)
    Y = th.matmul(X, W.rot90())

    return Y + b

def test_tagconv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)
    norm = th.pow(g.in_degrees().float(), -0.5)

    conv = nn.TAGConv(5, 2, bias=True)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    print(conv)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1,) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)

    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight, conv.lin.bias))

    conv = nn.TAGConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)

    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == 2

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)

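# Set2Set emits a readout of size 2 * input size per graph
# (10 below for inputs of size 5).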
def test_set2set():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    s2s = nn.Set2Set(5, 3, 3) # hidden size 5, 3 iters, 3 layers
    if F.gpu_ctx():
        s2s = s2s.to(ctx)
    print(s2s)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = s2s(g, h0)
    assert h1.shape[0] == 10 and h1.dim() == 1

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(11))
    g2 = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = s2s(bg, h0)
    assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.dim() == 2

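# GlobalAttentionPooling takes a gate network (5 -> 1 attention logits per
# node) and a feature network (5 -> 10) applied before the weighted sum.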
def test_glob_att_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10))
    if F.gpu_ctx():
        gap = gap.to(ctx)
    print(gap)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = gap(g, h0)
    assert h1.shape[0] == 10 and h1.dim() == 1

    # test#2: batched graph
    bg = dgl.batch([g, g, g, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = gap(bg, h0)
    assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2

def test_simple_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
    sort_pool = nn.SortPooling(10) # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    if F.gpu_ctx():
        sum_pool = sum_pool.to(ctx)
        avg_pool = avg_pool.to(ctx)
        max_pool = max_pool.to(ctx)
        sort_pool = sort_pool.to(ctx)
        h0 = h0.to(ctx)
    h1 = sum_pool(g, h0)
    assert F.allclose(h1, F.sum(h0, 0))
    h1 = avg_pool(g, h0)
    assert F.allclose(h1, F.mean(h0, 0))
    h1 = max_pool(g, h0)
    assert F.allclose(h1, F.max(h0, 0))
    h1 = sort_pool(g, h0)
    assert h1.shape[0] == 10 * 5 and h1.dim() == 1

    # test#2: batched graph
    g_ = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    if F.gpu_ctx():
        h0 = h0.to(ctx)

    h1 = sum_pool(bg, h0)
    truth = th.stack([F.sum(h0[:15], 0),
                      F.sum(h0[15:20], 0),
                      F.sum(h0[20:35], 0),
                      F.sum(h0[35:40], 0),
                      F.sum(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = avg_pool(bg, h0)
    truth = th.stack([F.mean(h0[:15], 0),
                      F.mean(h0[15:20], 0),
                      F.mean(h0[20:35], 0),
                      F.mean(h0[35:40], 0),
                      F.mean(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = max_pool(bg, h0)
    truth = th.stack([F.max(h0[:15], 0),
                      F.max(h0[15:20], 0),
                      F.max(h0[20:35], 0),
                      F.max(h0[35:40], 0),
                      F.max(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = sort_pool(bg, h0)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.dim() == 2

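# SetTransformer blocks below: model size 50, 5 heads of size 10, hidden size
# 100, 2 layers; the decoder's 4 seed vectors yield the 200-dim graph readout.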
def test_set_trans():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    st_enc_0 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'sab')
    st_enc_1 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'isab', 3)
    st_dec = nn.SetTransformerDecoder(50, 5, 10, 100, 2, 4)
    if F.gpu_ctx():
        st_enc_0 = st_enc_0.to(ctx)
        st_enc_1 = st_enc_1.to(ctx)
        st_dec = st_dec.to(ctx)
    print(st_enc_0, st_enc_1, st_dec)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 50))
    h1 = st_enc_0(g, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(g, h0)
    assert h1.shape == h0.shape
    h2 = st_dec(g, h1)
    assert h2.shape[0] == 200 and h2.dim() == 1

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(5))
    g2 = dgl.DGLGraph(nx.path_graph(10))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 50))
    h1 = st_enc_0(bg, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(bg, h0)
    assert h1.shape == h0.shape

    h2 = st_dec(bg, h1)
    assert h2.shape[0] == 3 and h2.shape[1] == 200 and h2.dim() == 2

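# Expected edge_softmax of constant logits: 1 / in-degree of the edge's
# destination node, broadcast to `shape`.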
def uniform_attention(g, shape):
    a = th.ones(shape)
    target_shape = (g.number_of_edges(),) + (1,) * (len(shape) - 1)
    return a / g.in_degrees(g.edges()[1]).view(target_shape).float()

def test_edge_softmax():
    # Basic
    g = dgl.DGLGraph(nx.path_graph(3))
    edata = F.ones((g.number_of_edges(), 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test higher dimension case
    edata = F.ones((g.number_of_edges(), 3, 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test both forward and backward with PyTorch built-in softmax.
    g = dgl.DGLGraph()
    g.add_nodes(30)
    # build a complete graph
    for i in range(30):
        for j in range(30):
            g.add_edge(i, j)

    score = F.randn((900, 1))
    score.requires_grad_()
    grad = F.randn((900, 1))
    # Edges were added source-major, so view(30, 30) puts one destination per
    # column and softmax over dim=0 normalizes each node's incoming edges.
    y = F.softmax(score.view(30, 30), dim=0).view(-1, 1)
    y.backward(grad)
    # Clone before zeroing: score.grad is the same tensor the second backward
    # pass accumulates into.
    grad_score = score.grad.clone()
    score.grad.zero_()
    y_dgl = nn.edge_softmax(g, score)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # check forward
    assert F.allclose(y_dgl, y)
    y_dgl.backward(grad)
    # check gradient
    assert F.allclose(score.grad, grad_score)
    print(score.grad[:10], grad_score[:10])

    # Test 2
    def generate_rand_graph(n):
        arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
        return dgl.DGLGraph(arr, readonly=True)

    g = generate_rand_graph(50)
    a1 = F.randn((g.number_of_edges(), 1)).requires_grad_()
    a2 = a1.clone().detach().requires_grad_()
    g.edata['s'] = a1
    g.group_apply_edges('dst', lambda edges: {'ss': F.softmax(edges.data['s'], 1)})
    g.edata['ss'].sum().backward()

    builtin_sm = nn.edge_softmax(g, a2)
    builtin_sm.sum().backward()
    print(a1.grad - a2.grad)
    assert len(g.ndata) == 0
    assert len(g.edata) == 2
    assert F.allclose(a1.grad, a2.grad, rtol=1e-4, atol=1e-4) # Follow tolerance in unittest backend
    
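# RelGraphConv smoke tests: I input features, O output features, R relation
# types, and B bases ("basis" regularizer) or block-diagonal blocks ("bdd").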
def test_rgcn():
    ctx = F.ctx()
    etype = []
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    # 5 etypes
    R = 5
    for i in range(g.number_of_edges()):
        etype.append(i % 5)
    B = 2
    I = 10
    O = 8

    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]

    rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_bdd(g, h, r)
    assert list(h_new.shape) == [100, O]

    # with norm
    norm = th.zeros((g.number_of_edges(), 1)).to(ctx)

    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r, norm)
    assert list(h_new.shape) == [100, O]

    rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_bdd(g, h, r, norm)
    assert list(h_new.shape) == [100, O]

    # id input
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    h = th.randint(0, I, (100,)).to(ctx)
    r = th.tensor(etype).to(ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]

def test_gat_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        gat = gat.to(ctx)
        feat = feat.to(ctx)

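    # GATConv output shape is (N, num_heads, out_feats) = (100, 4, 2) here.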
    h = gat(g, feat)
    assert h.shape[-1] == 2 and h.shape[-2] == 4

def test_sage_conv():
    for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((100, 5))

        if F.gpu_ctx():
            sage = sage.to(ctx)
            feat = feat.to(ctx)

        h = sage(g, feat)
        assert h.shape[-1] == 10

def test_sgc_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    # not cached
    sgc = nn.SGConv(5, 10, 3)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        sgc = sgc.to(ctx)
        feat = feat.to(ctx)

    h = sgc(g, feat)
    assert h.shape[-1] == 10

    # cached
    sgc = nn.SGConv(5, 10, 3, True)

    if F.gpu_ctx():
        sgc = sgc.to(ctx)

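    # With cached=True the propagated features are computed once and reused,
    # so changing the input must not change the output.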
    h_0 = sgc(g, feat)
    h_1 = sgc(g, feat + 1)
    assert F.allclose(h_0, h_1)
    assert h_0.shape[-1] == 10

def test_appnp_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    appnp = nn.APPNPConv(10, 0.1)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        appnp = appnp.to(ctx)
        feat = feat.to(ctx)

    h = appnp(g, feat)
    assert h.shape[-1] == 5

def test_gin_conv():
    for aggregator_type in ['mean', 'max', 'sum']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        gin = nn.GINConv(
            th.nn.Linear(5, 12),
            aggregator_type
        )
        feat = F.randn((100, 5))

        if F.gpu_ctx():
            gin = gin.to(ctx)
            feat = feat.to(ctx)

        h = gin(g, feat)
        assert h.shape[-1] == 12

def test_agnn_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    agnn = nn.AGNNConv(1)
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        agnn = agnn.to(ctx)
        feat = feat.to(ctx)

    h = agnn(g, feat)
    assert h.shape[-1] == 5

def test_gated_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    ggconv = nn.GatedGraphConv(5, 10, 5, 3)
    etypes = th.arange(g.number_of_edges()) % 3
    feat = F.randn((100, 5))

    if F.gpu_ctx():
        ggconv = ggconv.to(ctx)
        feat = feat.to(ctx)
        etypes = etypes.to(ctx)

    h = ggconv(g, feat, etypes)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_nn_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((100, 5))
    efeat = F.randn((g.number_of_edges(), 4))

    if F.gpu_ctx():
        nnconv = nnconv.to(ctx)
        feat = feat.to(ctx)
        efeat = efeat.to(ctx)

    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

def test_gmm_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
    feat = F.randn((100, 5))
    pseudo = F.randn((g.number_of_edges(), 3))

    if F.gpu_ctx():
        gmmconv = gmmconv.to(ctx)
        feat = feat.to(ctx)
        pseudo = pseudo.to(ctx)

    h = gmmconv(g, feat, pseudo)
    # currently we only do shape check
    assert h.shape[-1] == 10

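# The Dense* modules take a dense adjacency matrix and should reproduce their
# sparse counterparts once the weights are copied over.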
def test_dense_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    dense_conv = nn.DenseGraphConv(5, 2, norm=False, bias=True)
    dense_conv.weight.data = conv.weight.data
    dense_conv.bias.data = conv.bias.data
    feat = F.randn((100, 5))
    if F.gpu_ctx():
        conv = conv.to(ctx)
        dense_conv = dense_conv.to(ctx)
        feat = feat.to(ctx)

    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)

def test_dense_sage_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    sage = nn.SAGEConv(5, 2, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, 2)
    dense_sage.fc.weight.data = sage.fc_neigh.weight.data
    dense_sage.fc.bias.data = sage.fc_neigh.bias.data
    feat = F.randn((100, 5))
    if F.gpu_ctx():
        sage = sage.to(ctx)
        dense_sage = dense_sage.to(ctx)
        feat = feat.to(ctx)

    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage)

def test_dense_cheb_conv():
    for k in range(1, 4):
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        adj = g.adjacency_matrix(ctx=ctx).to_dense()
        cheb = nn.ChebConv(5, 2, k)
        dense_cheb = nn.DenseChebConv(5, 2, k)
        for i in range(len(cheb.fc)):
            dense_cheb.W.data[i] = cheb.fc[i].weight.data.t()
        if cheb.bias is not None:
            dense_cheb.bias.data = cheb.bias.data
        feat = F.randn((100, 5))
        if F.gpu_ctx():
            cheb = cheb.to(ctx)
            dense_cheb = dense_cheb.to(ctx)
            feat = feat.to(ctx)

        out_cheb = cheb(g, feat, [2.0])
        out_dense_cheb = dense_cheb(adj, feat, 2.0)
        assert F.allclose(out_cheb, out_dense_cheb)

if __name__ == '__main__':
    test_graph_conv()
    test_edge_softmax()
    test_set2set()
    test_glob_att_pool()
    test_simple_pool()
    test_set_trans()
    test_rgcn()
    test_tagconv()
    test_gat_conv()
    test_sage_conv()
    test_sgc_conv()
    test_appnp_conv()
    test_gin_conv()
    test_agnn_conv()
    test_gated_graph_conv()
    test_nn_conv()
    test_gmm_conv()
    test_dense_graph_conv()
    test_dense_sage_conv()
    test_dense_cheb_conv()