import io

import torch as th
import networkx as nx
import dgl
import dgl.nn.pytorch as nn
import dgl.function as fn
import backend as F
import pytest
import torch
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from test_utils import parametrize_idtype
from copy import deepcopy
import pickle

import scipy as sp
import scipy.sparse  # sp.sparse is used below; import the submodule explicitly

from torch.utils.data import DataLoader
from torch.optim import SparseAdam, Adam
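
# Shared in-memory buffer reused by the th.save(...) "test pickle" serialization checks below.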
tmp_buffer = io.BytesIO()

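# Dense reference for GraphConv with norm='none': Y = A @ (X W) + b.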
def _AXWb(A, X, W, b):
    X = th.matmul(X, W)
    Y = th.matmul(A, X.view(X.shape[0], -1)).view_as(X)
    return Y + b

@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv0(out_dim):
    g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)

    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test pickle
    th.save(conv, tmp_buffer)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, out_dim)
    conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
    # Test one tensor input
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, out_dim)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    if weight:
        h_out = conv(g, h)
    else:
        h_out = conv(g, h, weight=ext_w)
    assert h_out.shape == (ndst, out_dim)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim):
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, out_dim)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    e_w = g.edata['scalar_w']
    if weight:
        h_out = conv(g, h, edge_weight=e_w)
    else:
        h_out = conv(g, h, weight=ext_w, edge_weight=e_w)
    assert h_out.shape == (ndst, out_dim)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim):
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())

    # test pickle
    th.save(conv, tmp_buffer)

    ext_w = F.randn((5, out_dim)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    edgenorm = nn.EdgeWeightNorm(norm=norm)
    norm_weight = edgenorm(g, g.edata['scalar_w'])
    if weight:
        h_out = conv(g, h, edge_weight=norm_weight)
    else:
        h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
    assert h_out.shape == (ndst, out_dim)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim):
    # Test a pair of tensor inputs
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())

    # test pickle
    th.save(conv, tmp_buffer)

    ext_w = F.randn((5, out_dim)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    h_dst = F.randn((ndst, out_dim)).to(F.ctx())
    if weight:
        h_out = conv(g, (h, h_dst))
    else:
        h_out = conv(g, (h, h_dst), weight=ext_w)
    assert h_out.shape == (ndst, out_dim)

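# Dense reference for TAGConv with k=2: stack [X, (N A N) X, (N A N)^2 X] and apply
# the linear layer; N carries the D^{-1/2} in-degree norms.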
def _S2AXWb(A, N, X, W, b):
    X1 = X * N
    X1 = th.matmul(A, X1.view(X1.shape[0], -1))
    X1 = X1 * N
    X2 = X1 * N
    X2 = th.matmul(A, X2.view(X2.shape[0], -1))
    X2 = X2 * N
    X = th.cat([X, X1, X2], dim=-1)
    Y = th.matmul(X, W.rot90())

    return Y + b

@pytest.mark.parametrize('out_dim', [1, 2])
def test_tagconv(out_dim):
    g = dgl.DGLGraph(nx.path_graph(3))
    g = g.to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)
    norm = th.pow(g.in_degrees().float(), -0.5)

    conv = nn.TAGConv(5, out_dim, bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test pickle
    th.save(conv, tmp_buffer)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1,) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)
    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight, conv.lin.bias))

    conv = nn.TAGConv(5, out_dim)
    conv = conv.to(ctx)

    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == out_dim

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)

def test_set2set():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))
    g = g.to(F.ctx())

    s2s = nn.Set2Set(5, 3, 3) # hidden size 5, 3 iters, 3 layers
    s2s = s2s.to(ctx)
    print(s2s)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = s2s(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(11)).to(F.ctx())
    g2 = dgl.DGLGraph(nx.path_graph(5)).to(F.ctx())
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = s2s(bg, h0)
    assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.dim() == 2

def test_glob_att_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))
    g = g.to(F.ctx())

    gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10))
    gap = gap.to(ctx)
    print(gap)

    # test pickle
    th.save(gap, tmp_buffer)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = gap(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2

    # test#2: batched graph
    bg = dgl.batch([g, g, g, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = gap(bg, h0)
    assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2

def test_simple_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))
    g = g.to(F.ctx())

    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
    sort_pool = nn.SortPooling(10) # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    sum_pool = sum_pool.to(ctx)
    avg_pool = avg_pool.to(ctx)
    max_pool = max_pool.to(ctx)
    sort_pool = sort_pool.to(ctx)
    h1 = sum_pool(g, h0)
    assert F.allclose(F.squeeze(h1, 0), F.sum(h0, 0))
    h1 = avg_pool(g, h0)
    assert F.allclose(F.squeeze(h1, 0), F.mean(h0, 0))
    h1 = max_pool(g, h0)
    assert F.allclose(F.squeeze(h1, 0), F.max(h0, 0))
    h1 = sort_pool(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 * 5 and h1.dim() == 2

    # test#2: batched graph
    g_ = dgl.DGLGraph(nx.path_graph(5)).to(F.ctx())
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = sum_pool(bg, h0)
    truth = th.stack([F.sum(h0[:15], 0),
                      F.sum(h0[15:20], 0),
                      F.sum(h0[20:35], 0),
                      F.sum(h0[35:40], 0),
                      F.sum(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = avg_pool(bg, h0)
    truth = th.stack([F.mean(h0[:15], 0),
                      F.mean(h0[15:20], 0),
                      F.mean(h0[20:35], 0),
                      F.mean(h0[35:40], 0),
                      F.mean(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = max_pool(bg, h0)
    truth = th.stack([F.max(h0[:15], 0),
                      F.max(h0[15:20], 0),
                      F.max(h0[20:35], 0),
                      F.max(h0[35:40], 0),
                      F.max(h0[40:55], 0)], 0)
    assert F.allclose(h1, truth)

    h1 = sort_pool(bg, h0)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.dim() == 2

def test_set_trans():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))

    st_enc_0 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'sab')
    st_enc_1 = nn.SetTransformerEncoder(50, 5, 10, 100, 2, 'isab', 3)
    st_dec = nn.SetTransformerDecoder(50, 5, 10, 100, 2, 4)
    st_enc_0 = st_enc_0.to(ctx)
    st_enc_1 = st_enc_1.to(ctx)
    st_dec = st_dec.to(ctx)
    print(st_enc_0, st_enc_1, st_dec)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 50))
    h1 = st_enc_0(g, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(g, h0)
    assert h1.shape == h0.shape
    h2 = st_dec(g, h1)
    assert h2.shape[0] == 1 and h2.shape[1] == 200 and h2.dim() == 2

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(5))
    g2 = dgl.DGLGraph(nx.path_graph(10))
    bg = dgl.batch([g, g1, g2])
    h0 = F.randn((bg.number_of_nodes(), 50))
    h1 = st_enc_0(bg, h0)
    assert h1.shape == h0.shape
    h1 = st_enc_1(bg, h0)
    assert h1.shape == h0.shape

    h2 = st_dec(bg, h1)
    assert h2.shape[0] == 3 and h2.shape[1] == 200 and h2.dim() == 2

@parametrize_idtype
@pytest.mark.parametrize('O', [1, 8, 32])
def test_rgcn(idtype, O):
    ctx = F.ctx()
    etype = []
    g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1))
    g = g.astype(idtype).to(F.ctx())
    # 5 etypes
    R = 5
    for i in range(g.number_of_edges()):
        etype.append(i % 5)
    B = 2   # number of bases / blocks
    I = 10  # input feature size

    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    norm = th.rand((g.number_of_edges(), 1)).to(ctx)
    sorted_r, idx = th.sort(r)
    sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)})
    sorted_norm = norm[idx]

    rgc = nn.RelGraphConv(I, O, R).to(ctx)
    th.save(rgc, tmp_buffer)  # test pickle
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
    th.save(rgc_basis, tmp_buffer)  # test pickle
    if O % B == 0:
        rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
        th.save(rgc_bdd, tmp_buffer)  # test pickle

    # basic usage
    h_new = rgc(g, h, r)
    assert h_new.shape == (100, O)
    h_new_basis = rgc_basis(g, h, r)
    assert h_new_basis.shape == (100, O)
    if O % B == 0:
        h_new_bdd = rgc_bdd(g, h, r)
        assert h_new_bdd.shape == (100, O)

    # sorted input
    h_new_sorted = rgc(sorted_g, h, sorted_r, presorted=True)
    assert th.allclose(h_new, h_new_sorted, atol=1e-4, rtol=1e-4)
    h_new_basis_sorted = rgc_basis(sorted_g, h, sorted_r, presorted=True)
    assert th.allclose(h_new_basis, h_new_basis_sorted, atol=1e-4, rtol=1e-4)
    if O % B == 0:
        h_new_bdd_sorted = rgc_bdd(sorted_g, h, sorted_r, presorted=True)
        assert th.allclose(h_new_bdd, h_new_bdd_sorted, atol=1e-4, rtol=1e-4)

    # norm input
    h_new = rgc(g, h, r, norm)
    assert h_new.shape == (100, O)
    h_new = rgc_basis(g, h, r, norm)
    assert h_new.shape == (100, O)
    if O % B == 0:
        h_new = rgc_bdd(g, h, r, norm)
        assert h_new.shape == (100, O)

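# Same coverage as test_rgcn, but relying on the default num_bases (which falls back to num_rels).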
@parametrize_idtype
@pytest.mark.parametrize('O', [1, 10, 40])
def test_rgcn_default_nbasis(idtype, O):
    ctx = F.ctx()
    etype = []
    g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1))
    g = g.astype(idtype).to(F.ctx())
    # 5 etypes
    R = 5
    for i in range(g.number_of_edges()):
        etype.append(i % 5)
    I = 10

    h = th.randn((100, I)).to(ctx)
    r = th.tensor(etype).to(ctx)
    norm = th.rand((g.number_of_edges(), 1)).to(ctx)
    sorted_r, idx = th.sort(r)
    sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)})
    sorted_norm = norm[idx]

    rgc = nn.RelGraphConv(I, O, R).to(ctx)
    th.save(rgc, tmp_buffer)  # test pickle
    rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx)
    th.save(rgc_basis, tmp_buffer)  # test pickle
    if O % R == 0:
        rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx)
        th.save(rgc_bdd, tmp_buffer)  # test pickle

    # basic usage
    h_new = rgc(g, h, r)
    assert h_new.shape == (100, O)
    h_new_basis = rgc_basis(g, h, r)
    assert h_new_basis.shape == (100, O)
    if O % R == 0:
        h_new_bdd = rgc_bdd(g, h, r)
        assert h_new_bdd.shape == (100, O)

    # sorted input
    h_new_sorted = rgc(sorted_g, h, sorted_r, presorted=True)
    assert th.allclose(h_new, h_new_sorted, atol=1e-4, rtol=1e-4)
    h_new_basis_sorted = rgc_basis(sorted_g, h, sorted_r, presorted=True)
    assert th.allclose(h_new_basis, h_new_basis_sorted, atol=1e-4, rtol=1e-4)
    if O % R == 0:
        h_new_bdd_sorted = rgc_bdd(sorted_g, h, sorted_r, presorted=True)
        assert th.allclose(h_new_bdd, h_new_bdd_sorted, atol=1e-4, rtol=1e-4)

    # norm input
    h_new = rgc(g, h, r, norm)
    assert h_new.shape == (100, O)
    h_new = rgc_basis(g, h, r, norm)
    assert h_new.shape == (100, O)
    if O % R == 0:
        h_new = rgc_bdd(g, h, r, norm)
        assert h_new.shape == (100, O)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 5])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gat_conv(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATConv(5, out_dim, num_heads)
    feat = F.randn((g.number_of_src_nodes(), 5))
    gat = gat.to(ctx)
    h = gat(g, feat)

    # test pickle
    th.save(gat, tmp_buffer)

    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

    # test residual connection
    gat = nn.GATConv(5, out_dim, num_heads, residual=True)
    gat = gat.to(ctx)
    h = gat(g, feat)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gat_conv_bi(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATConv(5, out_dim, num_heads)
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 5])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gatv2_conv(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATv2Conv(5, out_dim, num_heads)
    feat = F.randn((g.number_of_src_nodes(), 5))
    gat = gat.to(ctx)
    h = gat(g, feat)

    # test pickle
    th.save(gat, tmp_buffer)

    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

    # test residual connection
    gat = nn.GATv2Conv(5, out_dim, num_heads, residual=True)
    gat = gat.to(ctx)
    h = gat(g, feat)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gatv2_conv_bi(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATv2Conv(5, out_dim, num_heads)
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_node_feats', [1, 5])
@pytest.mark.parametrize('out_edge_feats', [1, 5])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    egat = nn.EGATConv(in_node_feats=10,
                       in_edge_feats=5,
                       out_node_feats=out_node_feats,
                       out_edge_feats=out_edge_feats,
                       num_heads=num_heads)
    nfeat = F.randn((g.number_of_nodes(), 10))
    efeat = F.randn((g.number_of_edges(), 5))
    egat = egat.to(ctx)
    h, f = egat(g, nfeat, efeat)

    th.save(egat, tmp_buffer)  # test pickle

    assert h.shape == (g.number_of_nodes(), num_heads, out_node_feats)
    assert f.shape == (g.number_of_edges(), num_heads, out_edge_feats)
    _, _, attn = egat(g, nfeat, efeat, True)
    assert attn.shape == (g.number_of_edges(), num_heads, 1)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_node_feats', [1, 5])
@pytest.mark.parametrize('out_edge_feats', [1, 5])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    egat = nn.EGATConv(in_node_feats=(10,15),
                       in_edge_feats=7,
                       out_node_feats=out_node_feats,
                       out_edge_feats=out_edge_feats,
                       num_heads=num_heads)
    nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15)))
    efeat = F.randn((g.number_of_edges(), 7))
    egat = egat.to(ctx)
    h, f = egat(g, nfeat, efeat)
    th.save(egat, tmp_buffer)  # test pickle

    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_node_feats)
    assert f.shape == (g.number_of_edges(), num_heads, out_edge_feats)
    _, _, attn = egat(g, nfeat, efeat, True)
    assert attn.shape == (g.number_of_edges(), num_heads, 1)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
def test_sage_conv(idtype, g, aggre_type):
    g = g.astype(idtype).to(F.ctx())
    sage = nn.SAGEConv(5, 10, aggre_type)
    feat = F.randn((g.number_of_src_nodes(), 5))
    sage = sage.to(F.ctx())
    # test pickle
    th.save(sage, tmp_buffer)
    h = sage(g, feat)
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
    g = g.astype(idtype).to(F.ctx())
    dst_dim = 5 if aggre_type != 'gcn' else 10
    sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
    feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
    sage = sage.to(F.ctx())
    h = sage(g, feat)
    assert h.shape[-1] == out_dim
    assert h.shape[0] == g.number_of_dst_nodes()

@parametrize_idtype
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv2(idtype, out_dim):
    # TODO: add test for blocks
    # Test the case for graphs without edges
    g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage = sage.to(ctx)
    h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))
    assert h.shape[-1] == out_dim
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool', 'lstm']:
        sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == out_dim
        assert h.shape[0] == 3

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sgc_conv(g, idtype, out_dim):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    # not cached
    sgc = nn.SGConv(5, out_dim, 3)

    # test pickle
    th.save(sgc, tmp_buffer)

    feat = F.randn((g.number_of_nodes(), 5))
    sgc = sgc.to(ctx)

    h = sgc(g, feat)
    assert h.shape[-1] == out_dim

    # cached
    sgc = nn.SGConv(5, out_dim, 3, True)
    sgc = sgc.to(ctx)
    h_0 = sgc(g, feat)
    h_1 = sgc(g, feat + 1)
    assert F.allclose(h_0, h_1)
    assert h_0.shape[-1] == out_dim

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_appnp_conv(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    appnp = nn.APPNPConv(10, 0.1)
    feat = F.randn((g.number_of_nodes(), 5))
    appnp = appnp.to(ctx)

    # test pickle
    th.save(appnp, tmp_buffer)

    h = appnp(g, feat)
    assert h.shape[-1] == 5

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_appnp_conv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    appnp = nn.APPNPConv(10, 0.1)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    appnp = appnp.to(ctx)

    h = appnp(g, feat, edge_weight=eweight)
    assert h.shape[-1] == 5

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@pytest.mark.parametrize("bias", [True, False])
def test_gcn2conv_e_weight(g, idtype, bias):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias,
                           project_initial_features=True)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    gcn2conv = gcn2conv.to(ctx)
    res = feat
    h = gcn2conv(g, res, feat, edge_weight=eweight)
    assert h.shape[-1] == 5


@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_sgconv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    sgconv = nn.SGConv(5, 5, 3)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    sgconv = sgconv.to(ctx)
    h = sgconv(g, feat, edge_weight=eweight)
    assert h.shape[-1] == 5

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_tagconv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    conv = nn.TAGConv(5, 5, bias=True)
    conv = conv.to(ctx)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    h = conv(g, feat, edge_weight=eweight)
    assert h.shape[-1] == 5

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(g, idtype, aggregator_type):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gin = nn.GINConv(
        th.nn.Linear(5, 12),
        aggregator_type
    )
    th.save(gin, tmp_buffer)
    feat = F.randn((g.number_of_src_nodes(), 5))
    gin = gin.to(ctx)
    h = gin(g, feat)

    # test pickle
    th.save(gin, tmp_buffer)

    assert h.shape == (g.number_of_dst_nodes(), 12)

    gin = nn.GINConv(None, aggregator_type)
    th.save(gin, tmp_buffer)
    gin = gin.to(ctx)
    h = gin(g, feat)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
def test_gine_conv(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    gine = nn.GINEConv(
        th.nn.Linear(5, 12)
    )
    th.save(gine, tmp_buffer)
    nfeat = F.randn((g.number_of_src_nodes(), 5))
    efeat = F.randn((g.num_edges(), 5))
    gine = gine.to(ctx)
    h = gine(g, nfeat, efeat)

    # test pickle
    th.save(gine, tmp_buffer)
    assert h.shape == (g.number_of_dst_nodes(), 12)

    gine = nn.GINEConv(None)
    th.save(gine, tmp_buffer)
    gine = gine.to(ctx)
    h = gine(g, nfeat, efeat)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv_bi(g, idtype, aggregator_type):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gin = nn.GINConv(
        th.nn.Linear(5, 12),
        aggregator_type
    )
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    gin = gin.to(ctx)
    h = gin(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), 12)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_agnn_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    agnn = nn.AGNNConv(1)
    feat = F.randn((g.number_of_src_nodes(), 5))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), 5)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_agnn_conv_bi(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    agnn = nn.AGNNConv(1)
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), 5)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_gated_graph_conv(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    ggconv = nn.GatedGraphConv(5, 10, 5, 3)
    etypes = th.arange(g.number_of_edges()) % 3
    feat = F.randn((g.number_of_nodes(), 5))
    ggconv = ggconv.to(ctx)
    etypes = etypes.to(ctx)

    h = ggconv(g, feat, etypes)
    # currently we only do a shape check
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_gated_graph_conv_one_etype(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    ggconv = nn.GatedGraphConv(5, 10, 5, 1)
    etypes = th.zeros(g.number_of_edges())
    feat = F.randn((g.number_of_nodes(), 5))
    ggconv = ggconv.to(ctx)
    etypes = etypes.to(ctx)

    h = ggconv(g, feat, etypes)
    h2 = ggconv(g, feat)
    # with a single edge type, passing etypes and omitting them must agree
    assert F.allclose(h, h2)
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_nn_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((g.number_of_src_nodes(), 5))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_nn_conv_bi(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv((5, 2), 10, edge_func, 'mean')
    feat = F.randn((g.number_of_src_nodes(), 5))
    feat_dst = F.randn((g.number_of_dst_nodes(), 2))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, (feat, feat_dst), efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_gmm_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
    feat = F.randn((g.number_of_nodes(), 5))
    pseudo = F.randn((g.number_of_edges(), 3))
    gmmconv = gmmconv.to(ctx)
    h = gmmconv(g, feat, pseudo)
    # currently we only do shape check
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite', 'block-bipartite'], exclude=['zero-degree']))
def test_gmm_conv_bi(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gmmconv = nn.GMMConv((5, 2), 10, 3, 4, 'mean')
    feat = F.randn((g.number_of_src_nodes(), 5))
    feat_dst = F.randn((g.number_of_dst_nodes(), 2))
    pseudo = F.randn((g.number_of_edges(), 3))
    gmmconv = gmmconv.to(ctx)
    h = gmmconv(g, (feat, feat_dst), pseudo)
    # currently we only do shape check
    assert h.shape[-1] == 10

@parametrize_idtype
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_graph_conv(norm_type, g, idtype, out_dim):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    # TODO(minjie): enable the following option after #1385
    adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense()
    conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True)
    dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True)
    dense_conv.weight.data = conv.weight.data
    dense_conv.bias.data = conv.bias.data
    feat = F.randn((g.number_of_src_nodes(), 5))
    conv = conv.to(ctx)
    dense_conv = dense_conv.to(ctx)
    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_sage_conv(g, idtype, out_dim):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense()
    sage = nn.SAGEConv(5, out_dim, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, out_dim)
    dense_sage.fc.weight.data = sage.fc_neigh.weight.data
    dense_sage.fc.bias.data = sage.bias.data
    if len(g.ntypes) == 2:
        feat = (
            F.randn((g.number_of_src_nodes(), 5)),
            F.randn((g.number_of_dst_nodes(), 5))
        )
    else:
        feat = F.randn((g.number_of_nodes(), 5))
    sage = sage.to(ctx)
    dense_sage = dense_sage.to(ctx)
    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage), g

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv(g, idtype, out_dim):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    edge_conv = nn.EdgeConv(5, out_dim).to(ctx)
    print(edge_conv)

    # test pickle
    th.save(edge_conv, tmp_buffer)

    h0 = F.randn((g.number_of_src_nodes(), 5))
    h1 = edge_conv(g, h0)
    assert h1.shape == (g.number_of_dst_nodes(), out_dim)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv_bi(g, idtype, out_dim):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    edge_conv = nn.EdgeConv(5, out_dim).to(ctx)
    print(edge_conv)
    h0 = F.randn((g.number_of_src_nodes(), 5))
    x0 = F.randn((g.number_of_dst_nodes(), 5))
    h1 = edge_conv(g, (h0, x0))
    assert h1.shape == (g.number_of_dst_nodes(), out_dim)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_dotgat_conv(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    dotgat = nn.DotGatConv(5, out_dim, num_heads)
    feat = F.randn((g.number_of_src_nodes(), 5))
    dotgat = dotgat.to(ctx)

    # test pickle
    th.save(dotgat, tmp_buffer)

    h = dotgat(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = dotgat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_dotgat_conv_bi(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    dotgat = nn.DotGatConv((5, 5), out_dim, num_heads)
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    dotgat = dotgat.to(ctx)
    h = dotgat(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = dotgat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_cheb_conv(out_dim):
    for k in range(1, 4):
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
        g = g.to(F.ctx())
        adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense()
        cheb = nn.ChebConv(5, out_dim, k, None)
        dense_cheb = nn.DenseChebConv(5, out_dim, k)

        #for i in range(len(cheb.fc)):
        #    dense_cheb.W.data[i] = cheb.fc[i].weight.data.t()
        dense_cheb.W.data = cheb.linear.weight.data.transpose(-1, -2).view(k, 5, out_dim)
        if cheb.linear.bias is not None:
            dense_cheb.bias.data = cheb.linear.bias.data
        feat = F.randn((100, 5))
        cheb = cheb.to(ctx)
        dense_cheb = dense_cheb.to(ctx)
        out_cheb = cheb(g, feat, [2.0])
        out_dense_cheb = dense_cheb(adj, feat, 2.0)
        print(k, out_cheb, out_dense_cheb)
        assert F.allclose(out_cheb, out_dense_cheb)

def test_sequential():
    ctx = F.ctx()
    # Test single graph
    class ExampleLayer(th.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, graph, n_feat, e_feat):
            graph = graph.local_var()
            graph.ndata['h'] = n_feat
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            n_feat += graph.ndata['h']
            graph.apply_edges(fn.u_add_v('h', 'h', 'e'))
            e_feat += graph.edata['e']
            return n_feat, e_feat

    g = dgl.DGLGraph()
    g.add_nodes(3)
    g.add_edges([0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2])
    g = g.to(F.ctx())
    net = nn.Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer())
    n_feat = F.randn((3, 4))
    e_feat = F.randn((9, 4))
    net = net.to(ctx)
    n_feat, e_feat = net(g, n_feat, e_feat)
    assert n_feat.shape == (3, 4)
    assert e_feat.shape == (9, 4)

    # Test multiple graphs
    class ExampleLayer(th.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, graph, n_feat):
            graph = graph.local_var()
            graph.ndata['h'] = n_feat
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            n_feat += graph.ndata['h']
            return n_feat.view(graph.number_of_nodes() // 2, 2, -1).sum(1)

    g1 = dgl.DGLGraph(nx.erdos_renyi_graph(32, 0.05)).to(F.ctx())
    g2 = dgl.DGLGraph(nx.erdos_renyi_graph(16, 0.2)).to(F.ctx())
    g3 = dgl.DGLGraph(nx.erdos_renyi_graph(8, 0.8)).to(F.ctx())
    net = nn.Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer())
    net = net.to(ctx)
    n_feat = F.randn((32, 4))
    n_feat = net([g1, g2, g3], n_feat)
    assert n_feat.shape == (4, 4)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_atomic_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    aconv = nn.AtomicConv(interaction_cutoffs=F.tensor([12.0, 12.0]),
                          rbf_kernel_means=F.tensor([0.0, 2.0]),
                          rbf_kernel_scaling=F.tensor([4.0, 4.0]),
                          features_to_use=F.tensor([6.0, 8.0]))

    ctx = F.ctx()
    if F.gpu_ctx():
        aconv = aconv.to(ctx)

    feat = F.randn((g.number_of_nodes(), 1))
    dist = F.randn((g.number_of_edges(), 1))

    h = aconv(g, feat, dist)

    # currently we only do a shape check
    assert h.shape[-1] == 4

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 3])
def test_cf_conv(g, idtype, out_dim):
    g = g.astype(idtype).to(F.ctx())
    cfconv = nn.CFConv(node_in_feats=2,
                       edge_in_feats=3,
                       hidden_feats=2,
                       out_feats=out_dim)

    ctx = F.ctx()
    if F.gpu_ctx():
        cfconv = cfconv.to(ctx)

    src_feats = F.randn((g.number_of_src_nodes(), 2))
    edge_feats = F.randn((g.number_of_edges(), 3))
    h = cfconv(g, src_feats, edge_feats)
    # currently we only do a shape check
    assert h.shape[-1] == out_dim

    # case for bipartite graphs
    dst_feats = F.randn((g.number_of_dst_nodes(), 3))
    h = cfconv(g, (src_feats, dst_feats), edge_feats)
    # currently we only do a shape check
    assert h.shape[-1] == out_dim

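# Custom cross-type reducer for HeteroGraphConv: weight the i-th per-relation result by (i + 1) and sum.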
def myagg(alist, dsttype):
    rst = alist[0]
    for i in range(1, len(alist)):
        rst = rst + (i + 1) * alist[i]
    return rst

@parametrize_idtype
@pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg])
@pytest.mark.parametrize('canonical_keys', [False, True])
def test_hetero_conv(agg, idtype, canonical_keys):
    g = dgl.heterograph({
        ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
        ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
        ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])},
        idtype=idtype, device=F.ctx())
    if not canonical_keys:
        conv = nn.HeteroGraphConv({
            'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
            'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
            'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)},
            agg)
    else:
        conv = nn.HeteroGraphConv({
            ('user', 'follows', 'user'): nn.GraphConv(2, 3, allow_zero_in_degree=True),
            ('user', 'plays', 'game'): nn.GraphConv(2, 4, allow_zero_in_degree=True),
            ('store', 'sells', 'game'): nn.GraphConv(3, 4, allow_zero_in_degree=True)},
            agg)
    conv = conv.to(F.ctx())

    # test pickle
    th.save(conv, tmp_buffer)

    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))

    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [0, 1, 2, 3], 'store': []}).to(F.ctx())
    h = conv(block, ({'user': uf, 'game': gf, 'store': sf}, {'user': uf, 'game': gf, 'store': sf[0:0]}))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    h = conv(block, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    # test with mod args
    class MyMod(th.nn.Module):
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.carg2 = 0
            self.s1 = s1
            self.s2 = s2
        def forward(self, g, h, arg1=None, *, arg2=None):
            if arg1 is not None:
                self.carg1 += 1
            if arg2 is not None:
                self.carg2 += 1
            return th.zeros((g.number_of_dst_nodes(), self.s2))
    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3},
        agg)
    conv = conv.to(F.ctx())
    mod_args = {'follows' : (1,), 'plays' : (1,)}
    mod_kwargs = {'sells' : {'arg2' : 'abc'}}
    h = conv(g, {'user' : uf, 'game': gf, 'store' : sf}, mod_args=mod_args, mod_kwargs=mod_kwargs)
    assert mod1.carg1 == 1
    assert mod1.carg2 == 0
    assert mod2.carg1 == 1
    assert mod2.carg2 == 0
    assert mod3.carg1 == 0
    assert mod3.carg2 == 1

    # conv on graph without any edges
    for etype in g.etypes:
        g = dgl.remove_edges(g, g.edges(form='eid', etype=etype), etype=etype)
    assert g.num_edges() == 0
    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}

    block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [
                         0, 1, 2, 3], 'store': []}).to(F.ctx())
    h = conv(block, ({'user': uf, 'game': gf, 'store': sf},
             {'user': uf, 'game': gf, 'store': sf[0:0]}))
    assert set(h.keys()) == {'user', 'game'}

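# HeteroLinear keeps a separate linear layer per type key (node type or canonical edge type).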
@pytest.mark.parametrize('out_dim', [1, 2, 100])
def test_hetero_linear(out_dim):
    in_feats = {
        'user': F.randn((2, 1)),
        ('user', 'follows', 'user'): F.randn((3, 2))
    }

    layer = nn.HeteroLinear({'user': 1, ('user', 'follows', 'user'): 2}, out_dim)
    layer = layer.to(F.ctx())
    out_feats = layer(in_feats)
    assert out_feats['user'].shape == (2, out_dim)
    assert out_feats[('user', 'follows', 'user')].shape == (3, out_dim)

@pytest.mark.parametrize('out_dim', [1, 2, 100])
def test_hetero_embedding(out_dim):
    layer = nn.HeteroEmbedding({'user': 2, ('user', 'follows', 'user'): 3}, out_dim)
    layer = layer.to(F.ctx())

    embeds = layer.weight
    assert embeds['user'].shape == (2, out_dim)
    assert embeds[('user', 'follows', 'user')].shape == (3, out_dim)

    layer.reset_parameters()
    embeds = layer.weight
    assert embeds['user'].shape == (2, out_dim)
    assert embeds[('user', 'follows', 'user')].shape == (3, out_dim)

    embeds = layer({
        'user': F.tensor([0], dtype=F.int64),
        ('user', 'follows', 'user'): F.tensor([0, 2], dtype=F.int64)
    })
    assert embeds['user'].shape == (1, out_dim)
    assert embeds[('user', 'follows', 'user')].shape == (2, out_dim)

@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_gnnexplainer(g, idtype, out_dim):
    g = g.astype(idtype).to(F.ctx())
    feat = F.randn((g.num_nodes(), 5))

    class Model(th.nn.Module):
        def __init__(self, in_feats, out_feats, graph=False):
            super(Model, self).__init__()
            self.linear = th.nn.Linear(in_feats, out_feats)
            if graph:
                self.pool = nn.AvgPooling()
            else:
                self.pool = None

        def forward(self, graph, feat, eweight=None):
            with graph.local_scope():
                feat = self.linear(feat)
                graph.ndata['h'] = feat
                if eweight is None:
                    graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
                else:
                    graph.edata['w'] = eweight
                    graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h'))

                if self.pool:
                    return self.pool(graph, graph.ndata['h'])
                else:
                    return graph.ndata['h']

    # Explain node prediction
    model = Model(5, out_dim)
    model = model.to(F.ctx())
    explainer = nn.GNNExplainer(model, num_hops=1)
    new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat)

    # Explain graph prediction
    model = Model(5, out_dim, graph=True)
    model = model.to(F.ctx())
    explainer = nn.GNNExplainer(model, num_hops=1)
    feat_mask, edge_mask = explainer.explain_graph(g, feat)

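# JumpingKnowledge combines the per-layer feature list: 'cat' gives (N, num_layers * num_feats),
# while 'max' and 'lstm' keep (N, num_feats).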
def test_jumping_knowledge():
    ctx = F.ctx()
    num_layers = 2
    num_nodes = 3
    num_feats = 4

    feat_list = [th.randn((num_nodes, num_feats)).to(ctx) for _ in range(num_layers)]

    model = nn.JumpingKnowledge('cat').to(ctx)
    model.reset_parameters()
    assert model(feat_list).shape == (num_nodes, num_layers * num_feats)

    model = nn.JumpingKnowledge('max').to(ctx)
    model.reset_parameters()
    assert model(feat_list).shape == (num_nodes, num_feats)

    model = nn.JumpingKnowledge('lstm', num_feats, num_layers).to(ctx)
    model.reset_parameters()
    assert model(feat_list).shape == (num_nodes, num_feats)

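# EdgePredictor scores (src, dst) pairs: 'dot'/'cos' give one score per pair, 'ele' keeps
# in_feats, 'cat' doubles it; passing in_feats/out_feats adds linear projections first.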
@pytest.mark.parametrize('op', ['dot', 'cos', 'ele', 'cat'])
def test_edge_predictor(op):
    ctx = F.ctx()
    num_pairs = 3
    in_feats = 4
    out_feats = 5
    h_src = th.randn((num_pairs, in_feats)).to(ctx)
    h_dst = th.randn((num_pairs, in_feats)).to(ctx)

    pred = nn.EdgePredictor(op)
    if op in ['dot', 'cos']:
        assert pred(h_src, h_dst).shape == (num_pairs, 1)
    elif op == 'ele':
        assert pred(h_src, h_dst).shape == (num_pairs, in_feats)
    else:
        assert pred(h_src, h_dst).shape == (num_pairs, 2 * in_feats)
    pred = nn.EdgePredictor(op, in_feats, out_feats, bias=True).to(ctx)
    assert pred(h_src, h_dst).shape == (num_pairs, out_feats)


def test_ke_score_funcs():
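    # Knowledge-embedding score functions (TransE/TransR) should return one
    # score per (head, relation, tail) triple.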
    ctx = F.ctx()
    num_edges = 30
    num_rels = 3
    nfeats = 4

    h_src = th.randn((num_edges, nfeats)).to(ctx)
    h_dst = th.randn((num_edges, nfeats)).to(ctx)
    rels = th.randint(low=0, high=num_rels, size=(num_edges,)).to(ctx)

    score_func = nn.TransE(num_rels=num_rels, feats=nfeats).to(ctx)
    score_func.reset_parameters()
    assert score_func(h_src, h_dst, rels).shape == (num_edges,)

    score_func = nn.TransR(num_rels=num_rels, rfeats=nfeats - 1, nfeats=nfeats).to(ctx)
    score_func.reset_parameters()
    assert score_func(h_src, h_dst, rels).shape == (num_edges,)


def test_twirls():
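    # TWIRLSConv with 64 unrolled propagation steps: 10-dim inputs on a
    # 6-node graph mapped to 2-dim outputs.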
    g = dgl.graph(([0, 1, 2, 3, 2, 5], [1, 2, 3, 4, 0, 3]))
    feat = th.ones(6, 10)
    conv = nn.TWIRLSConv(10, 2, 128, prop_step=64)
    res = conv(g, feat)
    assert res.size() == (6, 2)

@pytest.mark.parametrize('feat_size', [4, 32])
@pytest.mark.parametrize('regularizer,num_bases', [(None, None), ('basis', 4), ('bdd', 4)])
def test_typed_linear(feat_size, regularizer, num_bases):
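    # TypedLinear applies a per-type weight; results must match whether the
    # inputs are given unsorted or pre-sorted by type.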
    dev = F.ctx()
    num_types = 5
    lin = nn.TypedLinear(feat_size, feat_size * 2, num_types, regularizer=regularizer, num_bases=num_bases).to(dev)
    print(lin)
    x = th.randn(100, feat_size).to(dev)
    x_type = th.randint(0, num_types, (100,)).to(dev)
    x_type_sorted, idx = th.sort(x_type)
    _, rev_idx = th.sort(idx)
    x_sorted = x[idx]

    # test unsorted
    y = lin(x, x_type)
    assert y.shape == (100, feat_size * 2)
    # test sorted
    y_sorted = lin(x_sorted, x_type_sorted, sorted_by_type=True)
    assert y_sorted.shape == (100, feat_size * 2)

    assert th.allclose(y, y_sorted[rev_idx], atol=1e-4, rtol=1e-4)

@parametrize_idtype
@pytest.mark.parametrize('in_size', [4])
@pytest.mark.parametrize('num_heads', [1])
def test_hgt(idtype, in_size, num_heads):
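    # HGTConv on a random sparse graph with node/edge types assigned
    # cyclically; also run on a copy of the graph reordered by type.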
    dev = F.ctx()
    num_etypes = 5
    num_ntypes = 2
    head_size = in_size // num_heads

    g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.01))
    g = g.astype(idtype).to(dev)
    etype = th.tensor([i % num_etypes for i in range(g.num_edges())]).to(dev)
    ntype = th.tensor([i % num_ntypes for i in range(g.num_nodes())]).to(dev)
    x = th.randn(g.num_nodes(), in_size).to(dev)

    m = nn.HGTConv(in_size, head_size, num_heads, num_ntypes, num_etypes).to(dev)

    y = m(g, x, ntype, etype)
    assert y.shape == (g.num_nodes(), head_size * num_heads)
    # test again with inputs sorted by node/edge type
    sorted_ntype, idx_nt = th.sort(ntype)
    sorted_etype, idx_et = th.sort(etype)
    _, rev_idx = th.sort(idx_nt)
    g.ndata['t'] = ntype
    g.ndata['x'] = x
    g.edata['t'] = etype
    sorted_g = dgl.reorder_graph(g, node_permute_algo='custom', edge_permute_algo='custom',
                                 permute_config={'nodes_perm' : idx_nt.to(idtype), 'edges_perm' : idx_et.to(idtype)})
    print(sorted_g.ndata['t'])
    print(sorted_g.edata['t'])
    sorted_x = sorted_g.ndata['x']
    sorted_y = m(sorted_g, sorted_x, sorted_ntype, sorted_etype, presorted=False)
    assert sorted_y.shape == (g.num_nodes(), head_size * num_heads)
    # TODO(minjie): enable the following check
    #assert th.allclose(y, sorted_y[rev_idx], atol=1e-4, rtol=1e-4)

@pytest.mark.parametrize('self_loop', [True, False])
@pytest.mark.parametrize('get_distances', [True, False])
def test_radius_graph(self_loop, get_distances):
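    # RadiusGraph connects points within Euclidean distance 0.3; compare
    # edges (and optionally distances) against precomputed targets.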
    pos = th.tensor([[0.1, 0.3, 0.4],
                     [0.5, 0.2, 0.1],
                     [0.7, 0.9, 0.5],
                     [0.3, 0.2, 0.5],
                     [0.2, 0.8, 0.2],
                     [0.9, 0.2, 0.1],
                     [0.7, 0.4, 0.4],
                     [0.2, 0.1, 0.6],
                     [0.5, 0.3, 0.5],
                     [0.4, 0.2, 0.6]])

    rg = nn.RadiusGraph(0.3, self_loop=self_loop)

    if get_distances:
        g, dists = rg(pos, get_distances=get_distances)
    else:
        g = rg(pos)

    if self_loop:
        src_target = th.tensor([0, 0, 1, 2, 3, 3, 3, 3, 3, 4, 5, 6, 6, 7, 7, 7,
                                8, 8, 8, 8, 9, 9, 9, 9])
        dst_target = th.tensor([0, 3, 1, 2, 0, 3, 7, 8, 9, 4, 5, 6, 8, 3, 7, 9,
                                3, 6, 8, 9, 3, 7, 8, 9])

        if get_distances:
            dists_target = th.tensor([[0.0000],
                                      [0.2449],
                                      [0.0000],
                                      [0.0000],
                                      [0.2449],
                                      [0.0000],
                                      [0.1732],
                                      [0.2236],
                                      [0.1414],
                                      [0.0000],
                                      [0.0000],
                                      [0.0000],
                                      [0.2449],
                                      [0.1732],
                                      [0.0000],
                                      [0.2236],
                                      [0.2236],
                                      [0.2449],
                                      [0.0000],
                                      [0.1732],
                                      [0.1414],
                                      [0.2236],
                                      [0.1732],
                                      [0.0000]])
    else:
        src_target = th.tensor([0, 3, 3, 3, 3, 6, 7, 7, 8, 8, 8, 9, 9, 9])
        dst_target = th.tensor([3, 0, 7, 8, 9, 8, 3, 9, 3, 6, 9, 3, 7, 8])

        if get_distances:
            dists_target = th.tensor([[0.2449],
                                      [0.2449],
                                      [0.1732],
                                      [0.2236],
                                      [0.1414],
                                      [0.2449],
                                      [0.1732],
                                      [0.2236],
                                      [0.2236],
                                      [0.2449],
                                      [0.1732],
                                      [0.1414],
                                      [0.2236],
                                      [0.1732]])

    src, dst = g.edges()

    assert th.equal(src, src_target)
    assert th.equal(dst, dst_target)

    if get_distances:
        assert th.allclose(dists, dists_target, rtol=1e-03)

@parametrize_idtype
def test_group_rev_res(idtype):
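    # GroupRevRes wraps a GraphConv over 2 feature groups; check that both
    # the forward and backward passes run.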
    dev = F.ctx()

    num_nodes = 5
    num_edges = 20
    feats = 32
    groups = 2
    g = dgl.rand_graph(num_nodes, num_edges).to(dev)
    h = th.randn(num_nodes, feats).to(dev)
    conv = nn.GraphConv(feats // groups, feats // groups)
    model = nn.GroupRevRes(conv, groups).to(dev)
    result = model(g, h)
    result.sum().backward()

@pytest.mark.parametrize('in_size', [16, 32])
@pytest.mark.parametrize('hidden_size', [16, 32])
@pytest.mark.parametrize('out_size', [16, 32])
@pytest.mark.parametrize('edge_feat_size', [16, 10, 0])
def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size):
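    # EGNNConv forward pass with node features, 3D coordinates, and
    # (possibly zero-dimensional) edge features.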
    dev = F.ctx()
    num_nodes = 5
    num_edges = 20
    g = dgl.rand_graph(num_nodes, num_edges).to(dev)
    h = th.randn(num_nodes, in_size).to(dev)
    x = th.randn(num_nodes, 3).to(dev)
    e = th.randn(num_edges, edge_feat_size).to(dev)
    model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev)
    model(g, h, x, e)

@pytest.mark.parametrize('in_size', [16, 32])
@pytest.mark.parametrize('out_size', [16, 32])
@pytest.mark.parametrize('aggregators',
    [['mean', 'max', 'sum'], ['min', 'std', 'var'], ['moment3', 'moment4', 'moment5']])
@pytest.mark.parametrize('scalers', [['identity'], ['amplification', 'attenuation']])
@pytest.mark.parametrize('delta', [2.5, 7.4])
@pytest.mark.parametrize('dropout', [0., 0.1])
@pytest.mark.parametrize('num_towers', [1, 4])
@pytest.mark.parametrize('edge_feat_size', [16, 0])
@pytest.mark.parametrize('residual', [True, False])
def test_pna_conv(in_size, out_size, aggregators, scalers, delta,
    dropout, num_towers, edge_feat_size, residual):
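    # PNAConv forward pass across aggregator/scaler/tower combinations,
    # with and without edge features.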
    dev = F.ctx()
    num_nodes = 5
    num_edges = 20
    g = dgl.rand_graph(num_nodes, num_edges).to(dev)
    h = th.randn(num_nodes, in_size).to(dev)
    e = th.randn(num_edges, edge_feat_size).to(dev)
    model = nn.PNAConv(in_size, out_size, aggregators, scalers, delta, dropout,
        num_towers, edge_feat_size, residual).to(dev)
    model(g, h, edge_feat=e)

@pytest.mark.parametrize('k', [3, 5])
@pytest.mark.parametrize('alpha', [0., 0.5, 1.])
@pytest.mark.parametrize('norm_type', ['sym', 'row'])
@pytest.mark.parametrize('clamp', [True, False])
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('reset', [True, False])
def test_label_prop(k, alpha, norm_type, clamp, normalize, reset):
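    # LabelPropagation with a partial seed mask, for both single-label
    # (class indices) and multi-label (boolean matrix) targets.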
    dev = F.ctx()
    num_nodes = 5
    num_edges = 20
    num_classes = 4
    g = dgl.rand_graph(num_nodes, num_edges).to(dev)
    labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev)
    ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7
    mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev)
    model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset)
    model(g, labels, mask)
    # multi-label case
    model(g, ml_labels, mask)

@pytest.mark.parametrize('in_size', [16])
@pytest.mark.parametrize('out_size', [16, 32])
@pytest.mark.parametrize('aggregators',
    [['mean', 'max', 'dir2-av'], ['min', 'std', 'dir1-dx']])
@pytest.mark.parametrize('scalers', [['amplification', 'attenuation']])
@pytest.mark.parametrize('delta', [2.5])
@pytest.mark.parametrize('edge_feat_size', [16, 0])
def test_dgn_conv(in_size, out_size, aggregators, scalers, delta,
    edge_feat_size):
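    # DGNConv: directional ('dir*') aggregators need Laplacian eigenvectors
    # (from dgl.LaplacianPE); without them, drop the directional aggregators.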
    dev = F.ctx()
    num_nodes = 5
    num_edges = 20
    g = dgl.rand_graph(num_nodes, num_edges).to(dev)
    h = th.randn(num_nodes, in_size).to(dev)
    e = th.randn(num_edges, edge_feat_size).to(dev)
    transform = dgl.LaplacianPE(k=3, feat_name='eig')
    g = transform(g)
    eig = g.ndata['eig']
    model = nn.DGNConv(in_size, out_size, aggregators, scalers, delta,
        edge_feat_size=edge_feat_size).to(dev)
    model(g, h, edge_feat=e, eig_vec=eig)

    aggregators_non_eig = [aggr for aggr in aggregators if not aggr.startswith('dir')]
    model = nn.DGNConv(in_size, out_size, aggregators_non_eig, scalers, delta,
        edge_feat_size=edge_feat_size).to(dev)
    model(g, h, edge_feat=e)

def test_DeepWalk():
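    # One optimization step of DeepWalk: sparse embeddings with SparseAdam
    # and fast negative sampling, then dense embeddings with Adam.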
    dev = F.ctx()
    g = dgl.graph(([0, 1, 2, 1, 2, 0], [1, 2, 0, 0, 1, 2]))
    model = nn.DeepWalk(g, emb_dim=8, walk_length=2, window_size=1, fast_neg=True, sparse=True)
    model = model.to(dev)
    dataloader = DataLoader(torch.arange(g.num_nodes()), batch_size=16, collate_fn=model.sample)
    optim = SparseAdam(model.parameters(), lr=0.01)
    walk = next(iter(dataloader)).to(dev)
    loss = model(walk)
    loss.backward()
    optim.step()

    model = nn.DeepWalk(g, emb_dim=8, walk_length=2, window_size=1, fast_neg=False, sparse=False)
    model = model.to(dev)
    dataloader = DataLoader(torch.arange(g.num_nodes()), batch_size=16, collate_fn=model.sample)
    optim = Adam(model.parameters(), lr=0.01)
    walk = next(iter(dataloader)).to(dev)
    loss = model(walk)
    loss.backward()
    optim.step()

@parametrize_idtype
def test_MetaPath2Vec(idtype):
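    # MetaPath2Vec on a user-company-product heterograph; the embedding
    # table must cover every node, including types off the metapath.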
    dev = F.ctx()
    g = dgl.heterograph({
        ('user', 'uc', 'company'): ([0, 0, 2, 1, 3], [1, 2, 1, 3, 0]),
        ('company', 'cp', 'product'): ([0, 0, 0, 1, 2, 3], [0, 2, 3, 0, 2, 1]),
        ('company', 'cu', 'user'): ([1, 2, 1, 3, 0], [0, 0, 2, 1, 3]),
        ('product', 'pc', 'company'): ([0, 2, 3, 0, 2, 1], [0, 0, 0, 1, 2, 3])
    }, idtype=idtype, device=dev)
    model = nn.MetaPath2Vec(g, ['uc', 'cu'], window_size=1)
    model = model.to(dev)
    embeds = model.node_embed.weight
    assert embeds.shape[0] == g.num_nodes()