OpenDAS / dgl

Commit 9e630101 (unverified), authored Feb 12, 2021 by Quan (Andy) Gan, committed by GitHub on Feb 12, 2021

fix and lots of tests (#2650)

The change itself is small but the test surface is large: dimension-agnostic `squeeze()` calls in the MXNet, PyTorch, and TensorFlow backends are replaced with an explicit axis (`squeeze(1)` / `tf.squeeze(..., 1)`), and the backend test suites are parametrized over output dimensions and head counts that include 1 — the degenerate case the old calls silently collapsed.
Parent: cf8a3fb3

Showing 6 changed files with 322 additions and 262 deletions:

- python/dgl/nn/mxnet/utils.py (+1, -1)
- python/dgl/nn/pytorch/conv/relgraphconv.py (+1, -1)
- python/dgl/nn/tensorflow/utils.py (+1, -1)
- tests/mxnet/test_nn.py (+91, -71)
- tests/pytorch/test_nn.py (+148, -121)
- tests/tensorflow/test_nn.py (+80, -67)
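Each test hunk below turns a hard-coded output size into a pytest parameter. To reproduce the added coverage for one backend, an invocation along these lines should work (a sketch only; it assumes a DGL dev checkout with that backend's test dependencies installed, and the `-k` filter is just an example):

```python
import pytest

# Run the GraphConv-related tests of the PyTorch suite across all
# parametrizations (idtype x norm x weight x bias x out_dim).
pytest.main(["tests/pytorch/test_nn.py", "-k", "graph_conv", "-v"])
```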
python/dgl/nn/mxnet/utils.py

```diff
@@ -84,7 +84,7 @@ def bmm_maybe_select(A, B, index):
         return B[index, A, :]
     else:
         BB = nd.take(B, index, axis=0)
-        return nd.batch_dot(A.expand_dims(1), BB).squeeze()
+        return nd.batch_dot(A.expand_dims(1), BB).squeeze(1)
 
 def normalize(x, p=2, axis=1, eps=1e-12):
     r"""Performs :math:`L_p` normalization of inputs over specified dimension.
```
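This one-line fix is the heart of the commit: with no axis argument, `squeeze()` removes *every* dimension of size 1, so `bmm_maybe_select` lost its feature dimension whenever the output size was 1. A minimal sketch of the failure mode, using numpy in place of `mx.nd` (the squeeze semantics are the same):

```python
import numpy as np

A = np.random.randn(4, 1, 10)   # batch of row vectors: (N, 1, in_dim)
B = np.random.randn(4, 10, 1)   # per-item weights with out_dim = 1
out = np.matmul(A, B)           # (4, 1, 1)

print(out.squeeze().shape)      # (4,)   -- both unit dims dropped: wrong
print(out.squeeze(1).shape)     # (4, 1) -- only the inserted dim dropped: right
```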
python/dgl/nn/pytorch/conv/relgraphconv.py

```diff
@@ -238,7 +238,7 @@ class RelGraphConv(nn.Module):
                 etypes = th.repeat_interleave(th.arange(len(etypes), device=device),
                                               th.tensor(etypes, device=device))
             weight = weight.index_select(0, etypes)
-            msg = th.bmm(h.unsqueeze(1), weight).squeeze()
+            msg = th.bmm(h.unsqueeze(1), weight).squeeze(1)
             if 'norm' in edges.data:
                 msg = msg * edges.data['norm']
```
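Same fix in the PyTorch message function: `th.bmm` of an `(E, 1, in_feat)` batch against `(E, in_feat, out_feat)` weights yields `(E, 1, out_feat)`, and a bare `squeeze()` flattens that to `(E,)` whenever `out_feat == 1`. A standalone sketch in plain torch (illustrative shapes, not DGL internals):

```python
import torch as th

E, in_feat, out_feat = 6, 10, 1          # out_feat = 1 is the failing case
h = th.randn(E, in_feat)                 # one feature row per edge
weight = th.randn(E, in_feat, out_feat)  # per-edge relation weight

msg = th.bmm(h.unsqueeze(1), weight)     # (E, 1, out_feat)
assert msg.squeeze(1).shape == (E, out_feat)  # fixed: keeps out_feat even at 1
assert msg.squeeze().shape == (E,)            # old behavior: out_feat vanished
```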
python/dgl/nn/tensorflow/utils.py

```diff
@@ -87,7 +87,7 @@ def bmm_maybe_select(A, B, index):
         return tf.gather(B, flatidx)
     else:
         BB = tf.gather(B, index)
-        return tf.squeeze(tf.matmul(tf.expand_dims(A, 1), BB))
+        return tf.squeeze(tf.matmul(tf.expand_dims(A, 1), BB), 1)
 
 class Identity(layers.Layer):
```
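`tf.squeeze` has the same default: without the axis argument it drops all unit dimensions, while `tf.squeeze(x, 1)` removes only the one inserted by `tf.expand_dims(A, 1)`. A small sketch:

```python
import tensorflow as tf

A = tf.random.normal((4, 10))      # (N, in_dim)
BB = tf.random.normal((4, 10, 1))  # gathered weights with out_dim = 1
out = tf.matmul(tf.expand_dims(A, 1), BB)  # (4, 1, 1)

print(tf.squeeze(out).shape)       # (4,)   -- all unit dims removed
print(tf.squeeze(out, 1).shape)    # (4, 1) -- only axis 1 removed
```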
tests/mxnet/test_nn.py

```diff
@@ -20,13 +20,14 @@ def _AXWb(A, X, W, b):
     return Y + b.data(X.context)
 
 @parametrize_dtype
-def test_graph_conv(idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv(idtype, out_dim):
     g = dgl.from_networkx(nx.path_graph(3))
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx)
-    conv = nn.GraphConv(5, 2, norm='none', bias=True)
+    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
     conv.initialize(ctx=ctx)
     # test#1: basic
     h0 = F.ones((3, 5))
@@ -41,7 +42,7 @@ def test_graph_conv(idtype):
     assert len(g.edata) == 0
     check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
 
-    conv = nn.GraphConv(5, 2)
+    conv = nn.GraphConv(5, out_dim)
     conv.initialize(ctx=ctx)
     # test#3: basic
@@ -55,7 +56,7 @@ def test_graph_conv(idtype):
     assert len(g.ndata) == 0
     assert len(g.edata) == 0
 
-    conv = nn.GraphConv(5, 2)
+    conv = nn.GraphConv(5, out_dim)
     conv.initialize(ctx=ctx)
     with autograd.train_mode():
@@ -83,38 +84,40 @@ def test_graph_conv(idtype):
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [False])
-def test_graph_conv2(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv2(idtype, g, norm, weight, bias, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
     conv.initialize(ctx=F.ctx())
-    ext_w = F.randn((5, 2)).as_in_context(F.ctx())
+    ext_w = F.randn((5, out_dim)).as_in_context(F.ctx())
     nsrc = ndst = g.number_of_nodes()
     h = F.randn((nsrc, 5)).as_in_context(F.ctx())
     if weight:
         h_out = conv(g, h)
     else:
         h_out = conv(g, h, ext_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [False])
-def test_graph_conv2_bi(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv2_bi(idtype, g, norm, weight, bias, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
     conv.initialize(ctx=F.ctx())
-    ext_w = F.randn((5, 2)).as_in_context(F.ctx())
+    ext_w = F.randn((5, out_dim)).as_in_context(F.ctx())
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5)).as_in_context(F.ctx())
-    h_dst = F.randn((ndst, 2)).as_in_context(F.ctx())
+    h_dst = F.randn((ndst, out_dim)).as_in_context(F.ctx())
     if weight:
         h_out = conv(g, (h, h_dst))
     else:
         h_out = conv(g, (h, h_dst), ext_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 def _S2AXWb(A, N, X, W, b):
     X1 = X * N
@@ -128,13 +131,14 @@ def _S2AXWb(A, N, X, W, b):
     return Y + b
 
-def test_tagconv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_tagconv(out_dim):
     g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx)
     norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5)
-    conv = nn.TAGConv(5, 2, bias=True)
+    conv = nn.TAGConv(5, out_dim, bias=True)
     conv.initialize(ctx=ctx)
     print(conv)
@@ -148,86 +152,93 @@ def test_tagconv():
     assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.data(ctx), conv.h_bias.data(ctx)))
 
-    conv = nn.TAGConv(5, 2)
+    conv = nn.TAGConv(5, out_dim)
     conv.initialize(ctx=ctx)
 
     # test#2: basic
     h0 = F.ones((3, 5))
     h1 = conv(g, h0)
-    assert h1.shape[-1] == 2
+    assert h1.shape[-1] == out_dim
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_gat_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 20])
+@pytest.mark.parametrize('num_heads', [1, 5])
+def test_gat_conv(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv(10, 20, 5) # n_heads = 5
+    gat = nn.GATConv(10, out_dim, num_heads) # n_heads = 5
     gat.initialize(ctx=ctx)
     print(gat)
     feat = F.randn((g.number_of_nodes(), 10))
     h = gat(g, feat)
-    assert h.shape == (g.number_of_nodes(), 5, 20)
+    assert h.shape == (g.number_of_nodes(), num_heads, out_dim)
     _, a = gat(g, feat, True)
-    assert a.shape == (g.number_of_edges(), 5, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
-def test_gat_conv_bi(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_gat_conv_bi(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv(5, 2, 4)
+    gat = nn.GATConv(5, out_dim, num_heads)
     gat.initialize(ctx=ctx)
     feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
     h = gat(g, feat)
-    assert h.shape == (g.number_of_dst_nodes(), 4, 2)
+    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
     _, a = gat(g, feat, True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
-def test_sage_conv(idtype, g, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 10])
+def test_sage_conv(idtype, g, aggre_type, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    sage = nn.SAGEConv(5, 10, aggre_type)
+    sage = nn.SAGEConv(5, out_dim, aggre_type)
     feat = F.randn((g.number_of_nodes(), 5))
     sage.initialize(ctx=ctx)
     h = sage(g, feat)
-    assert h.shape[-1] == 10
+    assert h.shape[-1] == out_dim
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite']))
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
-def test_sage_conv_bi(idtype, g, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    dst_dim = 5 if aggre_type != 'gcn' else 10
-    sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
+    sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
    feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
    sage.initialize(ctx=ctx)
    h = sage(g, feat)
-    assert h.shape[-1] == 2
+    assert h.shape[-1] == out_dim
    assert h.shape[0] == g.number_of_dst_nodes()
 
 @parametrize_dtype
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
-def test_sage_conv_bi2(idtype, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sage_conv_bi2(idtype, aggre_type, out_dim):
     # Test the case for graphs without edges
     g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    sage = nn.SAGEConv((3, 3), 2, 'gcn')
+    sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
     feat = (F.randn((5, 3)), F.randn((3, 3)))
     sage.initialize(ctx=ctx)
     h = sage(g, feat)
-    assert h.shape[-1] == 2
+    assert h.shape[-1] == out_dim
     assert h.shape[0] == 3
     for aggre_type in ['mean', 'pool']:
-        sage = nn.SAGEConv((3, 1), 2, aggre_type)
+        sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
         feat = (F.randn((5, 3)), F.randn((3, 1)))
         sage.initialize(ctx=ctx)
         h = sage(g, feat)
-        assert h.shape[-1] == 2
+        assert h.shape[-1] == out_dim
         assert h.shape[0] == 3
 
 def test_gg_conv():
@@ -244,18 +255,19 @@ def test_gg_conv():
     h1 = gg_conv(g, h0, etypes)
     assert h1.shape == (20, 20)
 
-def test_cheb_conv():
+@pytest.mark.parametrize('out_dim', [1, 20])
+def test_cheb_conv(out_dim):
     g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
     ctx = F.ctx()
-    cheb = nn.ChebConv(10, 20, 3) # k = 3
+    cheb = nn.ChebConv(10, out_dim, 3) # k = 3
     cheb.initialize(ctx=ctx)
     print(cheb)
     # test#1: basic
     h0 = F.randn((20, 10))
     h1 = cheb(g, h0)
-    assert h1.shape == (20, 20)
+    assert h1.shape == (20, out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@@ -294,13 +306,14 @@ def test_appnp_conv():
     h1 = appnp_conv(g, h0)
     assert h1.shape == (20, 10)
 
-def test_dense_cheb_conv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_cheb_conv(out_dim):
     for k in range(1, 4):
         ctx = F.ctx()
         g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
         adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
-        cheb = nn.ChebConv(5, 2, k)
-        dense_cheb = nn.DenseChebConv(5, 2, k)
+        cheb = nn.ChebConv(5, out_dim, k)
+        dense_cheb = nn.DenseChebConv(5, out_dim, k)
         cheb.initialize(ctx=ctx)
         dense_cheb.initialize(ctx=ctx)
@@ -319,12 +332,13 @@ def test_dense_cheb_conv():
 @parametrize_dtype
 @pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_dense_graph_conv(idtype, g, norm_type):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_graph_conv(idtype, g, norm_type, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
-    conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
-    dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
+    conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True)
+    dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True)
     conv.initialize(ctx=ctx)
     dense_conv.initialize(ctx=ctx)
     dense_conv.weight.set_data(
@@ -338,12 +352,13 @@ def test_dense_graph_conv(idtype, g, norm_type):
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite', 'block-bipartite']))
-def test_dense_sage_conv(idtype, g):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_sage_conv(idtype, g, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
-    sage = nn.SAGEConv(5, 2, 'gcn')
-    dense_sage = nn.DenseSAGEConv(5, 2)
+    sage = nn.SAGEConv(5, out_dim, 'gcn')
+    dense_sage = nn.DenseSAGEConv(5, out_dim)
     sage.initialize(ctx=ctx)
     dense_sage.initialize(ctx=ctx)
     dense_sage.fc.weight.set_data(
@@ -364,30 +379,32 @@ def test_dense_sage_conv(idtype, g):
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_edge_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_edge_conv(g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    edge_conv = nn.EdgeConv(5, 2)
+    edge_conv = nn.EdgeConv(5, out_dim)
     edge_conv.initialize(ctx=ctx)
     print(edge_conv)
     # test #1: basic
     h0 = F.randn((g.number_of_nodes(), 5))
     h1 = edge_conv(g, h0)
-    assert h1.shape == (g.number_of_nodes(), 2)
+    assert h1.shape == (g.number_of_nodes(), out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
-def test_edge_conv_bi(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_edge_conv_bi(g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    edge_conv = nn.EdgeConv(5, 2)
+    edge_conv = nn.EdgeConv(5, out_dim)
     edge_conv.initialize(ctx=ctx)
     print(edge_conv)
     # test #1: basic
     h0 = F.randn((g.number_of_src_nodes(), 5))
     x0 = F.randn((g.number_of_dst_nodes(), 5))
     h1 = edge_conv(g, (h0, x0))
-    assert h1.shape == (g.number_of_dst_nodes(), 2)
+    assert h1.shape == (g.number_of_dst_nodes(), out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@@ -475,19 +492,20 @@ def test_nn_conv_bi(g, idtype):
     h1 = nn_conv(g, (h0, hd), etypes)
     assert h1.shape == (g.number_of_dst_nodes(), 2)
 
-def test_sg_conv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sg_conv(out_dim):
     g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
     g = dgl.add_self_loop(g)
     ctx = F.ctx()
-    sgc = nn.SGConv(5, 2, 2)
+    sgc = nn.SGConv(5, out_dim, 2)
     sgc.initialize(ctx=ctx)
     print(sgc)
     # test #1: basic
     h0 = F.randn((g.number_of_nodes(), 5))
     h1 = sgc(g, h0)
-    assert h1.shape == (g.number_of_nodes(), 2)
+    assert h1.shape == (g.number_of_nodes(), out_dim)
 
 def test_set2set():
     g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
@@ -577,7 +595,8 @@ def test_simple_pool():
     h1 = sort_pool(bg, h0)
     assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.ndim == 2
 
-def test_rgcn():
+@pytest.mark.parametrize('O', [1, 2, 8])
+def test_rgcn(O):
     ctx = F.ctx()
     etype = []
     g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)).to(F.ctx())
@@ -587,7 +606,6 @@ def test_rgcn():
         etype.append(i % 5)
     B = 2
     I = 10
-    O = 8
 
     rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
     rgc_basis.initialize(ctx=ctx)
@@ -596,6 +614,7 @@ def test_rgcn():
     h_new = rgc_basis(g, h, r)
     assert list(h_new.shape) == [100, O]
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
         rgc_bdd.initialize(ctx=ctx)
         h = nd.random.randn(100, I, ctx=ctx)
@@ -613,6 +632,7 @@ def test_rgcn():
     h_new = rgc_basis(g, h, r, norm)
     assert list(h_new.shape) == [100, O]
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
         rgc_bdd.initialize(ctx=ctx)
         h = nd.random.randn(100, I, ctx=ctx)
```
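The pattern repeated through every hunk above (and in the PyTorch and TensorFlow suites below): the output dimension becomes a pytest parameter whose values include 1, so any layer still calling a bare `squeeze()` trips the shape assertion. A self-contained sketch of why `out_dim=1` is the interesting value; `DummyConv` is hypothetical and reproduces the pre-fix bug:

```python
import pytest
import torch as th

class DummyConv(th.nn.Module):
    """Hypothetical layer with the pre-fix bug: an axis-less squeeze()."""
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.weight = th.nn.Parameter(th.randn(in_dim, out_dim))

    def forward(self, h):
        # (N, 1, in_dim) @ (in_dim, out_dim) -> (N, 1, out_dim)
        return (h.unsqueeze(1) @ self.weight).squeeze()  # bug: collapses out_dim == 1

@pytest.mark.parametrize('out_dim', [1, 2])
def test_dummy_conv(out_dim):
    conv = DummyConv(5, out_dim)
    h = th.randn(3, 5)
    # Passes for out_dim == 2 but fails for out_dim == 1, which is exactly
    # the coverage this commit adds to the real layers.
    assert conv(h).shape == (3, out_dim)
```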
tests/pytorch/test_nn.py

```diff
@@ -16,12 +16,13 @@ def _AXWb(A, X, W, b):
     Y = th.matmul(A, X.view(X.shape[0], -1)).view_as(X)
     return Y + b
 
-def test_graph_conv0():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv0(out_dim):
     g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx)
-    conv = nn.GraphConv(5, 2, norm='none', bias=True)
+    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
     conv = conv.to(ctx)
     print(conv)
     # test#1: basic
@@ -37,7 +38,7 @@ def test_graph_conv0():
     assert len(g.edata) == 0
     assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
 
-    conv = nn.GraphConv(5, 2)
+    conv = nn.GraphConv(5, out_dim)
     conv = conv.to(ctx)
     # test#3: basic
     h0 = F.ones((3, 5))
@@ -50,7 +51,7 @@ def test_graph_conv0():
     assert len(g.ndata) == 0
     assert len(g.edata) == 0
 
-    conv = nn.GraphConv(5, 2)
+    conv = nn.GraphConv(5, out_dim)
     conv = conv.to(ctx)
     # test#3: basic
     h0 = F.ones((3, 5))
@@ -74,11 +75,12 @@ def test_graph_conv0():
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
-def test_graph_conv(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
     # Test one tensor input
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
-    ext_w = F.randn((5, 2)).to(F.ctx())
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
+    ext_w = F.randn((5, out_dim)).to(F.ctx())
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5)).to(F.ctx())
@@ -86,17 +88,18 @@ def test_graph_conv(idtype, g, norm, weight, bias):
         h_out = conv(g, h)
     else:
         h_out = conv(g, h, weight=ext_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph']))
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
-def test_graph_conv_e_weight(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
-    ext_w = F.randn((5, 2)).to(F.ctx())
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
+    ext_w = F.randn((5, out_dim)).to(F.ctx())
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5)).to(F.ctx())
@@ -105,17 +108,18 @@ def test_graph_conv_e_weight(idtype, g, norm, weight, bias):
         h_out = conv(g, h, edge_weight=e_w)
     else:
         h_out = conv(g, h, weight=ext_w, edge_weight=e_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph']))
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
-def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
-    ext_w = F.randn((5, 2)).to(F.ctx())
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
+    ext_w = F.randn((5, out_dim)).to(F.ctx())
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5)).to(F.ctx())
@@ -125,27 +129,28 @@ def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias):
         h_out = conv(g, h, edge_weight=norm_weight)
     else:
         h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
-def test_graph_conv_bi(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim):
     # Test a pair of tensor inputs
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
-    ext_w = F.randn((5, 2)).to(F.ctx())
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
+    ext_w = F.randn((5, out_dim)).to(F.ctx())
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5)).to(F.ctx())
-    h_dst = F.randn((ndst, 2)).to(F.ctx())
+    h_dst = F.randn((ndst, out_dim)).to(F.ctx())
     if weight:
         h_out = conv(g, (h, h_dst))
     else:
         h_out = conv(g, (h, h_dst), weight=ext_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 def _S2AXWb(A, N, X, W, b):
     X1 = X * N
@@ -159,14 +164,15 @@ def _S2AXWb(A, N, X, W, b):
     return Y + b
 
-def test_tagconv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_tagconv(out_dim):
     g = dgl.DGLGraph(nx.path_graph(3))
     g = g.to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx)
     norm = th.pow(g.in_degrees().float(), -0.5)
-    conv = nn.TAGConv(5, 2, bias=True)
+    conv = nn.TAGConv(5, out_dim, bias=True)
     conv = conv.to(ctx)
     print(conv)
@@ -180,13 +186,13 @@ def test_tagconv():
     assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight, conv.lin.bias))
 
-    conv = nn.TAGConv(5, 2)
+    conv = nn.TAGConv(5, out_dim)
     conv = conv.to(ctx)
 
     # test#2: basic
     h0 = F.ones((3, 5))
     h1 = conv(g, h0)
-    assert h1.shape[-1] == 2
+    assert h1.shape[-1] == out_dim
 
     # test reset_parameters
     old_weight = deepcopy(conv.lin.weight.data)
@@ -327,7 +333,8 @@ def test_set_trans():
     h2 = st_dec(bg, h1)
     assert h2.shape[0] == 3 and h2.shape[1] == 200 and h2.dim() == 2
 
-def test_rgcn():
+@pytest.mark.parametrize('O', [1, 2, 8])
+def test_rgcn(O):
     ctx = F.ctx()
     etype = []
     g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
@@ -338,7 +345,6 @@ def test_rgcn():
         etype.append(i % 5)
     B = 2
     I = 10
-    O = 8
 
     rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
     rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True).to(ctx)
@@ -353,6 +359,7 @@ def test_rgcn():
     assert list(h_new_low.shape) == [100, O]
     assert F.allclose(h_new, h_new_low)
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
         rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True).to(ctx)
         rgc_bdd_low.weight = rgc_bdd.weight
@@ -381,6 +388,7 @@ def test_rgcn():
     assert list(h_new_low.shape) == [100, O]
     assert F.allclose(h_new, h_new_low)
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
         rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True).to(ctx)
         rgc_bdd_low.weight = rgc_bdd.weight
@@ -408,7 +416,8 @@ def test_rgcn():
     assert F.allclose(h_new, h_new_low)
 
-def test_rgcn_sorted():
+@pytest.mark.parametrize('O', [1, 2, 8])
+def test_rgcn_sorted(O):
     ctx = F.ctx()
     etype = []
     g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
@@ -418,7 +427,6 @@ def test_rgcn_sorted():
     etype = [200, 200, 200, 200, 200]
     B = 2
     I = 10
-    O = 8
 
     rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx)
     rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True).to(ctx)
@@ -433,6 +441,7 @@ def test_rgcn_sorted():
     assert list(h_new_low.shape) == [100, O]
     assert F.allclose(h_new, h_new_low)
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
         rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True).to(ctx)
         rgc_bdd_low.weight = rgc_bdd.weight
@@ -461,6 +470,7 @@ def test_rgcn_sorted():
     assert list(h_new_low.shape) == [100, O]
     assert F.allclose(h_new, h_new_low)
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx)
         rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True).to(ctx)
         rgc_bdd_low.weight = rgc_bdd.weight
@@ -490,29 +500,33 @@ def test_rgcn_sorted():
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_gat_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_gat_conv(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv(5, 2, 4)
+    gat = nn.GATConv(5, out_dim, num_heads)
     feat = F.randn((g.number_of_nodes(), 5))
     gat = gat.to(ctx)
     h = gat(g, feat)
-    assert h.shape == (g.number_of_nodes(), 4, 2)
+    assert h.shape == (g.number_of_nodes(), num_heads, out_dim)
     _, a = gat(g, feat, get_attention=True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
-def test_gat_conv_bi(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_gat_conv_bi(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv(5, 2, 4)
+    gat = nn.GATConv(5, out_dim, num_heads)
     feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
     gat = gat.to(ctx)
     h = gat(g, feat)
-    assert h.shape == (g.number_of_dst_nodes(), 4, 2)
+    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
     _, a = gat(g, feat, get_attention=True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@@ -528,57 +542,60 @@ def test_sage_conv(idtype, g, aggre_type):
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite']))
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
-def test_sage_conv_bi(idtype, g, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
     g = g.astype(idtype).to(F.ctx())
     dst_dim = 5 if aggre_type != 'gcn' else 10
-    sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
+    sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
     feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
     sage = sage.to(F.ctx())
     h = sage(g, feat)
-    assert h.shape[-1] == 2
+    assert h.shape[-1] == out_dim
     assert h.shape[0] == g.number_of_dst_nodes()
 
 @parametrize_dtype
-def test_sage_conv2(idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sage_conv2(idtype, out_dim):
     # TODO: add test for blocks
     # Test the case for graphs without edges
     g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    sage = nn.SAGEConv((3, 3), 2, 'gcn')
+    sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
     feat = (F.randn((5, 3)), F.randn((3, 3)))
     sage = sage.to(ctx)
     h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))
-    assert h.shape[-1] == 2
+    assert h.shape[-1] == out_dim
     assert h.shape[0] == 3
     for aggre_type in ['mean', 'pool', 'lstm']:
-        sage = nn.SAGEConv((3, 1), 2, aggre_type)
+        sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
         feat = (F.randn((5, 3)), F.randn((3, 1)))
         sage = sage.to(ctx)
         h = sage(g, feat)
-        assert h.shape[-1] == 2
+        assert h.shape[-1] == out_dim
         assert h.shape[0] == 3
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
-def test_sgc_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sgc_conv(g, idtype, out_dim):
     ctx = F.ctx()
     g = g.astype(idtype).to(ctx)
     # not cached
-    sgc = nn.SGConv(5, 10, 3)
+    sgc = nn.SGConv(5, out_dim, 3)
     feat = F.randn((g.number_of_nodes(), 5))
     sgc = sgc.to(ctx)
    h = sgc(g, feat)
-    assert h.shape[-1] == 10
+    assert h.shape[-1] == out_dim
 
     # cached
-    sgc = nn.SGConv(5, 10, 3, True)
+    sgc = nn.SGConv(5, out_dim, 3, True)
     sgc = sgc.to(ctx)
     h_0 = sgc(g, feat)
     h_1 = sgc(g, feat + 1)
     assert F.allclose(h_0, h_1)
-    assert h_0.shape[-1] == 10
+    assert h_0.shape[-1] == out_dim
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@@ -718,13 +735,14 @@ def test_gmm_conv_bi(g, idtype):
 @parametrize_dtype
 @pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
 @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree']))
-def test_dense_graph_conv(norm_type, g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_graph_conv(norm_type, g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
     # TODO(minjie): enable the following option after #1385
     adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
-    conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
-    dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
+    conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True)
+    dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True)
     dense_conv.weight.data = conv.weight.data
     dense_conv.bias.data = conv.bias.data
     feat = F.randn((g.number_of_src_nodes(), 5))
@@ -736,12 +754,13 @@ def test_dense_graph_conv(norm_type, g, idtype):
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite']))
-def test_dense_sage_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_sage_conv(g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
     adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
-    sage = nn.SAGEConv(5, 2, 'gcn')
-    dense_sage = nn.DenseSAGEConv(5, 2)
+    sage = nn.SAGEConv(5, out_dim, 'gcn')
+    dense_sage = nn.DenseSAGEConv(5, out_dim)
     dense_sage.fc.weight.data = sage.fc_neigh.weight.data
     dense_sage.fc.bias.data = sage.fc_neigh.bias.data
     if len(g.ntypes) == 2:
@@ -759,64 +778,71 @@ def test_dense_sage_conv(g, idtype):
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_edge_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_edge_conv(g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
 
-    edge_conv = nn.EdgeConv(5, 2).to(ctx)
+    edge_conv = nn.EdgeConv(5, out_dim).to(ctx)
     print(edge_conv)
 
     h0 = F.randn((g.number_of_nodes(), 5))
     h1 = edge_conv(g, h0)
-    assert h1.shape == (g.number_of_nodes(), 2)
+    assert h1.shape == (g.number_of_nodes(), out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
-def test_edge_conv_bi(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_edge_conv_bi(g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    edge_conv = nn.EdgeConv(5, 2).to(ctx)
+    edge_conv = nn.EdgeConv(5, out_dim).to(ctx)
     print(edge_conv)
     h0 = F.randn((g.number_of_src_nodes(), 5))
     x0 = F.randn((g.number_of_dst_nodes(), 5))
     h1 = edge_conv(g, (h0, x0))
-    assert h1.shape == (g.number_of_dst_nodes(), 2)
+    assert h1.shape == (g.number_of_dst_nodes(), out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_dotgat_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_dotgat_conv(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    dotgat = nn.DotGatConv(5, 2, 4)
+    dotgat = nn.DotGatConv(5, out_dim, num_heads)
     feat = F.randn((g.number_of_nodes(), 5))
     dotgat = dotgat.to(ctx)
     h = dotgat(g, feat)
-    assert h.shape == (g.number_of_nodes(), 4, 2)
+    assert h.shape == (g.number_of_nodes(), num_heads, out_dim)
     _, a = dotgat(g, feat, get_attention=True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
-def test_dotgat_conv_bi(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_dotgat_conv_bi(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    dotgat = nn.DotGatConv((5, 5), 2, 4)
+    dotgat = nn.DotGatConv((5, 5), out_dim, num_heads)
     feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
     dotgat = dotgat.to(ctx)
     h = dotgat(g, feat)
-    assert h.shape == (g.number_of_dst_nodes(), 4, 2)
+    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
     _, a = dotgat(g, feat, get_attention=True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
-def test_dense_cheb_conv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_cheb_conv(out_dim):
     for k in range(1, 4):
         ctx = F.ctx()
         g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
         g = g.to(F.ctx())
         adj = g.adjacency_matrix(transpose=False, ctx=ctx).to_dense()
-        cheb = nn.ChebConv(5, 2, k, None)
-        dense_cheb = nn.DenseChebConv(5, 2, k)
+        cheb = nn.ChebConv(5, out_dim, k, None)
+        dense_cheb = nn.DenseChebConv(5, out_dim, k)
         #for i in range(len(cheb.fc)):
         #    dense_cheb.W.data[i] = cheb.fc[i].weight.data.t()
-        dense_cheb.W.data = cheb.linear.weight.data.transpose(-1, -2).view(k, 5, 2)
+        dense_cheb.W.data = cheb.linear.weight.data.transpose(-1, -2).view(k, 5, out_dim)
         if cheb.linear.bias is not None:
             dense_cheb.bias.data = cheb.linear.bias.data
         feat = F.randn((100, 5))
@@ -898,12 +924,13 @@ def test_atomic_conv(g, idtype):
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
-def test_cf_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 3])
+def test_cf_conv(g, idtype, out_dim):
     g = g.astype(idtype).to(F.ctx())
     cfconv = nn.CFConv(node_in_feats=2,
                        edge_in_feats=3,
                        hidden_feats=2,
-                       out_feats=3)
+                       out_feats=out_dim)
     ctx = F.ctx()
     if F.gpu_ctx():
@@ -913,7 +940,7 @@ def test_cf_conv(g, idtype):
     edge_feats = F.randn((g.number_of_edges(), 3))
     h = cfconv(g, node_feats, edge_feats)
     # current we only do shape check
-    assert h.shape[-1] == 3
+    assert h.shape[-1] == out_dim
 
 def myagg(alist, dsttype):
     rst = alist[0]
```
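One detail worth calling out from the RGCN hunks: the block-diagonal-decomposition ("bdd") variants are now wrapped in `if O % B == 0:`. As an assumption from how bdd regularization is usually defined, the weight is split into B blocks of shape (I/B, O/B), which only exists when B divides both dimensions; with O parametrized over [1, 2, 8] and B = 2, the O = 1 case has to be skipped. A sketch of the shape arithmetic in plain torch (illustrative only, not DGL's implementation):

```python
import torch as th

N, I, O, B = 100, 10, 8, 2          # nodes, in_feat, out_feat, blocks
assert I % B == 0 and O % B == 0    # the precondition the tests now guard

blocks = th.randn(B, I // B, O // B)       # one relation's block-diagonal weight
h = th.randn(N, I)

h_blocks = h.view(N, B, I // B)            # split features into B chunks
out = th.einsum('nbi,bio->nbo', h_blocks, blocks)
out = out.reshape(N, O)                    # (N, O): only valid when O % B == 0
print(out.shape)                           # torch.Size([100, 8])
```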
tests/tensorflow/test_nn.py

```diff
@@ -18,12 +18,13 @@ def _AXWb(A, X, W, b):
     Y = tf.reshape(tf.matmul(A, tf.reshape(X, (X.shape[0], -1))), X.shape)
     return Y + b
 
-def test_graph_conv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv(out_dim):
     g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
     ctx = F.ctx()
     adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=False, ctx=ctx)))
 
-    conv = nn.GraphConv(5, 2, norm='none', bias=True)
+    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
     # conv = conv
     print(conv)
     # test#1: basic
@@ -39,7 +40,7 @@ def test_graph_conv():
     assert len(g.edata) == 0
     assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
 
-    conv = nn.GraphConv(5, 2)
+    conv = nn.GraphConv(5, out_dim)
     # conv = conv
     # test#3: basic
     h0 = F.ones((3, 5))
@@ -52,7 +53,7 @@ def test_graph_conv():
     assert len(g.ndata) == 0
     assert len(g.edata) == 0
 
-    conv = nn.GraphConv(5, 2)
+    conv = nn.GraphConv(5, out_dim)
     # conv = conv
     # test#3: basic
     h0 = F.ones((3, 5))
@@ -76,38 +77,40 @@ def test_graph_conv():
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
-def test_graph_conv2(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv2(idtype, g, norm, weight, bias, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
-    ext_w = F.randn((5, 2))
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
+    ext_w = F.randn((5, out_dim))
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5))
-    h_dst = F.randn((ndst, 2))
+    h_dst = F.randn((ndst, out_dim))
     if weight:
         h_out = conv(g, h)
     else:
         h_out = conv(g, h, weight=ext_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
 @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
-def test_graph_conv2_bi(idtype, g, norm, weight, bias):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_graph_conv2_bi(idtype, g, norm, weight, bias, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
-    ext_w = F.randn((5, 2))
+    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
+    ext_w = F.randn((5, out_dim))
     nsrc = g.number_of_src_nodes()
     ndst = g.number_of_dst_nodes()
     h = F.randn((nsrc, 5))
-    h_dst = F.randn((ndst, 2))
+    h_dst = F.randn((ndst, out_dim))
     if weight:
         h_out = conv(g, (h, h_dst))
     else:
         h_out = conv(g, (h, h_dst), weight=ext_w)
-    assert h_out.shape == (ndst, 2)
+    assert h_out.shape == (ndst, out_dim)
 
 def test_simple_pool():
     ctx = F.ctx()
@@ -179,7 +182,8 @@ def test_glob_att_pool():
     assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.ndim == 2
 
-def test_rgcn():
+@pytest.mark.parametrize('O', [1, 2, 8])
+def test_rgcn(O):
     etype = []
     g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True).to(F.ctx())
     # 5 etypes
@@ -188,7 +192,6 @@ def test_rgcn():
         etype.append(i % 5)
     B = 2
     I = 10
-    O = 8
 
     rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
     rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True)
@@ -203,6 +206,7 @@ def test_rgcn():
     assert list(h_new_low.shape) == [100, O]
     assert F.allclose(h_new, h_new_low)
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
         rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True)
         rgc_bdd_low.weight = rgc_bdd.weight
@@ -231,6 +235,7 @@ def test_rgcn():
     assert list(h_new_low.shape) == [100, O]
     assert F.allclose(h_new, h_new_low)
 
+    if O % B == 0:
         rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
         rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True)
         rgc_bdd_low.weight = rgc_bdd.weight
@@ -259,87 +264,94 @@ def test_rgcn():
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
-def test_gat_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_gat_conv(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv(5, 2, 4)
+    gat = nn.GATConv(5, out_dim, num_heads)
     feat = F.randn((g.number_of_nodes(), 5))
     h = gat(g, feat)
-    assert h.shape == (g.number_of_nodes(), 4, 2)
+    assert h.shape == (g.number_of_nodes(), num_heads, out_dim)
     _, a = gat(g, feat, get_attention=True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
-def test_gat_conv_bi(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+@pytest.mark.parametrize('num_heads', [1, 4])
+def test_gat_conv_bi(g, idtype, out_dim, num_heads):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    gat = nn.GATConv(5, 2, 4)
+    gat = nn.GATConv(5, out_dim, num_heads)
     feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
     h = gat(g, feat)
-    assert h.shape == (g.number_of_dst_nodes(), 4, 2)
+    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
     _, a = gat(g, feat, get_attention=True)
-    assert a.shape == (g.number_of_edges(), 4, 1)
+    assert a.shape == (g.number_of_edges(), num_heads, 1)
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
-def test_sage_conv(idtype, g, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 10])
+def test_sage_conv(idtype, g, aggre_type, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    sage = nn.SAGEConv(5, 10, aggre_type)
+    sage = nn.SAGEConv(5, out_dim, aggre_type)
     feat = F.randn((g.number_of_nodes(), 5))
     h = sage(g, feat)
-    assert h.shape[-1] == 10
+    assert h.shape[-1] == out_dim
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['bipartite']))
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
-def test_sage_conv_bi(idtype, g, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
     g = g.astype(idtype).to(F.ctx())
-    sage = nn.SAGEConv(5, 10, aggre_type)
     dst_dim = 5 if aggre_type != 'gcn' else 10
-    sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
+    sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
     feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
     h = sage(g, feat)
-    assert h.shape[-1] == 2
+    assert h.shape[-1] == out_dim
     assert h.shape[0] == g.number_of_dst_nodes()
 
 @parametrize_dtype
 @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
-def test_sage_conv_bi_empty(idtype, aggre_type):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sage_conv_bi_empty(idtype, aggre_type, out_dim):
     # Test the case for graphs without edges
     g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3}).to(F.ctx())
     g = g.astype(idtype).to(F.ctx())
-    sage = nn.SAGEConv((3, 3), 2, 'gcn')
+    sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
     feat = (F.randn((5, 3)), F.randn((3, 3)))
     h = sage(g, feat)
-    assert h.shape[-1] == 2
+    assert h.shape[-1] == out_dim
     assert h.shape[0] == 3
     for aggre_type in ['mean', 'pool', 'lstm']:
-        sage = nn.SAGEConv((3, 1), 2, aggre_type)
+        sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
         feat = (F.randn((5, 3)), F.randn((3, 1)))
         h = sage(g, feat)
-        assert h.shape[-1] == 2
+        assert h.shape[-1] == out_dim
         assert h.shape[0] == 3
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
-def test_sgc_conv(g, idtype):
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_sgc_conv(g, idtype, out_dim):
     ctx = F.ctx()
     g = g.astype(idtype).to(ctx)
     # not cached
-    sgc = nn.SGConv(5, 10, 3)
+    sgc = nn.SGConv(5, out_dim, 3)
     feat = F.randn((g.number_of_nodes(), 5))
     h = sgc(g, feat)
-    assert h.shape[-1] == 10
+    assert h.shape[-1] == out_dim
 
     # cached
-    sgc = nn.SGConv(5, 10, 3, True)
+    sgc = nn.SGConv(5, out_dim, 3, True)
     h_0 = sgc(g, feat)
     h_1 = sgc(g, feat + 1)
     assert F.allclose(h_0, h_1)
-    assert h_0.shape[-1] == 10
+    assert h_0.shape[-1] == out_dim
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@@ -463,21 +475,22 @@ def test_hetero_conv(agg, idtype):
     assert mod3.carg2 == 1
 
-def test_dense_cheb_conv():
+@pytest.mark.parametrize('out_dim', [1, 2])
+def test_dense_cheb_conv(out_dim):
     for k in range(3, 4):
         ctx = F.ctx()
         g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1, random_state=42))
         g = g.to(ctx)
         adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=False, ctx=ctx)))
-        cheb = nn.ChebConv(5, 2, k, None, bias=True)
-        dense_cheb = nn.DenseChebConv(5, 2, k, bias=True)
+        cheb = nn.ChebConv(5, out_dim, k, None, bias=True)
+        dense_cheb = nn.DenseChebConv(5, out_dim, k, bias=True)
         # init cheb modules
         feat = F.ones((100, 5))
         out_cheb = cheb(g, feat, [2.0])
 
-        dense_cheb.W = tf.reshape(cheb.linear.weights[0], (k, 5, 2))
+        dense_cheb.W = tf.reshape(cheb.linear.weights[0], (k, 5, out_dim))
         if cheb.linear.bias is not None:
             dense_cheb.bias = cheb.linear.bias
```
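The `num_heads` parametrization serves the same purpose one level up: multi-head outputs have shape (N, num_heads, out_dim), and the new assertions check all three dimensions so that neither a 1-head nor a 1-feature configuration can silently lose an axis. The degenerate case, in a few lines of torch:

```python
import torch as th

h = th.randn(7, 1, 1)      # (N, num_heads=1, out_dim=1), the new worst case
print(h.shape)             # torch.Size([7, 1, 1]) -- what the tests assert
print(h.squeeze().shape)   # torch.Size([7])       -- what a stray squeeze() yields
```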