Unverified Commit ec2e24be authored by Quan (Andy) Gan's avatar Quan (Andy) Gan Committed by GitHub
Browse files

[Feature] Slice dstnode features in NN modules (#1838)

* slice dstdata from srcdata within nn module

* a bunch of fixes

* add comment

* fix gcmc layer

* repr for blocks

* fix

* fix context

* fix

* do not copy internal columns

* docstring
parent 0e896a92
...@@ -67,7 +67,7 @@ class EdgeConv(nn.Module): ...@@ -67,7 +67,7 @@ class EdgeConv(nn.Module):
New node features. New node features.
""" """
with g.local_scope(): with g.local_scope():
h_src, h_dst = expand_as_pair(h) h_src, h_dst = expand_as_pair(h, g)
g.srcdata['x'] = h_src g.srcdata['x'] = h_src
g.dstdata['x'] = h_dst g.dstdata['x'] = h_dst
if not self.batch_norm: if not self.batch_norm:
......
...@@ -128,6 +128,8 @@ class GATConv(nn.Module): ...@@ -128,6 +128,8 @@ class GATConv(nn.Module):
h_src = h_dst = self.feat_drop(feat) h_src = h_dst = self.feat_drop(feat)
feat_src = feat_dst = self.fc(h_src).view( feat_src = feat_dst = self.fc(h_src).view(
-1, self._num_heads, self._out_feats) -1, self._num_heads, self._out_feats)
if graph.is_block:
feat_dst = feat_src[:graph.number_of_dst_nodes()]
# NOTE: GAT paper uses "first concatenation then linear projection" # NOTE: GAT paper uses "first concatenation then linear projection"
# to compute attention scores, while ours is "first projection then # to compute attention scores, while ours is "first projection then
# addition", the two approaches are mathematically equivalent: # addition", the two approaches are mathematically equivalent:
......
...@@ -73,7 +73,7 @@ class GINConv(nn.Module): ...@@ -73,7 +73,7 @@ class GINConv(nn.Module):
as input dimensionality. as input dimensionality.
""" """
with graph.local_scope(): with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat) feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh')) graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh'] rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']
......
...@@ -111,7 +111,7 @@ class GMMConv(nn.Module): ...@@ -111,7 +111,7 @@ class GMMConv(nn.Module):
is the output feature size. is the output feature size.
""" """
with graph.local_scope(): with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat) feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = self.fc(feat_src).view(-1, self._n_kernels, self._out_feats) graph.srcdata['h'] = self.fc(feat_src).view(-1, self._n_kernels, self._out_feats)
E = graph.number_of_edges() E = graph.number_of_edges()
# compute gaussian weight # compute gaussian weight
......
...@@ -134,7 +134,8 @@ class GraphConv(nn.Module): ...@@ -134,7 +134,8 @@ class GraphConv(nn.Module):
The output feature The output feature
""" """
with graph.local_scope(): with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat) # (BarclayII) For RGCN on heterogeneous graphs we need to support GCN on bipartite.
feat_src, feat_dst = expand_as_pair(feat, graph)
if self._norm == 'both': if self._norm == 'both':
degs = graph.out_degrees().to(feat_src.device).float().clamp(min=1) degs = graph.out_degrees().to(feat_src.device).float().clamp(min=1)
......
...@@ -103,7 +103,7 @@ class NNConv(nn.Module): ...@@ -103,7 +103,7 @@ class NNConv(nn.Module):
is the output feature size. is the output feature size.
""" """
with graph.local_scope(): with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat) feat_src, feat_dst = expand_as_pair(feat, graph)
# (n, d_in, 1) # (n, d_in, 1)
graph.srcdata['h'] = feat_src.unsqueeze(-1) graph.srcdata['h'] = feat_src.unsqueeze(-1)
......
...@@ -122,6 +122,8 @@ class SAGEConv(nn.Module): ...@@ -122,6 +122,8 @@ class SAGEConv(nn.Module):
feat_dst = self.feat_drop(feat[1]) feat_dst = self.feat_drop(feat[1])
else: else:
feat_src = feat_dst = self.feat_drop(feat) feat_src = feat_dst = self.feat_drop(feat)
if graph.is_block:
feat_dst = feat_src[:graph.number_of_dst_nodes()]
h_self = feat_dst h_self = feat_dst
......
...@@ -135,8 +135,13 @@ class HeteroGraphConv(nn.Module): ...@@ -135,8 +135,13 @@ class HeteroGraphConv(nn.Module):
if mod_kwargs is None: if mod_kwargs is None:
mod_kwargs = {} mod_kwargs = {}
outputs = {nty : [] for nty in g.dsttypes} outputs = {nty : [] for nty in g.dsttypes}
if isinstance(inputs, tuple) or g.is_block:
if isinstance(inputs, tuple): if isinstance(inputs, tuple):
src_inputs, dst_inputs = inputs src_inputs, dst_inputs = inputs
else:
src_inputs = inputs
dst_inputs = {k: v[:g.number_of_dst_nodes(k)] for k, v in inputs.items()}
for stype, etype, dtype in g.canonical_etypes: for stype, etype, dtype in g.canonical_etypes:
rel_graph = g[stype, etype, dtype] rel_graph = g[stype, etype, dtype]
if rel_graph.number_of_edges() == 0: if rel_graph.number_of_edges() == 0:
......
...@@ -122,6 +122,8 @@ class GATConv(layers.Layer): ...@@ -122,6 +122,8 @@ class GATConv(layers.Layer):
h_src = h_dst = self.feat_drop(feat) h_src = h_dst = self.feat_drop(feat)
feat_src = feat_dst = tf.reshape( feat_src = feat_dst = tf.reshape(
self.fc(h_src), (-1, self._num_heads, self._out_feats)) self.fc(h_src), (-1, self._num_heads, self._out_feats))
if graph.is_block:
feat_dst = feat_src[:graph.number_of_dst_nodes()]
# NOTE: GAT paper uses "first concatenation then linear projection" # NOTE: GAT paper uses "first concatenation then linear projection"
# to compute attention scores, while ours is "first projection then # to compute attention scores, while ours is "first projection then
# addition", the two approaches are mathematically equivalent: # addition", the two approaches are mathematically equivalent:
......
...@@ -71,7 +71,7 @@ class GINConv(layers.Layer): ...@@ -71,7 +71,7 @@ class GINConv(layers.Layer):
as input dimensionality. as input dimensionality.
""" """
with graph.local_scope(): with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat) feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh')) graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh'] rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']
......
...@@ -130,7 +130,7 @@ class GraphConv(layers.Layer): ...@@ -130,7 +130,7 @@ class GraphConv(layers.Layer):
The output feature The output feature
""" """
with graph.local_scope(): with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat) feat_src, feat_dst = expand_as_pair(feat, graph)
if self._norm == 'both': if self._norm == 'both':
degs = tf.clip_by_value(tf.cast(graph.out_degrees(), tf.float32), degs = tf.clip_by_value(tf.cast(graph.out_degrees(), tf.float32),
......
...@@ -107,6 +107,8 @@ class SAGEConv(layers.Layer): ...@@ -107,6 +107,8 @@ class SAGEConv(layers.Layer):
feat_dst = self.feat_drop(feat[1]) feat_dst = self.feat_drop(feat[1])
else: else:
feat_src = feat_dst = self.feat_drop(feat) feat_src = feat_dst = self.feat_drop(feat)
if graph.is_block:
feat_dst = feat_src[:graph.number_of_dst_nodes()]
h_self = feat_dst h_self = feat_dst
......
...@@ -8,7 +8,7 @@ from scipy import sparse ...@@ -8,7 +8,7 @@ from scipy import sparse
from ._ffi.function import _init_api from ._ffi.function import _init_api
from .graph import DGLGraph from .graph import DGLGraph
from .heterograph import DGLHeteroGraph from .heterograph import DGLHeteroGraph, DGLBlock
from . import ndarray as nd from . import ndarray as nd
from . import backend as F from . import backend as F
from .graph_index import from_coo from .graph_index import from_coo
...@@ -16,7 +16,7 @@ from .graph_index import _get_halo_subgraph_inner_node ...@@ -16,7 +16,7 @@ from .graph_index import _get_halo_subgraph_inner_node
from .graph import unbatch from .graph import unbatch
from .convert import graph, bipartite, heterograph from .convert import graph, bipartite, heterograph
from . import utils from . import utils
from .base import EID, NID from .base import EID, NID, DGLError, is_internal_column
from . import ndarray as nd from . import ndarray as nd
from .partition import metis_partition_assignment as hetero_metis_partition_assignment from .partition import metis_partition_assignment as hetero_metis_partition_assignment
from .partition import partition_graph_with_halo as hetero_partition_graph_with_halo from .partition import partition_graph_with_halo as hetero_partition_graph_with_halo
...@@ -1294,7 +1294,7 @@ def compact_graphs(graphs, always_preserve=None): ...@@ -1294,7 +1294,7 @@ def compact_graphs(graphs, always_preserve=None):
return new_graphs return new_graphs
def to_block(g, dst_nodes=None, include_dst_in_src=True): def to_block(g, dst_nodes=None, include_dst_in_src=True, copy_ndata=True, copy_edata=True):
"""Convert a graph into a bipartite-structured "block" for message passing. """Convert a graph into a bipartite-structured "block" for message passing.
A block graph is a uni-directional bipartite graph consisting of two sets of nodes A block graph is a uni-directional bipartite graph consisting of two sets of nodes
...@@ -1334,8 +1334,18 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True): ...@@ -1334,8 +1334,18 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True):
The graph. The graph.
dst_nodes : Tensor or dict[str, Tensor], optional dst_nodes : Tensor or dict[str, Tensor], optional
Optional DST nodes. If a tensor is given, the graph must have only one node type. Optional DST nodes. If a tensor is given, the graph must have only one node type.
include_dst_in_src : bool, default True include_dst_in_src : bool
If False, do not include DST nodes in SRC nodes. If False, do not include DST nodes in SRC nodes.
(Default: True)
copy_ndata : bool, optional
If True, the source and destination node features of the block are copied from the
original graph.
If False, the block will not have any node features.
(Default: True)
copy_edata: bool, optional
If True, the edge features of the block are copied from the original graph.
If False, the block will not have any edge features.
(Default: True)
Returns Returns
------- -------
...@@ -1437,9 +1447,39 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True): ...@@ -1437,9 +1447,39 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True):
# The new graph duplicates the original node types to SRC and DST sets. # The new graph duplicates the original node types to SRC and DST sets.
new_ntypes = ([ntype for ntype in g.ntypes], [ntype for ntype in g.ntypes]) new_ntypes = ([ntype for ntype in g.ntypes], [ntype for ntype in g.ntypes])
new_graph = DGLHeteroGraph(new_graph_index, new_ntypes, g.etypes) new_graph = DGLBlock(new_graph_index, new_ntypes, g.etypes)
assert new_graph.is_unibipartite # sanity check assert new_graph.is_unibipartite # sanity check
src_node_id = {
ntype: F.zerocopy_from_dgl_ndarray(src)
for ntype, src in zip(g.ntypes, src_nodes_nd)}
dst_node_id = {
ntype: dst_nodes.get(ntype, F.tensor([], dtype=g.idtype))
for ntype in g.ntypes}
edge_id = {
canonical_etype: F.zerocopy_from_dgl_ndarray(edges)
for canonical_etype, edges in zip(g.canonical_etypes, induced_edges_nd)}
if copy_ndata:
for ntype in g.ntypes:
src = src_node_id[ntype]
dst = dst_node_id[ntype]
for key, value in g.nodes[ntype].data.items():
if is_internal_column(key):
continue
ctx = F.context(value)
new_graph.srcnodes[ntype].data[key] = F.gather_row(value, F.copy_to(src, ctx))
new_graph.dstnodes[ntype].data[key] = F.gather_row(value, F.copy_to(dst, ctx))
if copy_edata:
for canonical_etype in g.canonical_etypes:
eid = edge_id[canonical_etype]
for key, value in g.edges[canonical_etype].data.items():
if is_internal_column(key):
continue
ctx = F.context(value)
new_graph.edges[canonical_etype].data[key] = F.gather_row(
value, F.copy_to(eid, ctx))
for i, ntype in enumerate(g.ntypes): for i, ntype in enumerate(g.ntypes):
new_graph.srcnodes[ntype].data[NID] = F.zerocopy_from_dgl_ndarray(src_nodes_nd[i]) new_graph.srcnodes[ntype].data[NID] = F.zerocopy_from_dgl_ndarray(src_nodes_nd[i])
if ntype in dst_nodes: if ntype in dst_nodes:
...@@ -1450,9 +1490,7 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True): ...@@ -1450,9 +1490,7 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True):
for i, canonical_etype in enumerate(g.canonical_etypes): for i, canonical_etype in enumerate(g.canonical_etypes):
induced_edges = F.zerocopy_from_dgl_ndarray(induced_edges_nd[i]) induced_edges = F.zerocopy_from_dgl_ndarray(induced_edges_nd[i])
utype, etype, vtype = canonical_etype new_graph.edges[canonical_etype].data[EID] = induced_edges
new_canonical_etype = (utype, etype, vtype)
new_graph.edges[new_canonical_etype].data[EID] = induced_edges
return new_graph return new_graph
......
...@@ -550,11 +550,35 @@ def make_invmap(array, use_numpy=True): ...@@ -550,11 +550,35 @@ def make_invmap(array, use_numpy=True):
remapped = np.asarray([invmap[x] for x in array]) remapped = np.asarray([invmap[x] for x in array])
return uniques, invmap, remapped return uniques, invmap, remapped
def expand_as_pair(input_): def expand_as_pair(input_, g=None):
"""Return a pair of same element if the input is not a pair. """Return a pair of same element if the input is not a pair.
If the graph is a block, obtain the feature of destination nodes from the source nodes.
Parameters
----------
input_ : Tensor, dict[str, Tensor], or their pairs
The input features
g : DGLHeteroGraph or DGLGraph or None
The graph.
If None, skip checking if the graph is a block.
Returns
-------
tuple[Tensor, Tensor] or tuple[dict[str, Tensor], dict[str, Tensor]]
The features for input and output nodes
""" """
if isinstance(input_, tuple): if isinstance(input_, tuple):
return input_ return input_
elif g is not None and g.is_block:
if isinstance(input_, Mapping):
input_dst = {
k: F.narrow_row(v, 0, g.number_of_dst_nodes(k))
for k, v in input_.items()}
else:
input_dst = F.narrow_row(input_, 0, g.number_of_dst_nodes())
return input_, input_dst
else: else:
return input_, input_ return input_, input_
......
...@@ -265,6 +265,12 @@ def test_pickling_heterograph(): ...@@ -265,6 +265,12 @@ def test_pickling_heterograph():
new_g = _reconstruct_pickle(g) new_g = _reconstruct_pickle(g)
_assert_is_identical_hetero(g, new_g) _assert_is_identical_hetero(g, new_g)
block = dgl.to_block(g, {'user': [1, 2], 'game': [0, 1], 'developer': []})
new_block = _reconstruct_pickle(block)
_assert_is_identical_hetero(block, new_block)
assert block.is_block
assert new_block.is_block
def test_pickling_batched_heterograph(): def test_pickling_batched_heterograph():
# copied from test_heterograph.create_test_heterograph() # copied from test_heterograph.create_test_heterograph()
plays_spmat = ssp.coo_matrix(([1, 1, 1, 1], ([0, 1, 2, 1], [0, 0, 1, 1]))) plays_spmat = ssp.coo_matrix(([1, 1, 1, 1], ([0, 1, 2, 1], [0, 0, 1, 1])))
......
...@@ -893,21 +893,46 @@ def test_to_block(index_dtype): ...@@ -893,21 +893,46 @@ def test_to_block(index_dtype):
('A', 'AA', 'A'): [(0, 1), (2, 3), (1, 2), (3, 4)], ('A', 'AA', 'A'): [(0, 1), (2, 3), (1, 2), (3, 4)],
('A', 'AB', 'B'): [(0, 1), (1, 3), (3, 5), (1, 6)], ('A', 'AB', 'B'): [(0, 1), (1, 3), (3, 5), (1, 6)],
('B', 'BA', 'A'): [(2, 3), (3, 2)]}, index_dtype=index_dtype) ('B', 'BA', 'A'): [(2, 3), (3, 2)]}, index_dtype=index_dtype)
g.nodes['A'].data['x'] = F.randn((5, 10))
g.nodes['B'].data['x'] = F.randn((7, 5))
g.edges['AA'].data['x'] = F.randn((4, 3))
g.edges['AB'].data['x'] = F.randn((4, 3))
g.edges['BA'].data['x'] = F.randn((2, 3))
g_a = g['AA'] g_a = g['AA']
def check_features(g, bg):
for ntype in bg.srctypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.srcnodes[ntype].data[key],
F.gather_row(g.nodes[ntype].data[key], bg.srcnodes[ntype].data[dgl.NID]))
for ntype in bg.dsttypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.dstnodes[ntype].data[key],
F.gather_row(g.nodes[ntype].data[key], bg.dstnodes[ntype].data[dgl.NID]))
for etype in bg.canonical_etypes:
for key in g.edges[etype].data:
assert F.array_equal(
bg.edges[etype].data[key],
F.gather_row(g.edges[etype].data[key], bg.edges[etype].data[dgl.EID]))
bg = dgl.to_block(g_a) bg = dgl.to_block(g_a)
check(g_a, bg, 'A', 'AA', None) check(g_a, bg, 'A', 'AA', None)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 5 assert bg.number_of_src_nodes() == 5
assert bg.number_of_dst_nodes() == 4 assert bg.number_of_dst_nodes() == 4
bg = dgl.to_block(g_a, include_dst_in_src=False) bg = dgl.to_block(g_a, include_dst_in_src=False)
check(g_a, bg, 'A', 'AA', None, False) check(g_a, bg, 'A', 'AA', None, False)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 4 assert bg.number_of_src_nodes() == 4
assert bg.number_of_dst_nodes() == 4 assert bg.number_of_dst_nodes() == 4
dst_nodes = F.tensor([4, 3, 2, 1], dtype=getattr(F, index_dtype)) dst_nodes = F.tensor([4, 3, 2, 1], dtype=getattr(F, index_dtype))
bg = dgl.to_block(g_a, dst_nodes) bg = dgl.to_block(g_a, dst_nodes)
check(g_a, bg, 'A', 'AA', dst_nodes) check(g_a, bg, 'A', 'AA', dst_nodes)
check_features(g_a, bg)
g_ab = g['AB'] g_ab = g['AB']
...@@ -917,6 +942,7 @@ def test_to_block(index_dtype): ...@@ -917,6 +942,7 @@ def test_to_block(index_dtype):
assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID]) assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID])
assert bg.number_of_nodes('DST/A') == 0 assert bg.number_of_nodes('DST/A') == 0
checkall(g_ab, bg, None) checkall(g_ab, bg, None)
check_features(g_ab, bg)
dst_nodes = {'B': F.tensor([5, 6, 3, 1], dtype=getattr(F, index_dtype))} dst_nodes = {'B': F.tensor([5, 6, 3, 1], dtype=getattr(F, index_dtype))}
bg = dgl.to_block(g, dst_nodes) bg = dgl.to_block(g, dst_nodes)
...@@ -924,10 +950,12 @@ def test_to_block(index_dtype): ...@@ -924,10 +950,12 @@ def test_to_block(index_dtype):
assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID]) assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID])
assert bg.number_of_nodes('DST/A') == 0 assert bg.number_of_nodes('DST/A') == 0
checkall(g, bg, dst_nodes) checkall(g, bg, dst_nodes)
check_features(g, bg)
dst_nodes = {'A': F.tensor([4, 3, 2, 1], dtype=getattr(F, index_dtype)), 'B': F.tensor([3, 5, 6, 1], dtype=getattr(F, index_dtype))} dst_nodes = {'A': F.tensor([4, 3, 2, 1], dtype=getattr(F, index_dtype)), 'B': F.tensor([3, 5, 6, 1], dtype=getattr(F, index_dtype))}
bg = dgl.to_block(g, dst_nodes=dst_nodes) bg = dgl.to_block(g, dst_nodes=dst_nodes)
checkall(g, bg, dst_nodes) checkall(g, bg, dst_nodes)
check_features(g, bg)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented") @unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype @parametrize_dtype
......
...@@ -7,7 +7,8 @@ import dgl ...@@ -7,7 +7,8 @@ import dgl
import dgl.nn.mxnet as nn import dgl.nn.mxnet as nn
import dgl.function as fn import dgl.function as fn
import backend as F import backend as F
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph, \
random_block
from mxnet import autograd, gluon, nd from mxnet import autograd, gluon, nd
def check_close(a, b): def check_close(a, b):
...@@ -75,7 +76,7 @@ def test_graph_conv(): ...@@ -75,7 +76,7 @@ def test_graph_conv():
assert "h" in g.ndata assert "h" in g.ndata
check_close(g.ndata['h'], 2 * F.ones((3, 1))) check_close(g.ndata['h'], 2 * F.ones((3, 1)))
@pytest.mark.parametrize('g', get_cases(['path', 'bipartite', 'small'], exclude=['zero-degree'])) @pytest.mark.parametrize('g', get_cases(['path', 'bipartite', 'small', 'block'], exclude=['zero-degree']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False]) @pytest.mark.parametrize('bias', [False])
...@@ -163,6 +164,16 @@ def test_gat_conv(): ...@@ -163,6 +164,16 @@ def test_gat_conv():
h = gat(g, feat) h = gat(g, feat)
assert h.shape == (200, 4, 2) assert h.shape == (200, 4, 2)
# test#3: block
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].asnumpy())
block = dgl.to_block(g, seed_nodes)
gat = nn.GATConv(5, 2, 4)
gat.initialize(ctx=ctx)
feat = F.randn((block.number_of_src_nodes(), 5))
h = gat(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 4, 2)
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn']) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
def test_sage_conv(aggre_type): def test_sage_conv(aggre_type):
...@@ -190,6 +201,16 @@ def test_sage_conv(aggre_type): ...@@ -190,6 +201,16 @@ def test_sage_conv(aggre_type):
assert h.shape[-1] == 2 assert h.shape[-1] == 2
assert h.shape[0] == 200 assert h.shape[0] == 200
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].asnumpy())
block = dgl.to_block(g, seed_nodes)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((block.number_of_src_nodes(), 5))
sage.initialize(ctx=ctx)
h = sage(block, feat)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 10
# Test the case for graphs without edges # Test the case for graphs without edges
g = dgl.bipartite([], num_nodes=(5, 3)) g = dgl.bipartite([], num_nodes=(5, 3))
sage = nn.SAGEConv((3, 3), 2, 'gcn') sage = nn.SAGEConv((3, 3), 2, 'gcn')
...@@ -251,6 +272,13 @@ def test_agnn_conv(): ...@@ -251,6 +272,13 @@ def test_agnn_conv():
h = agnn_conv(g, feat) h = agnn_conv(g, feat)
assert h.shape == (200, 5) assert h.shape == (200, 5)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].asnumpy())
block = dgl.to_block(g, seed_nodes)
feat = F.randn((block.number_of_src_nodes(), 5))
h = agnn_conv(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 5)
def test_appnp_conv(): def test_appnp_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3)) g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
ctx = F.ctx() ctx = F.ctx()
...@@ -328,7 +356,7 @@ def test_dense_sage_conv(g): ...@@ -328,7 +356,7 @@ def test_dense_sage_conv(g):
out_dense_sage = dense_sage(adj, feat) out_dense_sage = dense_sage(adj, feat)
assert F.allclose(out_sage, out_dense_sage) assert F.allclose(out_sage, out_dense_sage)
@pytest.mark.parametrize('g', [random_dglgraph(20), random_graph(20), random_bipartite(20, 10)]) @pytest.mark.parametrize('g', [random_dglgraph(20), random_graph(20), random_bipartite(20, 10), random_block(20)])
def test_edge_conv(g): def test_edge_conv(g):
ctx = F.ctx() ctx = F.ctx()
...@@ -338,7 +366,7 @@ def test_edge_conv(g): ...@@ -338,7 +366,7 @@ def test_edge_conv(g):
# test #1: basic # test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5)) h0 = F.randn((g.number_of_src_nodes(), 5))
if not g.is_homograph(): if not g.is_homograph() and not g.is_block:
# bipartite # bipartite
h1 = edge_conv(g, (h0, h0[:10])) h1 = edge_conv(g, (h0, h0[:10]))
else: else:
...@@ -364,6 +392,13 @@ def test_gin_conv(): ...@@ -364,6 +392,13 @@ def test_gin_conv():
h = gin_conv(g, feat) h = gin_conv(g, feat)
return h.shape == (20, 5) return h.shape == (20, 5)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].asnumpy())
block = dgl.to_block(g, seed_nodes)
feat = F.randn((block.number_of_src_nodes(), 5))
h = gin_conv(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 12)
def test_gmm_conv(): def test_gmm_conv():
ctx = F.ctx() ctx = F.ctx()
...@@ -396,6 +431,17 @@ def test_gmm_conv(): ...@@ -396,6 +431,17 @@ def test_gmm_conv():
h1 = gmm_conv(g, (h0, hd), pseudo) h1 = gmm_conv(g, (h0, hd), pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2) assert h1.shape == (g.number_of_dst_nodes(), 2)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].asnumpy())
block = dgl.to_block(g, seed_nodes)
gmm_conv = nn.GMMConv(5, 2, 5, 3, 'mean')
gmm_conv.initialize(ctx=ctx)
h0 = F.randn((block.number_of_src_nodes(), 5))
pseudo = F.randn((block.number_of_edges(), 5))
h = gmm_conv(block, h0, pseudo)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 2
def test_nn_conv(): def test_nn_conv():
ctx = F.ctx() ctx = F.ctx()
...@@ -427,6 +473,17 @@ def test_nn_conv(): ...@@ -427,6 +473,17 @@ def test_nn_conv():
h1 = nn_conv(g, (h0, hd), etypes) h1 = nn_conv(g, (h0, hd), etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2) assert h1.shape == (g.number_of_dst_nodes(), 2)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].asnumpy())
block = dgl.to_block(g, seed_nodes)
nn_conv = nn.NNConv((5, 4), 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
feat = F.randn((block.number_of_src_nodes(), 5))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h = nn_conv(block, feat, etypes)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 2
def test_sg_conv(): def test_sg_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3)) g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
ctx = F.ctx() ctx = F.ctx()
......
...@@ -5,7 +5,8 @@ import dgl.nn.pytorch as nn ...@@ -5,7 +5,8 @@ import dgl.nn.pytorch as nn
import dgl.function as fn import dgl.function as fn
import backend as F import backend as F
import pytest import pytest
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph, \
random_block
from copy import deepcopy from copy import deepcopy
import numpy as np import numpy as np
...@@ -69,7 +70,7 @@ def test_graph_conv(): ...@@ -69,7 +70,7 @@ def test_graph_conv():
new_weight = conv.weight.data new_weight = conv.weight.data
assert not F.allclose(old_weight, new_weight) assert not F.allclose(old_weight, new_weight)
@pytest.mark.parametrize('g', get_cases(['path', 'bipartite', 'small'], exclude=['zero-degree'])) @pytest.mark.parametrize('g', get_cases(['path', 'bipartite', 'small', 'block'], exclude=['zero-degree']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('bias', [True, False])
...@@ -452,6 +453,15 @@ def test_gat_conv(): ...@@ -452,6 +453,15 @@ def test_gat_conv():
h = gat(g, feat) h = gat(g, feat)
assert h.shape == (200, 4, 2) assert h.shape == (200, 4, 2)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = th.unique(g.edges()[1])
block = dgl.to_block(g, seed_nodes)
gat = nn.GATConv(5, 2, 4)
feat = F.randn((block.number_of_src_nodes(), 5))
gat = gat.to(ctx)
h = gat(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 4, 2)
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
def test_sage_conv(aggre_type): def test_sage_conv(aggre_type):
ctx = F.ctx() ctx = F.ctx()
...@@ -478,6 +488,16 @@ def test_sage_conv(aggre_type): ...@@ -478,6 +488,16 @@ def test_sage_conv(aggre_type):
assert h.shape[-1] == 2 assert h.shape[-1] == 2
assert h.shape[0] == 200 assert h.shape[0] == 200
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = th.unique(g.edges()[1])
block = dgl.to_block(g, seed_nodes)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((block.number_of_src_nodes(), 5))
sage = sage.to(ctx)
h = sage(block, feat)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 10
# Test the case for graphs without edges # Test the case for graphs without edges
g = dgl.bipartite([], num_nodes=(5, 3)) g = dgl.bipartite([], num_nodes=(5, 3))
sage = nn.SAGEConv((3, 3), 2, 'gcn') sage = nn.SAGEConv((3, 3), 2, 'gcn')
...@@ -546,6 +566,15 @@ def test_gin_conv(aggregator_type): ...@@ -546,6 +566,15 @@ def test_gin_conv(aggregator_type):
h = gin(g, feat) h = gin(g, feat)
assert h.shape == (200, 12) assert h.shape == (200, 12)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = th.unique(g.edges()[1])
block = dgl.to_block(g, seed_nodes)
gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
feat = F.randn((block.number_of_src_nodes(), 5))
gin = gin.to(ctx)
h = gin(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 12)
def test_agnn_conv(): def test_agnn_conv():
ctx = F.ctx() ctx = F.ctx()
g = dgl.graph(sp.sparse.random(100, 100, density=0.1)) g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
...@@ -562,6 +591,15 @@ def test_agnn_conv(): ...@@ -562,6 +591,15 @@ def test_agnn_conv():
h = agnn(g, feat) h = agnn(g, feat)
assert h.shape == (200, 5) assert h.shape == (200, 5)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = th.unique(g.edges()[1])
block = dgl.to_block(g, seed_nodes)
agnn = nn.AGNNConv(1)
feat = F.randn((block.number_of_src_nodes(), 5))
agnn = agnn.to(ctx)
h = agnn(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 5)
def test_gated_graph_conv(): def test_gated_graph_conv():
ctx = F.ctx() ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True) g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
...@@ -608,6 +646,18 @@ def test_nn_conv(): ...@@ -608,6 +646,18 @@ def test_nn_conv():
# currently we only do shape check # currently we only do shape check
assert h.shape[-1] == 10 assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = th.unique(g.edges()[1])
block = dgl.to_block(g, seed_nodes)
edge_func = th.nn.Linear(4, 5 * 10)
nnconv = nn.NNConv(5, 10, edge_func, 'mean')
feat = F.randn((block.number_of_src_nodes(), 5))
efeat = F.randn((block.number_of_edges(), 4))
nnconv = nnconv.to(ctx)
h = nnconv(block, feat, efeat)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 10
def test_gmm_conv(): def test_gmm_conv():
ctx = F.ctx() ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True) g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
...@@ -638,6 +688,17 @@ def test_gmm_conv(): ...@@ -638,6 +688,17 @@ def test_gmm_conv():
# currently we only do shape check # currently we only do shape check
assert h.shape[-1] == 10 assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = th.unique(g.edges()[1])
block = dgl.to_block(g, seed_nodes)
gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
feat = F.randn((block.number_of_src_nodes(), 5))
pseudo = F.randn((block.number_of_edges(), 3))
gmmconv = gmmconv.to(ctx)
h = gmmconv(block, feat, pseudo)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 10
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', [random_graph(100), random_bipartite(100, 200)]) @pytest.mark.parametrize('g', [random_graph(100), random_bipartite(100, 200)])
def test_dense_graph_conv(norm_type, g): def test_dense_graph_conv(norm_type, g):
...@@ -676,7 +737,7 @@ def test_dense_sage_conv(g): ...@@ -676,7 +737,7 @@ def test_dense_sage_conv(g):
out_dense_sage = dense_sage(adj, feat) out_dense_sage = dense_sage(adj, feat)
assert F.allclose(out_sage, out_dense_sage), g assert F.allclose(out_sage, out_dense_sage), g
@pytest.mark.parametrize('g', [random_dglgraph(20), random_graph(20), random_bipartite(20, 10)]) @pytest.mark.parametrize('g', [random_dglgraph(20), random_graph(20), random_bipartite(20, 10), random_block(20)])
def test_edge_conv(g): def test_edge_conv(g):
ctx = F.ctx() ctx = F.ctx()
...@@ -685,7 +746,7 @@ def test_edge_conv(g): ...@@ -685,7 +746,7 @@ def test_edge_conv(g):
# test #1: basic # test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5)) h0 = F.randn((g.number_of_src_nodes(), 5))
if not g.is_homograph(): if not g.is_homograph() and not g.is_block:
# bipartite # bipartite
h1 = edge_conv(g, (h0, h0[:10])) h1 = edge_conv(g, (h0, h0[:10]))
else: else:
......
...@@ -6,7 +6,8 @@ import dgl ...@@ -6,7 +6,8 @@ import dgl
import dgl.nn.tensorflow as nn import dgl.nn.tensorflow as nn
import dgl.function as fn import dgl.function as fn
import backend as F import backend as F
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph, \
random_block
from copy import deepcopy from copy import deepcopy
import numpy as np import numpy as np
...@@ -70,7 +71,7 @@ def test_graph_conv(): ...@@ -70,7 +71,7 @@ def test_graph_conv():
# new_weight = conv.weight.data # new_weight = conv.weight.data
# assert not F.allclose(old_weight, new_weight) # assert not F.allclose(old_weight, new_weight)
@pytest.mark.parametrize('g', get_cases(['path', 'bipartite', 'small'], exclude=['zero-degree'])) @pytest.mark.parametrize('g', get_cases(['path', 'bipartite', 'small', 'block'], exclude=['zero-degree']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('bias', [True, False])
...@@ -355,6 +356,14 @@ def test_gat_conv(): ...@@ -355,6 +356,14 @@ def test_gat_conv():
feat = (F.randn((100, 5)), F.randn((200, 10))) feat = (F.randn((100, 5)), F.randn((200, 10)))
h = gat(g, feat) h = gat(g, feat)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].numpy())
block = dgl.to_block(g, seed_nodes)
gat = nn.GATConv(5, 2, 4)
feat = F.randn((block.number_of_src_nodes(), 5))
h = gat(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 4, 2)
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
def test_sage_conv(aggre_type): def test_sage_conv(aggre_type):
ctx = F.ctx() ctx = F.ctx()
...@@ -378,6 +387,15 @@ def test_sage_conv(aggre_type): ...@@ -378,6 +387,15 @@ def test_sage_conv(aggre_type):
assert h.shape[-1] == 2 assert h.shape[-1] == 2
assert h.shape[0] == 200 assert h.shape[0] == 200
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].numpy())
block = dgl.to_block(g, seed_nodes)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((block.number_of_src_nodes(), 5))
h = sage(block, feat)
assert h.shape[0] == block.number_of_dst_nodes()
assert h.shape[-1] == 10
# Test the case for graphs without edges # Test the case for graphs without edges
g = dgl.bipartite([], num_nodes=(5, 3)) g = dgl.bipartite([], num_nodes=(5, 3))
sage = nn.SAGEConv((3, 3), 2, 'gcn') sage = nn.SAGEConv((3, 3), 2, 'gcn')
...@@ -438,6 +456,17 @@ def test_gin_conv(aggregator_type): ...@@ -438,6 +456,17 @@ def test_gin_conv(aggregator_type):
h = gin(g, feat) h = gin(g, feat)
assert h.shape == (200, 12) assert h.shape == (200, 12)
g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
seed_nodes = np.unique(g.edges()[1].numpy())
block = dgl.to_block(g, seed_nodes)
gin = nn.GINConv(
tf.keras.layers.Dense(12),
aggregator_type
)
feat = F.randn((block.number_of_src_nodes(), 5))
h = gin(block, feat)
assert h.shape == (block.number_of_dst_nodes(), 12)
def myagg(alist, dsttype): def myagg(alist, dsttype):
rst = alist[0] rst = alist[0]
for i in range(1, len(alist)): for i in range(1, len(alist)):
......
from collections import defaultdict from collections import defaultdict
import backend as F
import dgl import dgl
import numpy as np
import networkx as nx import networkx as nx
import scipy.sparse as ssp import scipy.sparse as ssp
...@@ -35,6 +37,11 @@ def bipartite1(): ...@@ -35,6 +37,11 @@ def bipartite1():
def bipartite_full(): def bipartite_full():
return dgl.bipartite([(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]) return dgl.bipartite([(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)])
@register_case(['block'])
def block():
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 4]))
return dgl.to_block(g, [1, 2, 3, 4])
def random_dglgraph(size): def random_dglgraph(size):
return dgl.DGLGraph(nx.erdos_renyi_graph(size, 0.3)) return dgl.DGLGraph(nx.erdos_renyi_graph(size, 0.3))
...@@ -43,3 +50,7 @@ def random_graph(size): ...@@ -43,3 +50,7 @@ def random_graph(size):
def random_bipartite(size_src, size_dst): def random_bipartite(size_src, size_dst):
return dgl.bipartite(ssp.random(size_src, size_dst, 0.1)) return dgl.bipartite(ssp.random(size_src, size_dst, 0.1))
def random_block(size):
g = dgl.graph(nx.erdos_renyi_graph(size, 0.1))
return dgl.to_block(g, np.unique(F.zerocopy_to_numpy(g.edges()[1])))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment