Unverified Commit af61e2fb authored by Zihao Ye, committed by GitHub

[Feature] Support nn modules for bipartite graphs. (#1392)



* init gat

* fix

* gin

* 7 nn modules

* rename & lint

* upd

* upd

* fix lint

* upd test

* upd

* lint

* shape check

* upd

* lint

* address comments

* update tensorflow
Co-authored-by: Quan Gan <coin2028@hotmail.com>
Co-authored-by: Jinjing Zhou <VoVAllen@users.noreply.github.com>
Co-authored-by: Minjie Wang <wmjlyjemaine@gmail.com>
parent 67cb7a43
......@@ -77,6 +77,8 @@ class GatedGraphConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is the output feature size.
"""
assert graph.is_homograph(), \
"not a homograph; convert it with to_homo and pass in the edge type as argument"
graph = graph.local_var()
zero_pad = feat.new_zeros((feat.shape[0], self._out_feats - feat.shape[1]))
feat = th.cat([feat, zero_pad], -1)
......
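The assertion above restricts GatedGraphConv to homogeneous graphs; a heterograph has to be flattened first and its edge types passed in explicitly. A minimal sketch of that workflow, assuming the GatedGraphConv(in_feats, out_feats, n_steps, n_etypes) constructor and that dgl.to_homo stores per-edge type ids under g.edata[dgl.ETYPE] (the toy graph below is made up for illustration):

import dgl
import torch as th
from dgl.nn.pytorch import GatedGraphConv

# Toy heterograph with two edge types over a single node type.
hg = dgl.heterograph({
    ('user', 'follows', 'user'): [(0, 1), (1, 2)],
    ('user', 'likes', 'user'): [(2, 0)],
})
g = dgl.to_homo(hg)                      # flatten to a homogeneous graph
etypes = g.edata[dgl.ETYPE]              # per-edge type ids produced by to_homo
feat = th.randn(g.number_of_nodes(), 10)
conv = GatedGraphConv(10, 10, n_steps=2, n_etypes=2)
out = conv(g, feat, etypes)              # edge types passed explicitly, as the assert asks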
......@@ -4,6 +4,7 @@ import torch as th
from torch import nn
from .... import function as fn
from ....utils import expand_as_pair
class GINConv(nn.Module):
......@@ -55,10 +56,12 @@ class GINConv(nn.Module):
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, D)` where :math:`D`
could be any positive integer, :math:`N` is the number
of nodes. If ``apply_func`` is not None, :math:`D` should
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, it is the input feature of shape :math:`(N, D_{in})`, where
:math:`D_{in}` is the size of the input feature and :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
If ``apply_func`` is not None, :math:`D_{in}` should
fit the input dimensionality requirement of ``apply_func``.
Returns
......@@ -70,9 +73,10 @@ class GINConv(nn.Module):
as input dimensionality.
"""
graph = graph.local_var()
graph.ndata['h'] = feat
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps) * feat + graph.ndata['neigh']
rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
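To illustrate the pair-input path introduced here, a minimal sketch of calling GINConv on a unidirectional bipartite graph (the graph and feature sizes below are made up for illustration):

import dgl
import torch as th
from dgl.nn.pytorch import GINConv

# Bipartite graph with 4 source nodes and 3 destination nodes.
g = dgl.bipartite([(0, 0), (1, 1), (2, 2), (3, 0)])
conv = GINConv(th.nn.Linear(5, 5), 'max')
# GIN needs the same feature size on both sides, because
# (1 + eps) * feat_dst is added to the aggregated neighbor features.
feat = (th.randn(4, 5), th.randn(3, 5))
out = conv(g, feat)                      # shape (3, 5): one row per destination node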
......@@ -6,6 +6,7 @@ from torch.nn import init
from .... import function as fn
from ..utils import Identity
from ....utils import expand_as_pair
class GMMConv(nn.Module):
......@@ -45,7 +46,7 @@ class GMMConv(nn.Module):
residual=False,
bias=True):
super(GMMConv, self).__init__()
self._in_feats = in_feats
self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
self._out_feats = out_feats
self._dim = dim
self._n_kernels = n_kernels
......@@ -60,10 +61,10 @@ class GMMConv(nn.Module):
self.mu = nn.Parameter(th.Tensor(n_kernels, dim))
self.inv_sigma = nn.Parameter(th.Tensor(n_kernels, dim))
self.fc = nn.Linear(in_feats, n_kernels * out_feats, bias=False)
self.fc = nn.Linear(self._in_src_feats, n_kernels * out_feats, bias=False)
if residual:
if in_feats != out_feats:
self.res_fc = nn.Linear(in_feats, out_feats, bias=False)
if self._in_dst_feats != out_feats:
self.res_fc = nn.Linear(self._in_dst_feats, out_feats, bias=False)
else:
self.res_fc = Identity()
else:
......@@ -94,9 +95,10 @@ class GMMConv(nn.Module):
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`N`
is the number of nodes of the graph and :math:`D_{in}` is the
input feature size.
If a single tensor is given, it is the input feature of shape :math:`(N, D_{in})`, where
:math:`D_{in}` is the size of the input feature and :math:`N` is the number of nodes.
If a pair of tensors is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.
pseudo : torch.Tensor
The pseudo coordinate tensor of shape :math:`(E, D_{u})` where
:math:`E` is the number of edges of the graph and :math:`D_{u}`
......@@ -108,21 +110,22 @@ class GMMConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is the output feature size.
"""
graph = graph.local_var()
graph.ndata['h'] = self.fc(feat).view(-1, self._n_kernels, self._out_feats)
E = graph.number_of_edges()
# compute gaussian weight
gaussian = -0.5 * ((pseudo.view(E, 1, self._dim) -
self.mu.view(1, self._n_kernels, self._dim)) ** 2)
gaussian = gaussian * (self.inv_sigma.view(1, self._n_kernels, self._dim) ** 2)
gaussian = th.exp(gaussian.sum(dim=-1, keepdim=True)) # (E, K, 1)
graph.edata['w'] = gaussian
graph.update_all(fn.u_mul_e('h', 'w', 'm'), self._reducer('m', 'h'))
rst = graph.ndata['h'].sum(1)
# residual connection
if self.res_fc is not None:
rst = rst + self.res_fc(feat)
# bias
if self.bias is not None:
rst = rst + self.bias
return rst
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = self.fc(feat_src).view(-1, self._n_kernels, self._out_feats)
E = graph.number_of_edges()
# compute gaussian weight
gaussian = -0.5 * ((pseudo.view(E, 1, self._dim) -
self.mu.view(1, self._n_kernels, self._dim)) ** 2)
gaussian = gaussian * (self.inv_sigma.view(1, self._n_kernels, self._dim) ** 2)
gaussian = th.exp(gaussian.sum(dim=-1, keepdim=True)) # (E, K, 1)
graph.edata['w'] = gaussian
graph.update_all(fn.u_mul_e('h', 'w', 'm'), self._reducer('m', 'h'))
rst = graph.dstdata['h'].sum(1)
# residual connection
if self.res_fc is not None:
rst = rst + self.res_fc(feat_dst)
# bias
if self.bias is not None:
rst = rst + self.bias
return rst
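A sketch of the bipartite call this enables, assuming the GMMConv(in_feats, out_feats, dim, n_kernels, aggregator_type) signature used in the tests below (the graph and sizes are illustrative):

import dgl
import torch as th
import scipy.sparse as ssp
from dgl.nn.pytorch import GMMConv

g = dgl.bipartite(ssp.random(30, 10, 0.3))
conv = GMMConv((5, 4), 2, 3, 4, 'mean')  # 5-d src feats, 4-d dst feats, 3-d pseudo, 4 kernels
feat = (th.randn(g.number_of_src_nodes(), 5),
        th.randn(g.number_of_dst_nodes(), 4))
pseudo = th.randn(g.number_of_edges(), 3)
out = conv(g, feat, pseudo)              # shape (10, 2): one row per destination node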
......@@ -6,6 +6,7 @@ from torch.nn import init
from .... import function as fn
from ..utils import Identity
from ....utils import expand_as_pair
class NNConv(nn.Module):
......@@ -20,6 +21,11 @@ class NNConv(nn.Module):
----------
in_feats : int
Input feature size.
If the layer is to be applied to a unidirectional bipartite graph, ``in_feats``
specifies the input feature size on both the source and destination nodes. If
a scalar is given, the source and destination node feature size would take the
same value.
out_feats : int
Output feature size.
edge_func : callable activation function/layer
......@@ -42,7 +48,7 @@ class NNConv(nn.Module):
residual=False,
bias=True):
super(NNConv, self).__init__()
self._in_feats = in_feats
self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
self._out_feats = out_feats
self.edge_nn = edge_func
if aggregator_type == 'sum':
......@@ -55,8 +61,8 @@ class NNConv(nn.Module):
raise KeyError('Aggregator type {} not recognized: '.format(aggregator_type))
self._aggre_type = aggregator_type
if residual:
if in_feats != out_feats:
self.res_fc = nn.Linear(in_feats, out_feats, bias=False)
if self._in_dst_feats != out_feats:
self.res_fc = nn.Linear(self._in_dst_feats, out_feats, bias=False)
else:
self.res_fc = Identity()
else:
......@@ -82,7 +88,7 @@ class NNConv(nn.Module):
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
feat : torch.Tensor or pair of torch.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`N`
is the number of nodes of the graph and :math:`D_{in}` is the
input feature size.
......@@ -96,18 +102,20 @@ class NNConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is the output feature size.
"""
graph = graph.local_var()
# (n, d_in, 1)
graph.ndata['h'] = feat.unsqueeze(-1)
# (n, d_in, d_out)
graph.edata['w'] = self.edge_nn(efeat).view(-1, self._in_feats, self._out_feats)
# (n, d_in, d_out)
graph.update_all(fn.u_mul_e('h', 'w', 'm'), self.reducer('m', 'neigh'))
rst = graph.ndata.pop('neigh').sum(dim=1) # (n, d_out)
# residual connection
if self.res_fc is not None:
rst = rst + self.res_fc(feat)
# bias
if self.bias is not None:
rst = rst + self.bias
return rst
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
# (n, d_in, 1)
graph.srcdata['h'] = feat_src.unsqueeze(-1)
# (n, d_in, d_out)
graph.edata['w'] = self.edge_nn(efeat).view(-1, self._in_src_feats, self._out_feats)
# (n, d_in, d_out)
graph.update_all(fn.u_mul_e('h', 'w', 'm'), self.reducer('m', 'neigh'))
rst = graph.dstdata['neigh'].sum(dim=1) # (n, d_out)
# residual connection
if self.res_fc is not None:
rst = rst + self.res_fc(feat_dst)
# bias
if self.bias is not None:
rst = rst + self.bias
return rst
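Likewise for NNConv: edge_func is assumed to map the edge feature size to in_src_feats * out_feats, matching the view(-1, self._in_src_feats, self._out_feats) above (sizes are illustrative):

import dgl
import torch as th
import scipy.sparse as ssp
from dgl.nn.pytorch import NNConv

g = dgl.bipartite(ssp.random(30, 10, 0.3))
edge_func = th.nn.Linear(4, 5 * 10)      # 4-d edge feats -> (5 src-in) x (10 out)
conv = NNConv((5, 2), 10, edge_func, 'mean')
feat = (th.randn(g.number_of_src_nodes(), 5),
        th.randn(g.number_of_dst_nodes(), 2))
efeat = th.randn(g.number_of_edges(), 4)
out = conv(g, feat, efeat)               # shape (10, 10): one row per destination node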
......@@ -172,22 +172,24 @@ class RelGraphConv(nn.Module):
torch.Tensor
New node features.
"""
g = g.local_var()
g.ndata['h'] = x
g.edata['type'] = etypes
if norm is not None:
g.edata['norm'] = norm
if self.self_loop:
loop_message = utils.matmul_maybe_select(x, self.loop_weight)
# message passing
g.update_all(self.message_func, fn.sum(msg='msg', out='h'))
# apply bias and activation
node_repr = g.ndata['h']
if self.bias:
node_repr = node_repr + self.h_bias
if self.self_loop:
node_repr = node_repr + loop_message
if self.activation:
node_repr = self.activation(node_repr)
node_repr = self.dropout(node_repr)
return node_repr
assert g.is_homograph(), \
"not a homograph; convert it with to_homo and pass in the edge type as argument"
with g.local_scope():
g.ndata['h'] = x
g.edata['type'] = etypes
if norm is not None:
g.edata['norm'] = norm
if self.self_loop:
loop_message = utils.matmul_maybe_select(x, self.loop_weight)
# message passing
g.update_all(self.message_func, fn.sum(msg='msg', out='h'))
# apply bias and activation
node_repr = g.ndata['h']
if self.bias:
node_repr = node_repr + self.h_bias
if self.self_loop:
node_repr = node_repr + loop_message
if self.activation:
node_repr = self.activation(node_repr)
node_repr = self.dropout(node_repr)
return node_repr
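The same homograph requirement applies to RelGraphConv; a minimal sketch of the expected call, assuming the RelGraphConv(in_feat, out_feat, num_rels) constructor and that dgl.graph accepts an edge list the way dgl.bipartite does elsewhere in this diff (toy sizes for illustration):

import dgl
import torch as th
from dgl.nn.pytorch import RelGraphConv

g = dgl.graph([(0, 1), (1, 2), (2, 0)])
etypes = th.tensor([0, 1, 0])            # one relation id per edge
norm = th.ones(g.number_of_edges(), 1)   # optional per-edge normalizer
conv = RelGraphConv(10, 5, 2)
x = th.randn(g.number_of_nodes(), 10)
out = conv(g, x, etypes, norm)           # shape (3, 5)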
"""Torch Module for GraphSAGE layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
from numbers import Integral
from torch import nn
from torch.nn import functional as F
from .... import function as fn
from ....utils import expand_as_pair, check_eq_shape
class SAGEConv(nn.Module):
......@@ -56,14 +56,7 @@ class SAGEConv(nn.Module):
activation=None):
super(SAGEConv, self).__init__()
if isinstance(in_feats, tuple):
self._in_src_feats = in_feats[0]
self._in_dst_feats = in_feats[1]
elif isinstance(in_feats, Integral):
self._in_src_feats = self._in_dst_feats = in_feats
else:
raise TypeError('in_feats must be either int or pair of ints')
self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
self._out_feats = out_feats
self._aggre_type = aggregator_type
self.norm = norm
......@@ -136,6 +129,7 @@ class SAGEConv(nn.Module):
graph.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
h_neigh = graph.dstdata['neigh']
elif self._aggre_type == 'gcn':
check_eq_shape(feat)
graph.srcdata['h'] = feat_src
graph.dstdata['h'] = feat_dst # same as above if homogeneous
graph.update_all(fn.copy_src('h', 'm'), fn.sum('m', 'neigh'))
......
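The check_eq_shape guard exists because the 'gcn' aggregator treats the destination feature as one more neighbor message, so source and destination features must have the same size. A sketch under that constraint (sizes are illustrative):

import dgl
import torch as th
import scipy.sparse as ssp
from dgl.nn.pytorch import SAGEConv

g = dgl.bipartite(ssp.random(30, 10, 0.3))
conv = SAGEConv((5, 5), 2, 'gcn')        # 'gcn' requires matching src/dst feature sizes
feat = (th.randn(30, 5), th.randn(10, 5))
out = conv(g, feat)                      # shape (10, 2)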
......@@ -73,6 +73,7 @@ class TAGConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is size of output feature.
"""
assert graph.is_homograph(), 'Graph is not homogeneous'
graph = graph.local_var()
norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)
......
......@@ -28,8 +28,13 @@ class GATConv(layers.Layer):
Parameters
----------
in_feats : int
in_feats : int, or a pair of ints
Input feature size.
If the layer is to be applied to a unidirectional bipartite graph, ``in_feats``
specifies the input feature size on both the source and destination nodes. If
a scalar is given, the source and destination node feature size would take the
same value.
out_feats : int
Output feature size.
num_heads : int
......@@ -62,11 +67,16 @@ class GATConv(layers.Layer):
self._out_feats = out_feats
xinit = tf.keras.initializers.VarianceScaling(scale=np.sqrt(
2), mode="fan_avg", distribution="untruncated_normal")
self.fc = layers.Dense(
out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
if isinstance(in_feats, tuple):
self.fc_src = layers.Dense(
out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
self.fc_dst = layers.Dense(
out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
else:
self.fc = layers.Dense(
out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
self.attn_l = tf.Variable(initial_value=xinit(
shape=(1, num_heads, out_feats), dtype='float32'), trainable=True)
self.attn_r = tf.Variable(initial_value=xinit(
shape=(1, num_heads, out_feats), dtype='float32'), trainable=True)
self.feat_drop = layers.Dropout(rate=feat_drop)
......@@ -90,9 +100,11 @@ class GATConv(layers.Layer):
----------
graph : DGLGraph
The graph.
feat : tf.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
is size of input feature, :math:`N` is the number of nodes.
feat : tf.Tensor or pair of tf.Tensor
If a tf.Tensor is given, it is the input feature of shape :math:`(N, D_{in})`, where
:math:`D_{in}` is the size of the input feature and :math:`N` is the number of nodes.
If a pair of tf.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.
Returns
-------
......@@ -101,8 +113,15 @@ class GATConv(layers.Layer):
is the number of heads, and :math:`D_{out}` is size of output feature.
"""
graph = graph.local_var()
h = self.feat_drop(feat)
feat = tf.reshape(self.fc(h), (-1, self._num_heads, self._out_feats))
if isinstance(feat, tuple):
h_src = self.feat_drop(feat[0])
h_dst = self.feat_drop(feat[1])
feat_src = tf.reshape(self.fc_src(h_src), (-1, self._num_heads, self._out_feats))
feat_dst = tf.reshape(self.fc_dst(h_dst), (-1, self._num_heads, self._out_feats))
else:
h_src = h_dst = self.feat_drop(feat)
feat_src = feat_dst = tf.reshape(
self.fc(h_src), (-1, self._num_heads, self._out_feats))
# NOTE: GAT paper uses "first concatenation then linear projection"
# to compute attention scores, while ours is "first projection then
# addition", the two approaches are mathematically equivalent:
......@@ -113,9 +132,10 @@ class GATConv(layers.Layer):
# save [Wh_i || Wh_j] on edges, which is not memory-efficient. Plus,
# addition could be optimized with DGL's built-in function u_add_v,
# which further speeds up computation and saves memory footprint.
el = tf.reduce_sum(feat * self.attn_l, axis=-1, keepdims=True)
er = tf.reduce_sum(feat * self.attn_r, axis=-1, keepdims=True)
graph.ndata.update({'ft': feat, 'el': el, 'er': er})
el = tf.reduce_sum(feat_src * self.attn_l, axis=-1, keepdims=True)
er = tf.reduce_sum(feat_dst * self.attn_r, axis=-1, keepdims=True)
graph.srcdata.update({'ft': feat_src, 'el': el})
graph.dstdata.update({'er': er})
# compute edge attention, el and er are a_l Wh_i and a_r Wh_j respectively.
graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
e = self.leaky_relu(graph.edata.pop('e'))
......@@ -124,11 +144,11 @@ class GATConv(layers.Layer):
# message passing
graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
fn.sum('m', 'ft'))
rst = graph.ndata['ft']
rst = graph.dstdata['ft']
# residual
if self.res_fc is not None:
resval = tf.reshape(self.res_fc(
h), (h.shape[0], -1, self._out_feats))
h_dst), (h_dst.shape[0], -1, self._out_feats))
rst = rst + resval
# activation
if self.activation:
......
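For the TensorFlow variant, a sketch of the new pair-input path with separate fc_src/fc_dst projections (the graph and feature sizes are illustrative):

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import GATConv

g = dgl.bipartite([(0, 0), (1, 1), (2, 0)])   # 3 source nodes, 2 destination nodes
conv = GATConv((5, 10), 2, 4)                 # 5-d src feats, 10-d dst feats, 4 heads
feat = (tf.random.normal((3, 5)), tf.random.normal((2, 10)))
out = conv(g, feat)                           # shape (2, 4, 2)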
......@@ -4,6 +4,7 @@ import tensorflow as tf
from tensorflow.keras import layers
from .... import function as fn
from ....utils import expand_as_pair
class GINConv(layers.Layer):
......@@ -52,10 +53,13 @@ class GINConv(layers.Layer):
----------
graph : DGLGraph
The graph.
feat : tf.Tensor
The input feature of shape :math:`(N, D)` where :math:`D`
could be any positive integer, :math:`N` is the number
of nodes. If ``apply_func`` is not None, :math:`D` should
feat : tf.Tensor or pair of tf.Tensor
If a tf.Tensor is given, it is the input feature of shape :math:`(N, D_{in})`, where
:math:`D_{in}` is the size of the input feature and :math:`N` is the number of nodes.
If a pair of tf.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
If ``apply_func`` is not None, :math:`D_{in}` should
fit the input dimensionality requirement of ``apply_func``.
Returns
......@@ -67,9 +71,10 @@ class GINConv(layers.Layer):
as input dimensionality.
"""
graph = graph.local_var()
graph.ndata['h'] = feat
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps) * feat + graph.ndata['neigh']
rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
......@@ -176,22 +176,24 @@ class RelGraphConv(layers.Layer):
tf.Tensor
New node features.
"""
g = g.local_var()
g.ndata['h'] = x
g.edata['type'] = tf.cast(etypes, tf.int64)
if norm is not None:
g.edata['norm'] = norm
if self.self_loop:
loop_message = utils.matmul_maybe_select(x, self.loop_weight)
# message passing
g.update_all(self.message_func, fn.sum(msg='msg', out='h'))
# apply bias and activation
node_repr = g.ndata['h']
if self.bias:
node_repr = node_repr + self.h_bias
if self.self_loop:
node_repr = node_repr + loop_message
if self.activation:
node_repr = self.activation(node_repr)
node_repr = self.dropout(node_repr)
return node_repr
assert g.is_homograph(), \
"not a homograph; convert it with to_homo and pass in the edge type as argument"
with g.local_scope():
g.ndata['h'] = x
g.edata['type'] = tf.cast(etypes, tf.int64)
if norm is not None:
g.edata['norm'] = norm
if self.self_loop:
loop_message = utils.matmul_maybe_select(x, self.loop_weight)
# message passing
g.update_all(self.message_func, fn.sum(msg='msg', out='h'))
# apply bias and activation
node_repr = g.ndata['h']
if self.bias:
node_repr = node_repr + self.h_bias
if self.self_loop:
node_repr = node_repr + loop_message
if self.activation:
node_repr = self.activation(node_repr)
node_repr = self.dropout(node_repr)
return node_repr
"""Tensorflow Module for GraphSAGE layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
from numbers import Integral
import tensorflow as tf
from tensorflow.keras import layers
from .... import function as fn
from ....utils import expand_as_pair, check_eq_shape
class SAGEConv(layers.Layer):
......@@ -57,14 +57,7 @@ class SAGEConv(layers.Layer):
activation=None):
super(SAGEConv, self).__init__()
if isinstance(in_feats, tuple):
self._in_src_feats = in_feats[0]
self._in_dst_feats = in_feats[1]
elif isinstance(in_feats, Integral):
self._in_src_feats = self._in_dst_feats = in_feats
else:
raise TypeError('in_feats must be either int or pair of ints')
self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
self._out_feats = out_feats
self._aggre_type = aggregator_type
self.norm = norm
......@@ -95,9 +88,11 @@ class SAGEConv(layers.Layer):
----------
graph : DGLGraph
The graph.
feat : tf.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
is size of input feature, :math:`N` is the number of nodes.
feat : tf.Tensor or pair of tf.Tensor
If a single tensor is given, it is the input feature of shape :math:`(N, D_{in})`, where
:math:`D_{in}` is the size of the input feature and :math:`N` is the number of nodes.
If a pair of tensors is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.
Returns
-------
......@@ -120,6 +115,7 @@ class SAGEConv(layers.Layer):
graph.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
h_neigh = graph.dstdata['neigh']
elif self._aggre_type == 'gcn':
check_eq_shape(feat)
graph.srcdata['h'] = feat_src
graph.dstdata['h'] = feat_dst # same as above if homogeneous
graph.update_all(fn.copy_src('h', 'm'), fn.sum('m', 'neigh'))
......
......@@ -519,3 +519,23 @@ def make_invmap(array, use_numpy=True):
invmap = {x: i for i, x in enumerate(uniques)}
remapped = np.asarray([invmap[x] for x in array])
return uniques, invmap, remapped
def expand_as_pair(input_):
"""Return a pair of same element if the input is not a pair.
"""
if isinstance(input_, tuple):
return input_
else:
return input_, input_
def check_eq_shape(input_):
"""If input_ is a pair of features, check if the feature shape of source
nodes is equal to the feature shape of destination nodes.
"""
srcdata, dstdata = expand_as_pair(input_)
src_feat_shape = tuple(F.shape(srcdata))[1:]
dst_feat_shape = tuple(F.shape(dstdata))[1:]
if src_feat_shape != dst_feat_shape:
    raise DGLError("The feature shape of source nodes: {} "
                   "should be equal to the feature shape of destination "
                   "nodes: {}.".format(src_feat_shape, dst_feat_shape))
......@@ -7,7 +7,7 @@ import dgl
import dgl.nn.mxnet as nn
import dgl.function as fn
import backend as F
from test_utils.graph_cases import get_cases
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from mxnet import autograd, gluon, nd
def check_close(a, b):
......@@ -133,43 +133,52 @@ def test_tagconv():
assert h1.shape[-1] == 2
def test_gat_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
ctx = F.ctx()
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
gat = nn.GATConv(10, 20, 5) # n_heads = 5
gat.initialize(ctx=ctx)
print(gat)
# test#1: basic
h0 = F.randn((20, 10))
h1 = gat(g, h0)
assert h1.shape == (20, 5, 20)
feat = F.randn((20, 10))
h = gat(g, feat)
assert h.shape == (20, 5, 20)
def test_sage_conv():
for aggre_type in ['mean', 'pool', 'gcn']:
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
# test#2: bipartite
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
gat = nn.GATConv((5, 10), 2, 4)
gat.initialize(ctx=ctx)
feat = (F.randn((100, 5)), F.randn((200, 10)))
h = gat(g, feat)
assert h.shape == (200, 4, 2)
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 200
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
def test_sage_conv(aggre_type):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 200
def test_gg_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
......@@ -207,9 +216,14 @@ def test_agnn_conv():
print(agnn_conv)
# test#1: basic
h0 = F.randn((20, 10))
h1 = agnn_conv(g, h0)
assert h1.shape == (20, 10)
feat = F.randn((20, 10))
h = agnn_conv(g, feat)
assert h.shape == (20, 10)
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
feat = (F.randn((100, 5)), F.randn((200, 5)))
h = agnn_conv(g, feat)
assert h.shape == (200, 5)
def test_appnp_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
......@@ -246,27 +260,27 @@ def test_dense_cheb_conv():
out_dense_cheb = dense_cheb(adj, feat, 2.0)
assert F.allclose(out_cheb, out_dense_cheb)
def test_dense_graph_conv():
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', [random_graph(100), random_bipartite(100, 200)])
def test_dense_graph_conv(g, norm_type):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.3), readonly=True)
adj = g.adjacency_matrix(ctx=ctx).tostype('default')
conv = nn.GraphConv(5, 2, norm='none', bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=False, bias=True)
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
conv.initialize(ctx=ctx)
dense_conv.initialize(ctx=ctx)
dense_conv.weight.set_data(
conv.weight.data())
dense_conv.bias.set_data(
conv.bias.data())
feat = F.randn((100, 5))
feat = F.randn((g.number_of_src_nodes(), 5))
out_conv = conv(g, feat)
out_dense_conv = dense_conv(adj, feat)
assert F.allclose(out_conv, out_dense_conv)
def test_dense_sage_conv():
@pytest.mark.parametrize('g', [random_graph(100), random_bipartite(100, 200)])
def test_dense_sage_conv(g):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
adj = g.adjacency_matrix(ctx=ctx).tostype('default')
sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2)
......@@ -276,14 +290,20 @@ def test_dense_sage_conv():
sage.fc_neigh.weight.data())
dense_sage.fc.bias.set_data(
sage.fc_neigh.bias.data())
feat = F.randn((100, 5))
if len(g.ntypes) == 2:
feat = (
F.randn((g.number_of_src_nodes(), 5)),
F.randn((g.number_of_dst_nodes(), 5))
)
else:
feat = F.randn((g.number_of_nodes(), 5))
out_sage = sage(g, feat)
out_dense_sage = dense_sage(adj, feat)
assert F.allclose(out_sage, out_dense_sage)
def test_edge_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
@pytest.mark.parametrize('g', [random_dglgraph(20), random_graph(20), random_bipartite(20, 10)])
def test_edge_conv(g):
ctx = F.ctx()
edge_conv = nn.EdgeConv(5, 2)
......@@ -291,9 +311,13 @@ def test_edge_conv():
print(edge_conv)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = edge_conv(g, h0)
assert h1.shape == (g.number_of_nodes(), 2)
h0 = F.randn((g.number_of_src_nodes(), 5))
if not g.is_homograph():
# bipartite
h1 = edge_conv(g, (h0, h0[:10]))
else:
h1 = edge_conv(g, h0)
assert h1.shape == (g.number_of_dst_nodes(), 2)
def test_gin_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
......@@ -304,38 +328,79 @@ def test_gin_conv():
print(gin_conv)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = gin_conv(g, h0)
assert h1.shape == (g.number_of_nodes(), 5)
feat = F.randn((g.number_of_nodes(), 5))
h = gin_conv(g, feat)
assert h.shape == (20, 5)
# test #2: bipartite
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
feat = (F.randn((100, 5)), F.randn((200, 5)))
h = gin_conv(g, feat)
assert h.shape == (200, 5)
def test_gmm_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
ctx = F.ctx()
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
gmm_conv = nn.GMMConv(5, 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
print(gmm_conv)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, h0, pseudo)
assert h1.shape == (g.number_of_nodes(), 2)
g = dgl.graph(nx.erdos_renyi_graph(20, 0.3))
gmm_conv = nn.GMMConv(5, 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, h0, pseudo)
assert h1.shape == (g.number_of_nodes(), 2)
g = dgl.bipartite(sp.sparse.random(20, 10, 0.1))
gmm_conv = nn.GMMConv((5, 4), 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, (h0, hd), pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2)
def test_nn_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
ctx = F.ctx()
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
nn_conv = nn.NNConv(5, 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
print(nn_conv)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, h0, etypes)
assert h1.shape == (g.number_of_nodes(), 2)
g = dgl.graph(nx.erdos_renyi_graph(20, 0.3))
nn_conv = nn.NNConv(5, 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, h0, etypes)
assert h1.shape == (g.number_of_nodes(), 2)
g = dgl.bipartite(sp.sparse.random(20, 10, 0.3))
nn_conv = nn.NNConv((5, 4), 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, (h0, hd), etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2)
def test_sg_conv():
g = dgl.DGLGraph(nx.erdos_renyi_graph(20, 0.3))
ctx = F.ctx()
......
......@@ -5,7 +5,7 @@ import dgl.nn.pytorch as nn
import dgl.function as fn
import backend as F
import pytest
from test_utils.graph_cases import get_cases
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from copy import deepcopy
import numpy as np
......@@ -413,33 +413,40 @@ def test_gat_conv():
feat = F.randn((100, 5))
gat = gat.to(ctx)
h = gat(g, feat)
assert h.shape[-1] == 2 and h.shape[-2] == 4
assert h.shape == (100, 4, 2)
def test_sage_conv():
for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage = sage.to(ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
gat = nn.GATConv((5, 10), 2, 4)
feat = (F.randn((100, 5)), F.randn((200, 10)))
gat = gat.to(ctx)
h = gat(g, feat)
assert h.shape == (200, 4, 2)
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage = sage.to(ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
sage = sage.to(ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 200
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
def test_sage_conv(aggre_type):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage = sage.to(ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
sage = sage.to(ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
sage = sage.to(ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 200
def test_sgc_conv():
ctx = F.ctx()
......@@ -470,27 +477,44 @@ def test_appnp_conv():
h = appnp(g, feat)
assert h.shape[-1] == 5
def test_gin_conv():
for aggregator_type in ['mean', 'max', 'sum']:
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
gin = nn.GINConv(
th.nn.Linear(5, 12),
aggregator_type
)
feat = F.randn((100, 5))
gin = gin.to(ctx)
h = gin(g, feat)
assert h.shape[-1] == 12
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(aggregator_type):
ctx = F.ctx()
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
gin = nn.GINConv(
th.nn.Linear(5, 12),
aggregator_type
)
feat = F.randn((100, 5))
gin = gin.to(ctx)
h = gin(g, feat)
assert h.shape == (100, 12)
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
gin = nn.GINConv(
th.nn.Linear(5, 12),
aggregator_type
)
feat = (F.randn((100, 5)), F.randn((200, 5)))
gin = gin.to(ctx)
h = gin(g, feat)
assert h.shape == (200, 12)
def test_agnn_conv():
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
agnn = nn.AGNNConv(1)
feat = F.randn((100, 5))
agnn = agnn.to(ctx)
h = agnn(g, feat)
assert h.shape[-1] == 5
assert h.shape == (100, 5)
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
agnn = nn.AGNNConv(1)
feat = (F.randn((100, 5)), F.randn((200, 5)))
agnn = agnn.to(ctx)
h = agnn(g, feat)
assert h.shape == (200, 5)
def test_gated_graph_conv():
ctx = F.ctx()
......@@ -517,6 +541,27 @@ def test_nn_conv():
# currently we only do shape check
assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
edge_func = th.nn.Linear(4, 5 * 10)
nnconv = nn.NNConv(5, 10, edge_func, 'mean')
feat = F.randn((100, 5))
efeat = F.randn((g.number_of_edges(), 4))
nnconv = nnconv.to(ctx)
h = nnconv(g, feat, efeat)
# currently we only do shape check
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(50, 100, density=0.1))
edge_func = th.nn.Linear(4, 5 * 10)
nnconv = nn.NNConv((5, 2), 10, edge_func, 'mean')
feat = F.randn((50, 5))
feat_dst = F.randn((100, 2))
efeat = F.randn((g.number_of_edges(), 4))
nnconv = nnconv.to(ctx)
h = nnconv(g, (feat, feat_dst), efeat)
# currently we only do shape check
assert h.shape[-1] == 10
def test_gmm_conv():
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
......@@ -528,35 +573,78 @@ def test_gmm_conv():
# currently we only do shape check
assert h.shape[-1] == 10
def test_dense_graph_conv():
g = dgl.graph(sp.sparse.random(100, 100, density=0.1), readonly=True)
gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
feat = F.randn((100, 5))
pseudo = F.randn((g.number_of_edges(), 3))
gmmconv = gmmconv.to(ctx)
h = gmmconv(g, feat, pseudo)
# currently we only do shape check
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 50, density=0.1), readonly=True)
gmmconv = nn.GMMConv((5, 2), 10, 3, 4, 'mean')
feat = F.randn((100, 5))
feat_dst = F.randn((50, 2))
pseudo = F.randn((g.number_of_edges(), 3))
gmmconv = gmmconv.to(ctx)
h = gmmconv(g, (feat, feat_dst), pseudo)
# currently we only do shape check
assert h.shape[-1] == 10
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', [random_graph(100), random_bipartite(100, 200)])
def test_dense_graph_conv(norm_type, g):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
# TODO(minjie): enable the following option after #1385
adj = g.adjacency_matrix(ctx=ctx).to_dense()
conv = nn.GraphConv(5, 2, norm='none', bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=False, bias=True)
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
dense_conv.weight.data = conv.weight.data
dense_conv.bias.data = conv.bias.data
feat = F.randn((100, 5))
feat = F.randn((g.number_of_src_nodes(), 5))
conv = conv.to(ctx)
dense_conv = dense_conv.to(ctx)
out_conv = conv(g, feat)
out_dense_conv = dense_conv(adj, feat)
assert F.allclose(out_conv, out_dense_conv)
def test_dense_sage_conv():
@pytest.mark.parametrize('g', [random_graph(100), random_bipartite(100, 200)])
def test_dense_sage_conv(g):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
adj = g.adjacency_matrix(ctx=ctx).to_dense()
sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2)
dense_sage.fc.weight.data = sage.fc_neigh.weight.data
dense_sage.fc.bias.data = sage.fc_neigh.bias.data
feat = F.randn((100, 5))
if len(g.ntypes) == 2:
feat = (
F.randn((g.number_of_src_nodes(), 5)),
F.randn((g.number_of_dst_nodes(), 5))
)
else:
feat = F.randn((g.number_of_nodes(), 5))
sage = sage.to(ctx)
dense_sage = dense_sage.to(ctx)
out_sage = sage(g, feat)
out_dense_sage = dense_sage(adj, feat)
assert F.allclose(out_sage, out_dense_sage)
assert F.allclose(out_sage, out_dense_sage), g
@pytest.mark.parametrize('g', [random_dglgraph(20), random_graph(20), random_bipartite(20, 10)])
def test_edge_conv(g):
ctx = F.ctx()
edge_conv = nn.EdgeConv(5, 2).to(ctx)
print(edge_conv)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
if not g.is_homograph():
# bipartite
h1 = edge_conv(g, (h0, h0[:10]))
else:
h1 = edge_conv(g, h0)
assert h1.shape == (g.number_of_dst_nodes(), 2)
def test_dense_cheb_conv():
for k in range(1, 4):
......
......@@ -6,7 +6,7 @@ import dgl
import dgl.nn.tensorflow as nn
import dgl.function as fn
import backend as F
from test_utils.graph_cases import get_cases
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from copy import deepcopy
import numpy as np
......@@ -166,7 +166,6 @@ def test_edge_softmax():
for i in range(30):
for j in range(30):
g.add_edge(i, j)
score = F.randn((900, 1))
with tf.GradientTape() as tape:
......@@ -311,30 +310,35 @@ def test_gat_conv():
gat = nn.GATConv(5, 2, 4)
feat = F.randn((100, 5))
h = gat(g, feat)
assert h.shape[-1] == 2 and h.shape[-2] == 4
def test_sage_conv():
for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 200
assert h.shape == (100, 4, 2)
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
gat = nn.GATConv((5, 10), 2, 4)
feat = (F.randn((100, 5)), F.randn((200, 10)))
h = gat(g, feat)
assert h.shape == (200, 4, 2)
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
def test_sage_conv(aggre_type):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((100, 5))
h = sage(g, feat)
assert h.shape[-1] == 10
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 200
def test_sgc_conv():
ctx = F.ctx()
......@@ -361,17 +365,26 @@ def test_appnp_conv():
h = appnp(g, feat)
assert h.shape[-1] == 5
def test_gin_conv():
for aggregator_type in ['mean', 'max', 'sum']:
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
gin = nn.GINConv(
tf.keras.layers.Dense(12),
aggregator_type
)
feat = F.randn((100, 5))
gin = gin
h = gin(g, feat)
assert h.shape[-1] == 12
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(aggregator_type):
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
gin = nn.GINConv(
tf.keras.layers.Dense(12),
aggregator_type
)
feat = F.randn((100, 5))
gin = gin
h = gin(g, feat)
assert h.shape == (100, 12)
g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
gin = nn.GINConv(
tf.keras.layers.Dense(12),
aggregator_type
)
feat = (F.randn((100, 5)), F.randn((200, 5)))
h = gin(g, feat)
assert h.shape == (200, 12)
def myagg(alist, dsttype):
rst = alist[0]
......@@ -477,7 +490,6 @@ def test_hetero_conv(agg):
assert mod3.carg1 == 0
assert mod3.carg2 == 1
if __name__ == '__main__':
test_graph_conv()
test_edge_softmax()
......@@ -501,4 +513,3 @@ if __name__ == '__main__':
# test_dense_sage_conv()
# test_dense_cheb_conv()
# test_sequential()
from collections import defaultdict
import dgl
import networkx as nx
import scipy.sparse as ssp
case_registry = defaultdict(list)
......@@ -33,3 +34,12 @@ def bipartite1():
@register_case(['bipartite', 'small', 'hetero'])
def bipartite_full():
return dgl.bipartite([(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)])
def random_dglgraph(size):
return dgl.DGLGraph(nx.erdos_renyi_graph(size, 0.3))
def random_graph(size):
return dgl.graph(nx.erdos_renyi_graph(size, 0.3))
def random_bipartite(size_src, size_dst):
return dgl.bipartite(ssp.random(size_src, size_dst, 0.1))