Unverified Commit f25bc176 authored by Minjie Wang, committed by GitHub

[Hetero] Improve speed of several Hetero APIs (#1486)

* add clone function to frame

* add utest

* replace all local_var with local_scope

* fix utest

* avoid creating canonical types in __getitem__

* lint

* try another utest approach for mx

* utest
parent 3c4506e9
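
The recurring change in this diff swaps `graph.local_var()` for the `graph.local_scope()` context manager. Both isolate temporary node/edge features from the caller's graph, but `local_scope()` does so without constructing a fresh graph object on every forward call, which, per the commit message, is part of the speed improvement for the hetero APIs. A minimal sketch of the before/after pattern, grounded in the Sequential docstring hunks below (the `message_passing_*` names and the `n_feat` argument are illustrative only):

```python
import dgl.function as fn

# Before: local_var() returns a shallow copy; temporary features written to it
# do not leak back to the caller's graph.
def message_passing_old(graph, n_feat):
    graph = graph.local_var()
    graph.ndata['h'] = n_feat
    graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h_sum'))
    return graph.ndata['h_sum']

# After: local_scope() gives the same isolation on the original graph object;
# 'h' and 'h_sum' are discarded when the with-block exits.
def message_passing_new(graph, n_feat):
    with graph.local_scope():
        graph.ndata['h'] = n_feat
        graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h_sum'))
        return graph.ndata['h_sum']
```
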
@@ -128,7 +128,7 @@ class Sequential(gluon.nn.Sequential):
>>> def __init__(self, **kwargs):
>>> super().__init__(**kwargs)
>>> def forward(self, graph, n_feat, e_feat):
>>> graph = graph.local_var()
>>> with graph.local_scope():
>>> graph.ndata['h'] = n_feat
>>> graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
>>> n_feat += graph.ndata['h']
@@ -175,7 +175,7 @@ class Sequential(gluon.nn.Sequential):
>>> def __init__(self, **kwargs):
>>> super().__init__(**kwargs)
>>> def forward(self, graph, n_feat):
>>> graph = graph.local_var()
>>> with graph.local_scope():
>>> graph.ndata['h'] = n_feat
>>> graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
>>> n_feat += graph.ndata['h']
......
@@ -58,8 +58,7 @@ class AGNNConv(nn.Module):
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
"""
graph = graph.local_var()
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.srcdata['norm_h'] = F.normalize(feat_src, p=2, dim=-1)
......
@@ -53,7 +53,7 @@ class APPNPConv(nn.Module):
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
"""
graph = graph.local_var()
with graph.local_scope():
norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)
shp = norm.shape + (1,) * (feat.dim() - 1)
norm = th.reshape(norm, shp).to(feat.device)
......
@@ -217,8 +217,8 @@ class AtomicConv(nn.Module):
Updated node representations. V for the number of nodes, K for the
number of radial filters, and T for the number of types of atomic numbers.
"""
with graph.local_scope():
radial_pooled_values = self.radial_pooling(distances) # (K, E, 1)
graph = graph.local_var()
if self.features_to_use is not None:
feat = (feat == self.features_to_use).float() # (V, T)
graph.ndata['hv'] = feat
......
@@ -90,7 +90,7 @@ class CFConv(nn.Module):
float32 tensor of shape (V, out_feats)
Updated node representations.
"""
g = g.local_var()
with g.local_scope():
g.ndata['hv'] = self.project_node(node_feats)
g.edata['he'] = self.project_edge(edge_feats)
g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h'))
......
@@ -118,7 +118,7 @@ class GATConv(nn.Module):
The output feature of shape :math:`(N, H, D_{out})` where :math:`H`
is the number of heads, and :math:`D_{out}` is size of output feature.
"""
graph = graph.local_var()
with graph.local_scope():
if isinstance(feat, tuple):
h_src = self.feat_drop(feat[0])
h_dst = self.feat_drop(feat[1])
......
@@ -77,9 +77,9 @@ class GatedGraphConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is the output feature size.
"""
with graph.local_scope():
assert graph.is_homograph(), \
"not a homograph; convert it with to_homo and pass in the edge type as argument"
graph = graph.local_var()
zero_pad = feat.new_zeros((feat.shape[0], self._out_feats - feat.shape[1]))
feat = th.cat([feat, zero_pad], -1)
......
@@ -72,7 +72,7 @@ class GINConv(nn.Module):
If ``apply_func`` is None, :math:`D_{out}` should be the same
as input dimensionality.
"""
graph = graph.local_var()
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
......
@@ -125,8 +125,7 @@ class GraphConv(nn.Module):
torch.Tensor
The output feature
"""
graph = graph.local_var()
with graph.local_scope():
if self._norm == 'both':
degs = graph.out_degrees().to(feat.device).float().clamp(min=1)
norm = th.pow(degs, -0.5)
......
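
For the GraphConv hunk just above: with ``norm='both'`` the visible lines build the :math:`D^{-1/2}` factor from the clamped out-degrees. Since the rest of the function is truncated in this view, the sketch below of how that factor is applied symmetrically around the aggregation is an assumption based on the standard GCN rule :math:`D^{-1/2} A D^{-1/2} X W`, not the file's exact code:

```python
import torch as th
import dgl.function as fn

def gcn_both_norm_sketch(graph, feat, weight):
    # Hypothetical sketch of symmetric normalization; not the exact GraphConv body.
    with graph.local_scope():
        degs = graph.out_degrees().to(feat.device).float().clamp(min=1)
        norm = th.pow(degs, -0.5)
        shp = norm.shape + (1,) * (feat.dim() - 1)
        feat = feat * th.reshape(norm, shp)                       # left D^{-1/2}
        graph.ndata['h'] = th.matmul(feat, weight)                # X W
        graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))   # aggregate over A
        degs = graph.in_degrees().to(feat.device).float().clamp(min=1)
        norm = th.reshape(th.pow(degs, -0.5), shp)
        return graph.ndata['h'] * norm                            # right D^{-1/2}
```
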
@@ -114,8 +114,7 @@ class SAGEConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is size of output feature.
"""
graph = graph.local_var()
with graph.local_scope():
if isinstance(feat, tuple):
feat_src = self.feat_drop(feat[0])
feat_dst = self.feat_drop(feat[1])
......
@@ -77,7 +77,7 @@ class SGConv(nn.Module):
If ``cache`` is set to True, ``feat`` and ``graph`` should not change during
training, or you will get wrong results.
"""
graph = graph.local_var()
with graph.local_scope():
if self._cached_h is not None:
feat = self._cached_h
else:
......
@@ -73,8 +73,8 @@ class TAGConv(nn.Module):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is size of output feature.
"""
with graph.local_scope():
assert graph.is_homograph(), 'Graph is not homogeneous'
graph = graph.local_var()
norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)
shp = norm.shape + (1,) * (feat.dim() - 1)
......
@@ -130,7 +130,7 @@ class Sequential(nn.Sequential):
>>> def __init__(self):
>>> super().__init__()
>>> def forward(self, graph, n_feat, e_feat):
>>> graph = graph.local_var()
>>> with graph.local_scope():
>>> graph.ndata['h'] = n_feat
>>> graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
>>> n_feat += graph.ndata['h']
@@ -169,7 +169,7 @@ class Sequential(nn.Sequential):
>>> def __init__(self):
>>> super().__init__()
>>> def forward(self, graph, n_feat):
>>> graph = graph.local_var()
>>> with graph.local_scope():
>>> graph.ndata['h'] = n_feat
>>> graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
>>> n_feat += graph.ndata['h']
......
@@ -55,7 +55,7 @@ class APPNPConv(layers.Layer):
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
"""
graph = graph.local_var()
with graph.local_scope():
degs = tf.clip_by_value(tf.cast(graph.in_degrees(), tf.float32),
clip_value_min=1, clip_value_max=np.inf)
norm = tf.pow(degs, -0.5)
......
@@ -112,7 +112,7 @@ class GATConv(layers.Layer):
The output feature of shape :math:`(N, H, D_{out})` where :math:`H`
is the number of heads, and :math:`D_{out}` is size of output feature.
"""
graph = graph.local_var()
with graph.local_scope():
if isinstance(feat, tuple):
h_src = self.feat_drop(feat[0])
h_dst = self.feat_drop(feat[1])
......
@@ -70,7 +70,7 @@ class GINConv(layers.Layer):
If ``apply_func`` is None, :math:`D_{out}` should be the same
as input dimensionality.
"""
graph = graph.local_var()
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
......
@@ -122,8 +122,7 @@ class GraphConv(layers.Layer):
tf.Tensor
The output feature
"""
graph = graph.local_var()
with graph.local_scope():
if self._norm == 'both':
degs = tf.clip_by_value(tf.cast(graph.out_degrees(), tf.float32),
clip_value_min=1,
......
@@ -100,8 +100,7 @@ class SAGEConv(layers.Layer):
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is size of output feature.
"""
graph = graph.local_var()
with graph.local_scope():
if isinstance(feat, tuple):
feat_src = self.feat_drop(feat[0])
feat_dst = self.feat_drop(feat[1])
......
@@ -72,7 +72,7 @@ class SGConv(layers.Layer):
If ``cache`` is set to True, ``feat`` and ``graph`` should not change during
training, or you will get wrong results.
"""
graph = graph.local_var()
with graph.local_scope():
if self._cached_h is not None:
feat = self._cached_h
else:
......
@@ -12,23 +12,23 @@ def edge_softmax_real(graph, score, eids=ALL):
"""Edge Softmax function"""
if not is_all(eids):
graph = graph.edge_subgraph(tf.cast(eids, tf.int64))
g = graph.local_var()
g.edata['s'] = score
g.update_all(fn.copy_e('s', 'm'), fn.max('m', 'smax'))
g.apply_edges(fn.e_sub_v('s', 'smax', 'out'))
g.edata['out'] = tf.math.exp(g.edata['out'])
g.update_all(fn.copy_e('out', 'm'), fn.sum('m', 'out_sum'))
g.apply_edges(fn.e_div_v('out', 'out_sum', 'out'))
out = g.edata['out']
with graph.local_scope():
graph.edata['s'] = score
graph.update_all(fn.copy_e('s', 'm'), fn.max('m', 'smax'))
graph.apply_edges(fn.e_sub_v('s', 'smax', 'out'))
graph.edata['out'] = tf.math.exp(graph.edata['out'])
graph.update_all(fn.copy_e('out', 'm'), fn.sum('m', 'out_sum'))
graph.apply_edges(fn.e_div_v('out', 'out_sum', 'out'))
out = graph.edata['out']
def edge_softmax_backward(grad_out):
g = graph.local_var()
with graph.local_scope():
# clear backward cache explicitly
g.edata['out'] = out
g.edata['grad_s'] = out * grad_out
g.update_all(fn.copy_e('grad_s', 'm'), fn.sum('m', 'accum'))
g.apply_edges(fn.e_mul_v('out', 'accum', 'out'))
grad_score = g.edata['grad_s'] - g.edata['out']
graph.edata['out'] = out
graph.edata['grad_s'] = out * grad_out
graph.update_all(fn.copy_e('grad_s', 'm'), fn.sum('m', 'accum'))
graph.apply_edges(fn.e_mul_v('out', 'accum', 'out'))
grad_score = graph.edata['grad_s'] - graph.edata['out']
return grad_score
return out, edge_softmax_backward
......
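
The hunk above rewrites the TensorFlow ``edge_softmax_real`` so that both the forward pass and its custom gradient run inside ``local_scope()`` on the original graph instead of two ``local_var()`` copies. The forward logic is a per-destination-node softmax over incoming edge scores, with the usual max-subtraction for numerical stability. A minimal PyTorch-flavored restatement of that forward pass (the function name and ``scores`` argument are illustrative, and the gradient rule from ``edge_softmax_backward`` is omitted):

```python
import torch as th
import dgl.function as fn

def edge_softmax_forward_sketch(graph, scores):
    # For every destination node, normalize the scores on its incoming edges
    # so they sum to one, subtracting the per-node max before exponentiating.
    with graph.local_scope():
        graph.edata['s'] = scores
        graph.update_all(fn.copy_e('s', 'm'), fn.max('m', 'smax'))      # max score per dst node
        graph.apply_edges(fn.e_sub_v('s', 'smax', 'out'))               # s_e - smax[dst(e)]
        graph.edata['out'] = th.exp(graph.edata['out'])
        graph.update_all(fn.copy_e('out', 'm'), fn.sum('m', 'out_sum')) # normalizer per dst node
        graph.apply_edges(fn.e_div_v('out', 'out_sum', 'out'))          # exp / sum
        return graph.edata['out']
```
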