Unverified commit 6d9433b0, authored by Mufei Li, committed by GitHub

[Transform] [Doc] Rename transform to transforms and update doc (#3765)

* Update

* Update

* Update

* Fix

* Update

* Update

* Update

* Fix
parent ccaa0bf2
"""Modules that transforms between graphs and between graph and tensors.""" """Modules that transforms between graphs and between graph and tensors."""
import torch.nn as nn import torch.nn as nn
from ...transform import knn_graph, segmented_knn_graph from ...transforms import knn_graph, segmented_knn_graph
def pairwise_squared_distance(x): def pairwise_squared_distance(x):
''' '''
@@ -12,11 +12,7 @@ def pairwise_squared_distance(x):
 class KNNGraph(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Layer that transforms one point set into a graph, or a batch of
+    r"""Layer that transforms one point set into a graph, or a batch of
     point sets with the same number of points into a union of those graphs.

     The KNNGraph is implemented in the following steps:
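A hedged usage sketch of the module above; the point count, dimensionality, and choice of k are illustrative assumptions:

```python
# Minimal sketch: build a k-NN graph from a random point set.
import torch
import dgl
from dgl.nn import KNNGraph

x = torch.randn(10, 3)   # 10 points in 3-D space (illustrative)
knn_g = KNNGraph(3)      # connect each point to its 3 nearest neighbors
g = knn_g(x)             # a DGLGraph over the 10 points
print(g.num_nodes(), g.num_edges())
```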
@@ -127,11 +123,7 @@ class KNNGraph(nn.Module):
 class SegmentedKNNGraph(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Layer that transforms one point set into a graph, or a batch of
+    r"""Layer that transforms one point set into a graph, or a batch of
     point sets with different number of points into a union of those graphs.

     If a batch of point sets is provided, then the point :math:`j` in the point
......
@@ -15,11 +15,7 @@ __all__ = ['SumPooling', 'AvgPooling', 'MaxPooling', 'SortPooling',
            'SetTransformerEncoder', 'SetTransformerDecoder', 'WeightAndSum']

 class SumPooling(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Apply sum pooling over the nodes in a graph .
+    r"""Apply sum pooling over the nodes in a graph.

     .. math::
         r^{(i)} = \sum_{k=1}^{N_i} x^{(i)}_k
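A hedged sketch of sum-pooling readout over a batch; the two toy graphs and the feature size are made up for illustration:

```python
# Sum-pool node features into one vector per graph in a batch.
import torch
import dgl
from dgl.nn import SumPooling

g1 = dgl.graph(([0, 1], [1, 2]))          # 3-node graph
g2 = dgl.graph(([0, 1, 2], [1, 2, 3]))    # 4-node graph
bg = dgl.batch([g1, g2])
feat = torch.ones(bg.num_nodes(), 5)      # (N, D) node features
out = SumPooling()(bg, feat)              # (2, D): one readout per graph
print(out)                                # row 0 sums 3 nodes, row 1 sums 4
```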
@@ -100,11 +96,7 @@ class SumPooling(nn.Module):
 class AvgPooling(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Apply average pooling over the nodes in a graph.
+    r"""Apply average pooling over the nodes in a graph.

     .. math::
         r^{(i)} = \frac{1}{N_i}\sum_{k=1}^{N_i} x^{(i)}_k
@@ -185,11 +177,7 @@ class AvgPooling(nn.Module):
 class MaxPooling(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Apply max pooling over the nodes in a graph.
+    r"""Apply max pooling over the nodes in a graph.

     .. math::
         r^{(i)} = \max_{k=1}^{N_i}\left( x^{(i)}_k \right)
@@ -268,13 +256,10 @@ class MaxPooling(nn.Module):
 class SortPooling(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Apply Sort Pooling (`An End-to-End Deep Learning Architecture for Graph Classification
-    <https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__) over the nodes in a graph.
-    Sort Pooling first sorts the node features in ascending order along the feature dimension,
+    r"""Sort Pooling from `An End-to-End Deep Learning Architecture for Graph Classification
+    <https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__
+
+    It first sorts the node features in ascending order along the feature dimension,
     and selects the sorted features of top-k nodes (ranked by the largest value of each node).
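A hedged sketch of the fixed-size readout this produces; the toy graph and k=2 are illustrative:

```python
# SortPooling keeps the sorted features of the top-k nodes,
# giving a (B, k * D) readout regardless of graph size.
import torch
import dgl
from dgl.nn import SortPooling

g = dgl.graph(([0, 1, 2], [1, 2, 3]))   # 4 nodes (illustrative)
feat = torch.randn(g.num_nodes(), 5)
out = SortPooling(k=2)(g, feat)          # shape (1, 2 * 5)
print(out.shape)
```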
     Parameters
@@ -345,7 +330,7 @@ class SortPooling(nn.Module):
         graph : DGLGraph
             A DGLGraph or a batch of DGLGraphs.
         feat : torch.Tensor
-            The input feature with shape :math:`(N, D)`, where :math:`N` is the
+            The input node feature with shape :math:`(N, D)`, where :math:`N` is the
             number of nodes in the graph, and :math:`D` means the size of features.

     Returns
@@ -365,12 +350,8 @@ class SortPooling(nn.Module):
 class GlobalAttentionPooling(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Apply Global Attention Pooling (`Gated Graph Sequence Neural Networks
-    <https://arxiv.org/abs/1511.05493.pdf>`__) over the nodes in a graph.
+    r"""Global Attention Pooling from `Gated Graph Sequence Neural Networks
+    <https://arxiv.org/abs/1511.05493>`__

     .. math::
         r^{(i)} = \sum_{k=1}^{N_i}\mathrm{softmax}\left(f_{gate}
@@ -448,7 +429,7 @@ class GlobalAttentionPooling(nn.Module):
         graph : DGLGraph
             A DGLGraph or a batch of DGLGraphs.
         feat : torch.Tensor
-            The input feature with shape :math:`(N, D)` where :math:`N` is the
+            The input node feature with shape :math:`(N, D)` where :math:`N` is the
             number of nodes in the graph, and :math:`D` means the size of features.

     Returns
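A hedged usage sketch for GlobalAttentionPooling; the gate network and sizes are illustrative assumptions:

```python
# The gate network scores each node, softmax turns the scores into
# attention weights, and the readout is the weighted sum of features.
import torch
import dgl
from dgl.nn import GlobalAttentionPooling

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = torch.randn(g.num_nodes(), 5)
gate_nn = torch.nn.Linear(5, 1)          # f_gate: maps D features to a scalar
pool = GlobalAttentionPooling(gate_nn)
out = pool(g, feat)                      # (1, 5) graph-level representation
```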
@@ -474,10 +455,9 @@ class GlobalAttentionPooling(nn.Module):
 class Set2Set(nn.Module):
-    r"""
-
-    Description
-    -----------
+    r"""Set2Set operator from `Order Matters: Sequence to sequence for sets
+    <https://arxiv.org/pdf/1511.06391.pdf>`__

     For each individual graph in the batch, set2set computes

     .. math::
@@ -641,7 +621,7 @@ def _gen_mask(lengths_x, lengths_y, max_len_x, max_len_y):
 class MultiHeadAttention(nn.Module):
-    r"""Multi-Head Attention block, used in Transformer, Set Transformer and so on.
+    r"""Multi-Head Attention block, used in Transformer, Set Transformer and so on

     Parameters
     ----------
@@ -754,7 +734,8 @@ class MultiHeadAttention(nn.Module):
 class SetAttentionBlock(nn.Module):
-    r"""SAB block introduced in Set-Transformer paper.
+    r"""SAB block from `Set Transformer: A Framework for Attention-based
+    Permutation-Invariant Neural Networks <https://arxiv.org/abs/1810.00825>`__

     Parameters
     ----------
@@ -795,7 +776,8 @@ class SetAttentionBlock(nn.Module):
 class InducedSetAttentionBlock(nn.Module):
-    r"""ISAB block introduced in Set-Transformer paper.
+    r"""ISAB block from `Set Transformer: A Framework for Attention-based
+    Permutation-Invariant Neural Networks <https://arxiv.org/abs/1810.00825>`__

     Parameters
     ----------
@@ -867,7 +849,8 @@ class InducedSetAttentionBlock(nn.Module):
 class PMALayer(nn.Module):
-    r"""Pooling by Multihead Attention, used as the Decoder Module in Set Transformer.
+    r"""Pooling by Multihead Attention from `Set Transformer: A Framework for Attention-based
+    Permutation-Invariant Neural Networks <https://arxiv.org/abs/1810.00825>`__

     Parameters
     ----------
@@ -943,12 +926,8 @@ class PMALayer(nn.Module):
 class SetTransformerEncoder(nn.Module):
-    r"""
-
-    Description
-    -----------
-    The Encoder module in `Set Transformer: A Framework for Attention-based
-    Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__.
+    r"""The Encoder module from `Set Transformer: A Framework for Attention-based
+    Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__

     Parameters
     ----------
@@ -1079,12 +1058,8 @@ class SetTransformerEncoder(nn.Module):
 class SetTransformerDecoder(nn.Module):
-    r"""
-
-    Description
-    -----------
-    The Decoder module in `Set Transformer: A Framework for Attention-based
-    Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__.
+    r"""The Decoder module from `Set Transformer: A Framework for Attention-based
+    Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__

     Parameters
     ----------
......
@@ -5,12 +5,9 @@ import torch.nn as nn
 import torch.nn.functional as F

 class EdgePredictor(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Predictor/score function for pairs of node representations. Given a pair of node
-    representations, :math:`h_i` and :math:`h_j`, it combines them with
+    r"""Predictor/score function for pairs of node representations
+
+    Given a pair of node representations, :math:`h_i` and :math:`h_j`, it combines them with
     **dot product**
......
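A hedged sketch of EdgePredictor with the dot-product op; the constructor argument and shapes below are assumptions based on this docstring, not a verified API, so check the release docs:

```python
# Score node pairs by combining their representations with a dot product.
import torch
from dgl.nn import EdgePredictor

h_src = torch.randn(16, 8)           # h_i for 16 candidate edges
h_dst = torch.randn(16, 8)           # h_j for the same edges
predictor = EdgePredictor('dot')     # 'dot' op assumed from the docstring
scores = predictor(h_src, h_dst)     # one score per (h_i, h_j) pair
```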
@@ -4,12 +4,9 @@ import torch
 import torch.nn as nn

 class TransE(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Similarity measure introduced in `Translating Embeddings for Modeling Multi-relational Data
-    <https://papers.nips.cc/paper/2013/hash/1cecc7a77928ca8133fa24680a88d2f9-Abstract.html>`__.
+    r"""Similarity measure from `Translating Embeddings for Modeling Multi-relational Data
+    <https://papers.nips.cc/paper/2013/hash/1cecc7a77928ca8133fa24680a88d2f9-Abstract.html>`__

     Mathematically, it is defined as follows:

     .. math::
......
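For orientation, the standard TransE measure written in plain PyTorch; DGL's module wraps the same computation with a learnable relation embedding table, so this is an illustration of the formula rather than the module's API:

```python
# TransE scores a triple (head, relation, tail) higher when h + r
# lands close to t: score = -|| h + r - t ||_p.
import torch

def transe_score(h, r, t, p=1):
    return -torch.norm(h + r - t, p=p, dim=-1)

h, r, t = torch.randn(4, 8), torch.randn(4, 8), torch.randn(4, 8)
print(transe_score(h, r, t))   # one score per triple
```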
@@ -4,14 +4,11 @@ import torch
 import torch.nn as nn

 class TransR(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Similarity measure introduced in
+    r"""Similarity measure from
     `Learning entity and relation embeddings for knowledge graph completion
-    <https://ojs.aaai.org/index.php/AAAI/article/view/9491>`__. Mathematically,
-    it is defined as follows:
+    <https://ojs.aaai.org/index.php/AAAI/article/view/9491>`__
+
+    Mathematically, it is defined as follows:

     .. math::
......
@@ -104,11 +104,7 @@ class Identity(nn.Module):
         return x

 class Sequential(nn.Sequential):
-    r"""
-
-    Description
-    -----------
-    A sequential container for stacking graph neural network modules.
+    r"""A sequential container for stacking graph neural network modules

     DGL supports two modes: sequentially apply GNN modules on 1) the same graph or
     2) a list of given graphs. In the second case, the number of graphs equals the
@@ -227,11 +223,10 @@ class Sequential(nn.Sequential):
         return feats
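A hedged sketch of the first mode, where every module sees the same graph; the layer sizes and the 3-node cycle are illustrative:

```python
# Stack two GraphConv layers and apply them to one shared graph.
import torch
import dgl
from dgl.nn import Sequential, GraphConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))        # a 3-node cycle
net = Sequential(GraphConv(4, 8), GraphConv(8, 2))
out = net(g, torch.randn(3, 4))              # the graph is reused across layers
print(out.shape)                             # torch.Size([3, 2])
```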
 class WeightBasis(nn.Module):
-    r"""Basis decomposition module.
-
-    Basis decomposition is introduced in "`Modeling Relational Data with Graph
-    Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
-    and can be described as below:
+    r"""Basis decomposition from `Modeling Relational Data with Graph
+    Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
+
+    It can be described as below:

     .. math::
@@ -284,13 +279,10 @@ class Sequential(nn.Sequential):
         return weight.view(self.num_outputs, *self.shape)

 class JumpingKnowledge(nn.Module):
-    r"""
-
-    Description
-    -----------
-    The Jumping Knowledge aggregation module introduced in `Representation Learning on
-    Graphs with Jumping Knowledge Networks <https://arxiv.org/abs/1806.03536>`__. It
-    aggregates the output representations of multiple GNN layers with
+    r"""The Jumping Knowledge aggregation module from `Representation Learning on
+    Graphs with Jumping Knowledge Networks <https://arxiv.org/abs/1806.03536>`__
+
+    It aggregates the output representations of multiple GNN layers with
     **concatenation**
......
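A hedged sketch of JumpingKnowledge in concatenation mode; the mode name and call signature are assumptions based on this docstring:

```python
# Merge the per-layer outputs of a GNN stack into one vector per node.
import torch
from dgl.nn import JumpingKnowledge

jk = JumpingKnowledge('cat')                         # concatenation mode assumed
layer_outs = [torch.randn(6, 16) for _ in range(3)]  # 3 GNN layers, 6 nodes
out = jk(layer_outs)                                 # (6, 48) after concatenating 3 x 16
```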
@@ -9,8 +9,8 @@ from .... import function as fn
 class APPNPConv(layers.Layer):
     r"""Approximate Personalized Propagation of Neural Predictions
-    layer from paper `Predict then Propagate: Graph Neural Networks
-    meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__.
+    layer from `Predict then Propagate: Graph Neural Networks
+    meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__

     .. math::
         H^{0} & = X
......
@@ -9,13 +9,9 @@ from .... import broadcast_nodes, function as fn
 class ChebConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Chebyshev Spectral Graph Convolution layer from paper `Convolutional
+    r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
     Neural Networks on Graphs with Fast Localized Spectral Filtering
-    <https://arxiv.org/pdf/1606.09375.pdf>`__.
+    <https://arxiv.org/pdf/1606.09375.pdf>`__

     .. math::
         h_i^{l+1} &= \sum_{k=0}^{K-1} W^{k, l}z_i^{k, l}
@@ -30,7 +26,6 @@ class ChebConv(layers.Layer):
     where :math:`\tilde{A}` is :math:`A` + :math:`I`, :math:`W` is learnable weight.
-
     Parameters
     ----------
     in_feats: int
@@ -79,11 +74,7 @@ class ChebConv(layers.Layer):
         self.linear = layers.Dense(out_feats, use_bias=bias)

     def call(self, graph, feat, lambda_max=None):
-        r"""
-
-        Description
-        -----------
-        Compute ChebNet layer.
+        r"""Compute ChebNet layer.

         Parameters
         ----------
......
@@ -6,13 +6,9 @@ import numpy as np
 class DenseChebConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Chebyshev Spectral Graph Convolution layer from paper `Convolutional
+    r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
     Neural Networks on Graphs with Fast Localized Spectral Filtering
-    <https://arxiv.org/pdf/1606.09375.pdf>`__.
+    <https://arxiv.org/pdf/1606.09375.pdf>`__

     We recommend to use this module when applying ChebConv on dense graphs.
@@ -57,11 +53,7 @@ class DenseChebConv(layers.Layer):
         self.bias = None

     def call(self, adj, feat, lambda_max=None):
-        r"""
-
-        Description
-        -----------
-        Compute (Dense) Chebyshev Spectral Graph Convolution layer.
+        r"""Compute (Dense) Chebyshev Spectral Graph Convolution layer.

         Parameters
         ----------
......
@@ -9,21 +9,25 @@ from ....utils import expand_as_pair
 class EdgeConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    EdgeConv layer.
-
-    Introduced in "`Dynamic Graph CNN for Learning on Point Clouds
-    <https://arxiv.org/pdf/1801.07829>`__". Can be described as follows:
+    r"""EdgeConv layer from `Dynamic Graph CNN for Learning on Point Clouds
+    <https://arxiv.org/pdf/1801.07829>`__
+
+    It can be described as follows:

     .. math::
        h_i^{(l+1)} = \max_{j \in \mathcal{N}(i)} (
        \Theta \cdot (h_j^{(l)} - h_i^{(l)}) + \Phi \cdot h_i^{(l)})

-    where :math:`\mathcal{N}(i)` is the neighbor of :math:`i`,
+    where :math:`\mathcal{N}(i)` is the neighbor of :math:`i`.
     :math:`\Theta` and :math:`\Phi` are linear layers.

     .. note::
        The original formulation includes a ReLU inside the maximum operator.
        This is equivalent to first applying a maximum operator then applying
        the ReLU.
     Parameters
     ----------
     in_feat : int
@@ -38,14 +42,18 @@ class EdgeConv(layers.Layer):
        causing silent performance regression. This module will raise a DGLError if it detects
        0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
        and let the users handle it by themselves. Default: ``False``.

     Note
     ----
     Zero in-degree nodes will lead to invalid output value. This is because no message
     will be passed to those nodes, the aggregation function will be applied on empty input.
     A common practice to avoid this is to add a self-loop for each node in the graph if
     it is homogeneous, which can be achieved by:

     >>> g = ...  # a DGLGraph
     >>> g = dgl.add_self_loop(g)

     Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph
     since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``
     to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
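A minimal sketch of the self-loop workaround described in the note, assuming a homogeneous graph where one node starts with no in-edges:

```python
# Node 2 is isolated, so it would receive no messages without a self-loop.
import dgl

g = dgl.graph(([0, 1], [1, 0]), num_nodes=3)   # node 2 has in-degree 0
g = dgl.add_self_loop(g)                       # every node now has in-degree >= 1
```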
@@ -66,10 +74,8 @@ class EdgeConv(layers.Layer):
         self.bn = layers.BatchNormalization()

     def set_allow_zero_in_degree(self, set_value):
-        r"""
-
-        Description
-        -----------
-        Set allow_zero_in_degree flag.
+        r"""Set allow_zero_in_degree flag.

         Parameters
         ----------
         set_value : bool
@@ -78,10 +84,8 @@ class EdgeConv(layers.Layer):
         self._allow_zero_in_degree = set_value

     def call(self, g, feat):
-        """
-
-        Description
-        -----------
-        Forward computation
+        """Forward computation

         Parameters
         ----------
         g : DGLGraph
@@ -92,10 +96,12 @@ class EdgeConv(layers.Layer):
            If a pair of tensors is given, the graph must be a uni-bipartite graph
            with only one edge type, and the two tensors must have the same
            dimensionality on all except the first axis.

         Returns
         -------
         tf.Tensor or pair of tf.Tensor
             New node features.

         Raises
         ------
         DGLError
......
@@ -13,12 +13,8 @@ from ..utils import Identity
 class GATConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
-    over an input signal.
+    r"""Graph Attention Layer from `Graph Attention Network
+    <https://arxiv.org/pdf/1710.10903.pdf>`__

     .. math::
         h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}
@@ -182,11 +178,7 @@ class GATConv(layers.Layer):
         self.activation = activation

     def set_allow_zero_in_degree(self, set_value):
-        r"""
-
-        Description
-        -----------
-        Set allow_zero_in_degree flag.
+        r"""Set allow_zero_in_degree flag.

         Parameters
         ----------
@@ -196,11 +188,7 @@ class GATConv(layers.Layer):
         self._allow_zero_in_degree = set_value

     def call(self, graph, feat, get_attention=False):
-        r"""
-
-        Description
-        -----------
-        Compute graph attention network layer.
+        r"""Compute graph attention network layer.

         Parameters
         ----------
......
@@ -8,12 +8,8 @@ from ....utils import expand_as_pair
 class GINConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Graph Isomorphism Network layer from paper `How Powerful are Graph
-    Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
+    r"""Graph Isomorphism Network layer from `How Powerful are Graph
+    Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__

     .. math::
         h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
@@ -80,11 +76,7 @@ class GINConv(layers.Layer):
         self.eps = tf.Variable(initial_value=[init_eps], dtype=tf.float32, trainable=learn_eps)

     def call(self, graph, feat):
-        r"""
-
-        Description
-        -----------
-        Compute Graph Isomorphism Network layer.
+        r"""Compute Graph Isomorphism Network layer.

         Parameters
         ----------
......
@@ -12,12 +12,10 @@ from ....utils import expand_as_pair
 class GraphConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
-    and mathematically is defined as follows:
+    r"""Graph convolution from `Semi-Supervised Classification with Graph Convolutional Networks
+    <https://arxiv.org/abs/1609.02907>`__
+
+    Mathematically it is defined as follows:

     .. math::
         h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})
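A hedged usage sketch for this TensorFlow-backend GraphConv (DGL must be running with `DGLBACKEND=tensorflow`); the graph and feature sizes are illustrative:

```python
# One graph-convolution step on a 3-node cycle.
import tensorflow as tf
import dgl
from dgl.nn.tensorflow import GraphConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))    # every node has in-degree 1
feat = tf.random.normal((3, 4))
conv = GraphConv(4, 2)                   # in_feats=4, out_feats=2
out = conv(g, feat)                      # (3, 2) node representations
```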
@@ -170,11 +168,7 @@ class GraphConv(layers.Layer):
         self._activation = activation

     def set_allow_zero_in_degree(self, set_value):
-        r"""
-
-        Description
-        -----------
-        Set allow_zero_in_degree flag.
+        r"""Set allow_zero_in_degree flag.

         Parameters
         ----------
@@ -184,11 +178,7 @@ class GraphConv(layers.Layer):
         self._allow_zero_in_degree = set_value

     def call(self, graph, feat, weight=None):
-        r"""
-
-        Description
-        -----------
-        Compute graph convolution.
+        r"""Compute graph convolution.

         Parameters
         ----------
......
@@ -8,15 +8,10 @@ from .. import utils
 class RelGraphConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Relational graph convolution layer.
-
-    Relational graph convolution is introduced in "`Modeling Relational Data with Graph
-    Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
-    and can be described as below:
+    r"""Relational graph convolution layer from `Modeling Relational Data with Graph
+    Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
+
+    It can be described as below:

     .. math::
@@ -248,7 +243,7 @@ class RelGraphConv(layers.Layer):
         return {'msg': msg}

     def call(self, g, x, etypes, norm=None):
-        """ Forward computation
+        """Forward computation

         Parameters
         ----------
......
@@ -9,12 +9,8 @@ from ....utils import expand_as_pair, check_eq_shape
 class SAGEConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    GraphSAGE layer from paper `Inductive Representation Learning on
-    Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__.
+    r"""GraphSAGE layer from `Inductive Representation Learning on
+    Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__

     .. math::
         h_{\mathcal{N}(i)}^{(l+1)} &= \mathrm{aggregate}
@@ -133,11 +129,7 @@ class SAGEConv(layers.Layer):
         return {'neigh': rst}

     def call(self, graph, feat):
-        r"""
-
-        Description
-        -----------
-        Compute GraphSAGE layer.
+        r"""Compute GraphSAGE layer.

         Parameters
         ----------
......
@@ -9,12 +9,8 @@ from ....base import DGLError
 class SGConv(layers.Layer):
-    r"""
-
-    Description
-    -----------
-    Simplifying Graph Convolution layer from paper `Simplifying Graph
-    Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__.
+    r"""SGC layer from `Simplifying Graph
+    Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__

     .. math::
         H^{K} = (\tilde{D}^{-1/2} \tilde{A} \tilde{D}^{-1/2})^K X \Theta
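A hedged sketch matching the formula above: propagate features K times over the normalized adjacency, then apply a single linear projection. The graph, sizes, and k=2 are illustrative, and the TensorFlow backend is assumed:

```python
# SGC: k propagation steps followed by one projection (H^K Theta).
import tensorflow as tf
import dgl
from dgl.nn.tensorflow import SGConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = tf.random.normal((3, 4))
conv = SGConv(4, 2, k=2)     # K = 2 propagation steps (illustrative)
out = conv(g, feat)          # shape (3, 2)
```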
@@ -104,11 +100,7 @@ class SGConv(layers.Layer):
         self._allow_zero_in_degree = allow_zero_in_degree

     def set_allow_zero_in_degree(self, set_value):
-        r"""
-
-        Description
-        -----------
-        Set allow_zero_in_degree flag.
+        r"""Set allow_zero_in_degree flag.

         Parameters
         ----------
@@ -118,11 +110,7 @@ class SGConv(layers.Layer):
         self._allow_zero_in_degree = set_value

     def call(self, graph, feat):
-        r"""
-
-        Description
-        -----------
-        Compute Simplifying Graph Convolution layer.
+        r"""Compute Simplifying Graph Convolution layer.

         Parameters
         ----------
......
@@ -113,8 +113,8 @@ class MaxPooling(layers.Layer):
 class SortPooling(layers.Layer):
-    r"""Apply Sort Pooling (`An End-to-End Deep Learning Architecture for Graph Classification
-    <https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__) over the nodes in the graph.
+    r"""Sort Pooling from `An End-to-End Deep Learning Architecture for Graph Classification
+    <https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__

     Parameters
     ----------
@@ -134,7 +134,7 @@ class SortPooling(layers.Layer):
         graph : DGLGraph
             The graph.
         feat : tf.Tensor
-            The input feature with shape :math:`(N, D)` where
+            The input node feature with shape :math:`(N, D)` where
             :math:`N` is the number of nodes in the graph.

     Returns
@@ -154,8 +154,8 @@ class SortPooling(layers.Layer):
 class GlobalAttentionPooling(layers.Layer):
-    r"""Apply Global Attention Pooling (`Gated Graph Sequence Neural Networks
-    <https://arxiv.org/abs/1511.05493.pdf>`__) over the nodes in the graph.
+    r"""Global Attention Pooling from `Gated Graph Sequence Neural Networks
+    <https://arxiv.org/abs/1511.05493.pdf>`__

     .. math::
         r^{(i)} = \sum_{k=1}^{N_i}\mathrm{softmax}\left(f_{gate}
@@ -183,7 +183,7 @@ class GlobalAttentionPooling(layers.Layer):
         graph : DGLGraph
             The graph.
         feat : tf.Tensor
-            The input feature with shape :math:`(N, D)` where
+            The input node feature with shape :math:`(N, D)` where
             :math:`N` is the number of nodes in the graph.

     Returns
......
@@ -496,7 +496,7 @@ def sample_neighbors_biased(g, nodes, fanout, bias, edge_dir='in',
     Sort the graph (necessary!)

-    >>> g_sorted = dgl.transform.sort_csr_by_tag(g, tag)
+    >>> g_sorted = dgl.transforms.sort_csr_by_tag(g, tag)
     >>> g_sorted.ndata['_TAG_OFFSET']
     tensor([[0, 1, 2],
             [0, 2, 2],
......
@@ -5,7 +5,7 @@ import numpy as np
 from ..utils import recursive_apply, recursive_apply_pair
 from ..base import EID
 from .. import backend as F
-from .. import transform, utils
+from .. import transforms, utils

 def _locate_eids_to_exclude(frontier_parent_eids, exclude_eids):
     """Find the edges whose IDs in parent graph appeared in exclude_eids.
@@ -63,7 +63,7 @@ class EidExcluder(object):
             # to the mapping from the new graph to the old frontier.
             # So we need to test if located_eids is empty, and do the remapping ourselves.
             if len(located_eids) > 0:
-                frontier = transform.remove_edges(
+                frontier = transforms.remove_edges(
                     frontier, located_eids, store_ids=True)
                 frontier.edata[EID] = F.gather_row(parent_eids, frontier.edata[EID])
             else:
@@ -72,7 +72,7 @@ class EidExcluder(object):
                 new_eids = parent_eids.copy()
                 for k, v in located_eids.items():
                     if len(v) > 0:
-                        frontier = transform.remove_edges(
+                        frontier = transforms.remove_edges(
                             frontier, v, etype=k, store_ids=True)
                         new_eids[k] = F.gather_row(parent_eids[k], frontier.edges[k].data[EID])
                 frontier.edata[EID] = new_eids
......