Unverified Commit 6d9433b0 authored by Mufei Li, committed by GitHub

[Transform] [Doc] Rename transform to transforms and update doc (#3765)

* Update

* Update

* Update

* Fix

* Update

* Update

* Update

* Fix
parent ccaa0bf2
@@ -9,14 +9,10 @@ from ....utils import expand_as_pair

 class EdgeConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    EdgeConv layer.
-
-    Introduced in "`Dynamic Graph CNN for Learning on Point Clouds
-    <https://arxiv.org/pdf/1801.07829>`__". Can be described as follows:
+    r"""EdgeConv layer from `Dynamic Graph CNN for Learning on Point Clouds
+    <https://arxiv.org/pdf/1801.07829>`__
+
+    It can be described as follows:

     .. math::
        h_i^{(l+1)} = \max_{j \in \mathcal{N}(i)} (
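For context, a minimal usage sketch of the layer touched above, assuming DGL's MXNet backend is active (DGLBACKEND=mxnet); the toy graph and all sizes are illustrative, and the sketches after the following hunks reuse this `g` and `feat`:

>>> import dgl
>>> import mxnet as mx
>>> from dgl.nn.mxnet import EdgeConv
>>> # toy graph, 6 nodes / 6 edges; self-loops avoid zero-in-degree errors
>>> g = dgl.add_self_loop(dgl.graph(([0, 1, 2, 3, 2, 5], [1, 2, 3, 4, 0, 3])))
>>> feat = mx.nd.ones((6, 10))          # one 10-dim feature per node
>>> conv = EdgeConv(10, 2)              # in_feat=10, out_feat=2
>>> conv.initialize(ctx=mx.cpu(0))
>>> res = conv(g, feat)                 # max over incoming edges, shape (6, 2)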
@@ -12,12 +12,8 @@ from ....utils import expand_as_pair

 #pylint: enable=W0235
 class GATConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
-    over an input signal.
+    r"""Graph attention layer from `Graph Attention Network
+    <https://arxiv.org/pdf/1710.10903.pdf>`__

     .. math::
        h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}
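A similar sketch for the attention layer, reusing `g` and `feat` from above; `num_heads=3` is an illustrative choice:

>>> from dgl.nn.mxnet import GATConv
>>> gatconv = GATConv(10, 2, num_heads=3)
>>> gatconv.initialize(ctx=mx.cpu(0))
>>> res = gatconv(g, feat)              # shape (6, 3, 2): one 2-dim output per head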
@@ -7,12 +7,8 @@ from mxnet.gluon import nn

 from .... import function as fn

 class GatedGraphConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    Gated Graph Convolution layer from paper `Gated Graph Sequence
-    Neural Networks <https://arxiv.org/pdf/1511.05493.pdf>`__.
+    r"""Gated Graph Convolution layer from `Gated Graph Sequence
+    Neural Networks <https://arxiv.org/pdf/1511.05493.pdf>`__

     .. math::
        h_{i}^{0} &= [ x_i \| \mathbf{0} ]
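A hedged sketch for the gated layer, assuming its forward accepts a per-edge relation-id tensor as the PyTorch version does; note `g` has 12 edges after the self-loops added earlier:

>>> from dgl.nn.mxnet import GatedGraphConv
>>> conv = GatedGraphConv(10, 10, n_steps=2, n_etypes=3)
>>> conv.initialize(ctx=mx.cpu(0))
>>> # one relation id per edge (values illustrative)
>>> etypes = mx.nd.array([0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0], dtype='int64')
>>> res = conv(g, feat, etypes)         # GRU-updated node states, shape (6, 10)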
@@ -8,12 +8,8 @@ from ....utils import expand_as_pair

 class GINConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    Graph Isomorphism Network layer from paper `How Powerful are Graph
-    Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
+    r"""Graph Isomorphism layer from `How Powerful are Graph
+    Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__

     .. math::
        h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
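For GIN, the function f_\Theta can be any Gluon block; a sketch with a single Dense layer (sizes illustrative):

>>> from mxnet.gluon import nn as gluon_nn
>>> from dgl.nn.mxnet import GINConv
>>> lin = gluon_nn.Dense(10)            # the learnable function f_Theta
>>> conv = GINConv(lin, 'max')          # aggregator_type: 'sum', 'max', or 'mean'
>>> conv.initialize(ctx=mx.cpu(0))
>>> res = conv(g, feat)                 # shape (6, 10)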
@@ -12,13 +12,8 @@ from ....utils import expand_as_pair

 class GMMConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    The Gaussian Mixture Model Convolution layer from `Geometric Deep
-    Learning on Graphs and Manifolds using Mixture Model CNNs
-    <http://openaccess.thecvf.com/content_cvpr_2017/papers/Monti_Geometric_Deep_Learning_CVPR_2017_paper.pdf>`__.
+    r"""Gaussian Mixture Model Convolution layer from `Geometric Deep Learning on Graphs and
+    Manifolds using Mixture Model CNNs <https://arxiv.org/abs/1611.08402>`__

     .. math::
        u_{ij} &= f(x_i, x_j), x_j \in \mathcal{N}(i)
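GMMConv additionally expects per-edge pseudo-coordinates u_{ij}; a sketch with assumed `dim` and `n_kernels`:

>>> from dgl.nn.mxnet import GMMConv
>>> conv = GMMConv(10, 2, dim=3, n_kernels=2)
>>> conv.initialize(ctx=mx.cpu(0))
>>> pseudo = mx.nd.ones((g.num_edges(), 3))    # pseudo-coordinates u_ij, one per edge
>>> res = conv(g, feat, pseudo)                # shape (6, 2)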
@@ -10,12 +10,10 @@ from ....base import DGLError

 from ....utils import expand_as_pair

 class GraphConv(gluon.Block):
-    r"""
-
-    Description
-    -----------
-    Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
-    and mathematically is defined as follows:
+    r"""Graph convolutional layer from `Semi-Supervised Classification with Graph Convolutional
+    Networks <https://arxiv.org/abs/1609.02907>`__
+
+    Mathematically it is defined as follows:

     .. math::
        h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})
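The formula corresponds to usage like the sketch below, where `norm='both'` applies the symmetric normalization behind c_{ij} (same toy setup as above):

>>> from dgl.nn.mxnet import GraphConv
>>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True)
>>> conv.initialize(ctx=mx.cpu(0))
>>> res = conv(g, feat)                 # normalized neighborhood sums, shape (6, 2)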
@@ -9,12 +9,8 @@ from ....utils import expand_as_pair

 class NNConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    Graph Convolution layer introduced in `Neural Message Passing
-    for Quantum Chemistry <https://arxiv.org/pdf/1704.01212.pdf>`__.
+    r"""Graph Convolution layer from `Neural Message Passing
+    for Quantum Chemistry <https://arxiv.org/pdf/1704.01212.pdf>`__

     .. math::
        h_{i}^{l+1} = h_{i}^{l} + \mathrm{aggregate}\left(\left\{
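NNConv builds a per-edge weight from edge features via a user-supplied network; a sketch assuming 4-dim edge features:

>>> from mxnet.gluon import nn as gluon_nn
>>> from dgl.nn.mxnet import NNConv
>>> edge_func = gluon_nn.Dense(10 * 2)          # maps edge feats to a 10x2 weight per edge
>>> conv = NNConv(10, 2, edge_func, 'mean')
>>> conv.initialize(ctx=mx.cpu(0))
>>> efeat = mx.nd.ones((g.num_edges(), 4))      # illustrative edge features
>>> res = conv(g, feat, efeat)                  # shape (6, 2)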
@@ -11,15 +11,10 @@ from .. import utils

 class RelGraphConv(gluon.Block):
-    r"""
-
-    Description
-    -----------
-    Relational graph convolution layer.
-
-    Relational graph convolution is introduced in "`Modeling Relational Data with Graph
-    Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
-    and can be described as below:
+    r"""Relational graph convolution layer from `Modeling Relational Data with Graph
+    Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
+
+    It can be described as below:

     .. math::
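A sketch for the relational layer, assuming integer relation ids and the default basis regularizer:

>>> from dgl.nn.mxnet import RelGraphConv
>>> conv = RelGraphConv(10, 2, num_rels=3)
>>> conv.initialize(ctx=mx.cpu(0))
>>> # relation id of each of the 12 edges (values illustrative)
>>> etypes = mx.nd.array([0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0], dtype='int64')
>>> res = conv(g, feat, etypes)         # shape (6, 2)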
@@ -10,12 +10,8 @@ from ....base import DGLError

 from ....utils import expand_as_pair, check_eq_shape

 class SAGEConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    GraphSAGE layer from paper `Inductive Representation Learning on
-    Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__.
+    r"""GraphSAGE layer from `Inductive Representation Learning on
+    Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__

     .. math::
        h_{\mathcal{N}(i)}^{(l+1)} &= \mathrm{aggregate}
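GraphSAGE aggregates the neighborhood and combines it with the self feature; a sketch with the pooling aggregator:

>>> from dgl.nn.mxnet import SAGEConv
>>> conv = SAGEConv(10, 2, aggregator_type='pool')   # 'mean', 'gcn', 'pool', or 'lstm'
>>> conv.initialize(ctx=mx.cpu(0))
>>> res = conv(g, feat)                 # shape (6, 2)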
@@ -10,12 +10,8 @@ from ....base import DGLError

 class SGConv(nn.Block):
-    r"""
-
-    Description
-    -----------
-    Simplifying Graph Convolution layer from paper `Simplifying Graph
-    Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__.
+    r"""SGC layer from `Simplifying Graph Convolutional Networks
+    <https://arxiv.org/pdf/1902.07153.pdf>`__

     .. math::
        H^{K} = (\tilde{D}^{-1/2} \tilde{A} \tilde{D}^{-1/2})^K X \Theta
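Per the H^{K} formula, SGC collapses K propagation steps into a single linear map; a sketch with k=2:

>>> from dgl.nn.mxnet import SGConv
>>> conv = SGConv(10, 2, k=2)           # two propagation hops, one weight matrix
>>> conv.initialize(ctx=mx.cpu(0))
>>> res = conv(g, feat)                 # shape (6, 2)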
@@ -9,11 +9,7 @@ from .... import function as fn

 class TAGConv(gluon.Block):
-    r"""
-
-    Description
-    -----------
-    Topology Adaptive Graph Convolutional layer from paper `Topology
+    r"""Topology Adaptive Graph Convolutional layer from `Topology
     Adaptive Graph Convolutional Networks <https://arxiv.org/pdf/1710.10370.pdf>`__.

     .. math::
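TAGConv mixes 0- to k-hop propagated features; a sketch with k=2:

>>> from dgl.nn.mxnet import TAGConv
>>> conv = TAGConv(10, 2, k=2)
>>> conv.initialize(ctx=mx.cpu(0))
>>> res = conv(g, feat)                 # shape (6, 2)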
@@ -118,8 +118,8 @@ class MaxPooling(nn.Block):

 class SortPooling(nn.Block):
-    r"""Apply Sort Pooling (`An End-to-End Deep Learning Architecture for Graph Classification
-    <https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__) over the nodes in the graph.
+    r"""Pooling layer from `An End-to-End Deep Learning Architecture for Graph Classification
+    <https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__

     Parameters
     ----------

@@ -138,7 +138,7 @@ class SortPooling(nn.Block):
     graph : DGLGraph
         The graph.
     feat : mxnet.NDArray
-        The input feature with shape :math:`(N, D)` where
+        The input node feature with shape :math:`(N, D)` where
         :math:`N` is the number of nodes in the graph.

     Returns

@@ -161,8 +161,8 @@ class SortPooling(nn.Block):

 class GlobalAttentionPooling(nn.Block):
-    r"""Apply Global Attention Pooling (`Gated Graph Sequence Neural Networks
-    <https://arxiv.org/abs/1511.05493.pdf>`__) over the nodes in the graph.
+    r"""Global Attention Pooling layer from `Gated Graph Sequence Neural Networks
+    <https://arxiv.org/abs/1511.05493.pdf>`__

     .. math::
        r^{(i)} = \sum_{k=1}^{N_i}\mathrm{softmax}\left(f_{gate}

@@ -190,7 +190,7 @@ class GlobalAttentionPooling(nn.Block):
     graph : DGLGraph
         The graph.
     feat : mxnet.NDArray
-        The input feature with shape :math:`(N, D)` where
+        The input node feature with shape :math:`(N, D)` where
         :math:`N` is the number of nodes in the graph.

     Returns

@@ -214,8 +214,8 @@ class GlobalAttentionPooling(nn.Block):

 class Set2Set(nn.Block):
-    r"""Apply Set2Set (`Order Matters: Sequence to sequence for sets
-    <https://arxiv.org/pdf/1511.06391.pdf>`__) over the nodes in the graph.
+    r"""Set2Set operator from `Order Matters: Sequence to sequence for sets
+    <https://arxiv.org/pdf/1511.06391.pdf>`__

     For each individual graph in the batch, set2set computes

@@ -257,7 +257,7 @@ class Set2Set(nn.Block):
     graph : DGLGraph
         The graph.
     feat : mxnet.NDArray
-        The input feature with shape :math:`(N, D)` where
+        The input node feature with shape :math:`(N, D)` where
         :math:`N` is the number of nodes in the graph.

     Returns
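The readouts above operate per graph inside a batched graph; a SortPooling sketch on a two-graph batch (k and widths illustrative):

>>> from dgl.nn.mxnet import SortPooling
>>> g1 = dgl.graph(([0, 1], [1, 2]))
>>> g2 = dgl.graph(([0, 1, 2], [1, 2, 3]))
>>> bg = dgl.batch([g1, g2])                    # readout runs once per graph in the batch
>>> bfeat = mx.nd.random.uniform(shape=(bg.num_nodes(), 5))
>>> sortpool = SortPooling(k=2)
>>> out = sortpool(bg, bfeat)                   # shape (2, 2 * 5): top-k sorted nodes per graph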
@@ -5,7 +5,7 @@ from mxnet.gluon import nn

 __all__ = ['HeteroGraphConv']

 class HeteroGraphConv(nn.Block):
-    r"""A generic module for computing convolution on heterogeneous graphs.
+    r"""A generic module for computing convolution on heterogeneous graphs

     The heterograph convolution applies sub-modules on their associating
     relation graphs, which reads the features from source nodes and writes the
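HeteroGraphConv runs one sub-module per relation and merges the results per destination node type; a sketch with two assumed relations (`allow_zero_in_degree` skips the isolated-node check):

>>> from dgl.nn.mxnet import HeteroGraphConv, GraphConv
>>> hg = dgl.heterograph({
...     ('user', 'follows', 'user'): ([0, 1], [1, 2]),
...     ('user', 'plays', 'game'): ([0, 1, 2], [0, 0, 1])})
>>> conv = HeteroGraphConv({
...     'follows': GraphConv(10, 2, allow_zero_in_degree=True),
...     'plays': GraphConv(10, 2, allow_zero_in_degree=True)},
...     aggregate='sum')                # merges outputs landing on the same node type
>>> conv.initialize(ctx=mx.cpu(0))
>>> feats = {'user': mx.nd.ones((3, 10)), 'game': mx.nd.ones((2, 10))}
>>> out = conv(hg, feats)               # dict of outputs: {'user': (3, 2), 'game': (2, 2)}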
@@ -108,7 +108,7 @@ def normalize(x, p=2, axis=1, eps=1e-12):

     return x / denom

 class Sequential(gluon.nn.Sequential):
-    r"""A squential container for stacking graph neural network blocks.
+    r"""A squential container for stacking graph neural network blocks

     We support two modes: sequentially apply GNN blocks on the same graph or
     a list of given graphs. In the second case, the number of graphs equals the
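A sketch of the first mode, assuming Sequential's forward feeds the same graph plus each block's output to the next block (the second mode would take a list of graphs instead):

>>> from dgl.nn.mxnet import GraphConv
>>> from dgl.nn.mxnet.utils import Sequential
>>> seq = Sequential()
>>> seq.add(GraphConv(10, 10, allow_zero_in_degree=True))
>>> seq.add(GraphConv(10, 2, allow_zero_in_degree=True))
>>> seq.initialize(ctx=mx.cpu(0))
>>> res = seq(g, feat)                  # mode 1: every block sees the same graph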
@@ -11,13 +11,8 @@ from ....utils import expand_as_pair

 class AGNNConv(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Attention-based Graph Neural Network layer from paper `Attention-based
-    Graph Neural Network for Semi-Supervised Learning
-    <https://arxiv.org/abs/1803.03735>`__.
+    r"""Attention-based Graph Neural Network layer from `Attention-based Graph Neural Network for
+    Semi-Supervised Learning <https://arxiv.org/abs/1803.03735>`__

     .. math::
        H^{l+1} = P H^{l}
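The remaining hunks touch the PyTorch backend; a fresh toy setup for them (illustrative), with AGNNConv preserving the feature width since P only mixes rows of H:

>>> import dgl
>>> import torch
>>> from dgl.nn.pytorch import AGNNConv
>>> g = dgl.add_self_loop(dgl.graph(([0, 1, 2, 3, 2, 5], [1, 2, 3, 4, 0, 3])))
>>> feat = torch.ones(6, 10)
>>> conv = AGNNConv()                   # propagation matrix P from cosine-similarity attention
>>> res = conv(g, feat)                 # width preserved: shape (6, 10)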
@@ -8,13 +8,9 @@ from .graphconv import EdgeWeightNorm

 class APPNPConv(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Approximate Personalized Propagation of Neural Predictions
-    layer from paper `Predict then Propagate: Graph Neural Networks
-    meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__.
+    r"""Approximate Personalized Propagation of Neural Predictions layer from `Predict then
+    Propagate: Graph Neural Networks meet Personalized PageRank
+    <https://arxiv.org/pdf/1810.05997.pdf>`__

     .. math::
        H^{0} &= X
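APPNP propagates predictions with a teleport term back to H^{0}; a sketch with assumed `k` and `alpha`, reusing the PyTorch setup above:

>>> from dgl.nn.pytorch import APPNPConv
>>> conv = APPNPConv(k=3, alpha=0.5)    # 3 power-iteration steps, teleport probability alpha
>>> res = conv(g, feat)                 # feature width unchanged: shape (6, 10)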
@@ -5,12 +5,8 @@ import torch as th

 import torch.nn as nn

 class RadialPooling(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Radial pooling from paper `Atomic Convolutional Networks for
-    Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
+    r"""Radial pooling from `Atomic Convolutional Networks for
+    Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__

     We denote the distance between atom :math:`i` and :math:`j` by :math:`r_{ij}`.

@@ -132,12 +128,8 @@ def reduce_func(nodes):

     return {'hv_new': nodes.mailbox['m'].sum(1)}

 class AtomicConv(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Atomic Convolution Layer from paper `Atomic Convolutional Networks for
-    Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
+    r"""Atomic Convolution Layer from `Atomic Convolutional Networks for
+    Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__

     Denoting the type of atom :math:`i` by :math:`z_i` and the distance between atom
     :math:`i` and :math:`j` by :math:`r_{ij}`.
@@ -6,11 +6,7 @@ import torch.nn as nn

 from .... import function as fn

 class ShiftedSoftplus(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Applies the element-wise function:
+    r"""Applies the element-wise function:

     .. math::
        \text{SSP}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) - \log(\text{shift})

@@ -48,14 +44,8 @@ class ShiftedSoftplus(nn.Module):

         return self.softplus(inputs) - np.log(float(self.shift))

 class CFConv(nn.Module):
-    r"""
-
-    Description
-    -----------
-    CFConv in SchNet.
-
-    SchNet is introduced in `SchNet: A continuous-filter convolutional neural network for
-    modeling quantum interactions <https://arxiv.org/abs/1706.08566>`__.
+    r"""CFConv from `SchNet: A continuous-filter convolutional neural network for
+    modeling quantum interactions <https://arxiv.org/abs/1706.08566>`__

     It combines node and edge features in message passing and updates node representations.
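As a quick sanity check of the SSP formula: with beta = 1 and a shift of 2 (assuming that is the class default, consistent with the `np.log(float(self.shift))` term above), SSP(0) = log(1 + e^0) - log 2 = 0. A stand-alone verification in plain PyTorch:

>>> import math
>>> import torch
>>> import torch.nn.functional as F
>>> x = torch.tensor([0.0, 1.0])
>>> ssp = F.softplus(x) - math.log(2.0)         # beta=1, shift=2
>>> ssp[0].item()                               # log(1 + e^0) - log(2) = 0, up to float rounding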
@@ -9,13 +9,9 @@ from .... import broadcast_nodes, function as fn

 class ChebConv(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Chebyshev Spectral Graph Convolution layer from paper `Convolutional
+    r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
     Neural Networks on Graphs with Fast Localized Spectral Filtering
-    <https://arxiv.org/pdf/1606.09375.pdf>`__.
+    <https://arxiv.org/pdf/1606.09375.pdf>`__

     .. math::
        h_i^{l+1} &= \sum_{k=0}^{K-1} W^{k, l}z_i^{k, l}

@@ -78,11 +74,7 @@ class ChebConv(nn.Module):

         self.linear = nn.Linear(k * in_feats, out_feats, bias)

     def forward(self, graph, feat, lambda_max=None):
-        r"""
-
-        Description
-        -----------
-        Compute ChebNet layer.
+        r"""Compute ChebNet layer.

         Parameters
         ----------
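A sketch for ChebConv, reusing the PyTorch setup above; when `lambda_max` is omitted the layer estimates it from the graph:

>>> from dgl.nn.pytorch import ChebConv
>>> conv = ChebConv(10, 2, k=2)         # Chebyshev basis of order < k
>>> res = conv(g, feat)                 # shape (6, 2)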
@@ -6,13 +6,9 @@ from torch.nn import init

 class DenseChebConv(nn.Module):
-    r"""
-
-    Description
-    -----------
-    Chebyshev Spectral Graph Convolution layer from paper `Convolutional
+    r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
     Neural Networks on Graphs with Fast Localized Spectral Filtering
-    <https://arxiv.org/pdf/1606.09375.pdf>`__.
+    <https://arxiv.org/pdf/1606.09375.pdf>`__

     We recommend to use this module when applying ChebConv on dense graphs.

@@ -81,11 +77,7 @@ class DenseChebConv(nn.Module):

         init.xavier_normal_(self.W[i], init.calculate_gain('relu'))

     def forward(self, adj, feat, lambda_max=None):
-        r"""
-
-        Description
-        -----------
-        Compute (Dense) Chebyshev Spectral Graph Convolution layer.
+        r"""Compute (Dense) Chebyshev Spectral Graph Convolution layer

         Parameters
         ----------
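Unlike ChebConv, the dense variant takes an adjacency matrix rather than a DGLGraph; a sketch with a hand-built 3-node path (all values illustrative):

>>> import torch
>>> from dgl.nn.pytorch import DenseChebConv
>>> adj = torch.tensor([[0., 1., 0.],
...                     [1., 0., 1.],
...                     [0., 1., 0.]])          # dense symmetric adjacency
>>> feat = torch.ones(3, 10)
>>> conv = DenseChebConv(10, 2, k=2)
>>> res = conv(adj, feat)                       # shape (3, 2)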