Unverified Commit 6d9433b0 authored by Mufei Li's avatar Mufei Li Committed by GitHub
Browse files

[Transform] [Doc] Rename transform to transforms and update doc (#3765)

* Update

* Update

* Update

* Fix

* Update

* Update

* Update

* Fix
parent ccaa0bf2
......@@ -9,14 +9,10 @@ from ....utils import expand_as_pair
class EdgeConv(nn.Block):
r"""
r"""EdgeConv layer from `Dynamic Graph CNN for Learning on Point Clouds
<https://arxiv.org/pdf/1801.07829>`__
Description
-----------
EdgeConv layer.
Introduced in "`Dynamic Graph CNN for Learning on Point Clouds
<https://arxiv.org/pdf/1801.07829>`__". Can be described as follows:
It can be described as follows:
.. math::
h_i^{(l+1)} = \max_{j \in \mathcal{N}(i)} (
......
......@@ -12,12 +12,8 @@ from ....utils import expand_as_pair
#pylint: enable=W0235
class GATConv(nn.Block):
r"""
Description
-----------
Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
over an input signal.
r"""Graph attention layer from `Graph Attention Network
<https://arxiv.org/pdf/1710.10903.pdf>`__
.. math::
h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}
......
......@@ -7,12 +7,8 @@ from mxnet.gluon import nn
from .... import function as fn
class GatedGraphConv(nn.Block):
r"""
Description
-----------
Gated Graph Convolution layer from paper `Gated Graph Sequence
Neural Networks <https://arxiv.org/pdf/1511.05493.pdf>`__.
r"""Gated Graph Convolution layer from `Gated Graph Sequence
Neural Networks <https://arxiv.org/pdf/1511.05493.pdf>`__
.. math::
h_{i}^{0} &= [ x_i \| \mathbf{0} ]
......
......@@ -8,12 +8,8 @@ from ....utils import expand_as_pair
class GINConv(nn.Block):
r"""
Description
-----------
Graph Isomorphism Network layer from paper `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
r"""Graph Isomorphism Network (GIN) layer from `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
......
......@@ -12,13 +12,8 @@ from ....utils import expand_as_pair
class GMMConv(nn.Block):
r"""
Description
-----------
The Gaussian Mixture Model Convolution layer from `Geometric Deep
Learning on Graphs and Manifolds using Mixture Model CNNs
<http://openaccess.thecvf.com/content_cvpr_2017/papers/Monti_Geometric_Deep_Learning_CVPR_2017_paper.pdf>`__.
r"""Gaussian Mixture Model Convolution layer from `Geometric Deep Learning on Graphs and
Manifolds using Mixture Model CNNs <https://arxiv.org/abs/1611.08402>`__
.. math::
u_{ij} &= f(x_i, x_j), x_j \in \mathcal{N}(i)
......
......@@ -10,12 +10,10 @@ from ....base import DGLError
from ....utils import expand_as_pair
class GraphConv(gluon.Block):
r"""
r"""Graph convolutional layer from `Semi-Supervised Classification with Graph Convolutional
Networks <https://arxiv.org/abs/1609.02907>`__
Description
-----------
Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
and mathematically is defined as follows:
Mathematically it is defined as follows:
.. math::
h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})
......
......@@ -9,12 +9,8 @@ from ....utils import expand_as_pair
class NNConv(nn.Block):
r"""
Description
-----------
Graph Convolution layer introduced in `Neural Message Passing
for Quantum Chemistry <https://arxiv.org/pdf/1704.01212.pdf>`__.
r"""Graph Convolution layer from `Neural Message Passing
for Quantum Chemistry <https://arxiv.org/pdf/1704.01212.pdf>`__
.. math::
h_{i}^{l+1} = h_{i}^{l} + \mathrm{aggregate}\left(\left\{
......
......@@ -11,15 +11,10 @@ from .. import utils
class RelGraphConv(gluon.Block):
r"""
r"""Relational graph convolution layer from `Modeling Relational Data with Graph
Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
Description
-----------
Relational graph convolution layer.
Relational graph convolution is introduced in "`Modeling Relational Data with Graph
Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
and can be described as below:
It can be described as below:
.. math::
......
......@@ -10,12 +10,8 @@ from ....base import DGLError
from ....utils import expand_as_pair, check_eq_shape
class SAGEConv(nn.Block):
r"""
Description
-----------
GraphSAGE layer from paper `Inductive Representation Learning on
Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__.
r"""GraphSAGE layer from `Inductive Representation Learning on
Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__
.. math::
h_{\mathcal{N}(i)}^{(l+1)} &= \mathrm{aggregate}
......
......@@ -10,12 +10,8 @@ from ....base import DGLError
class SGConv(nn.Block):
r"""
Description
-----------
Simplifying Graph Convolution layer from paper `Simplifying Graph
Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__.
r"""SGC layer from `Simplifying Graph Convolutional Networks
<https://arxiv.org/pdf/1902.07153.pdf>`__
.. math::
H^{K} = (\tilde{D}^{-1/2} \tilde{A} \tilde{D}^{-1/2})^K X \Theta
......
......@@ -9,11 +9,7 @@ from .... import function as fn
class TAGConv(gluon.Block):
r"""
Description
-----------
Topology Adaptive Graph Convolutional layer from paper `Topology
r"""Topology Adaptive Graph Convolutional layer from `Topology
Adaptive Graph Convolutional Networks <https://arxiv.org/pdf/1710.10370.pdf>`__.
.. math::
......
......@@ -118,8 +118,8 @@ class MaxPooling(nn.Block):
class SortPooling(nn.Block):
r"""Apply Sort Pooling (`An End-to-End Deep Learning Architecture for Graph Classification
<https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__) over the nodes in the graph.
r"""Pooling layer from `An End-to-End Deep Learning Architecture for Graph Classification
<https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__
Parameters
----------
......@@ -138,7 +138,7 @@ class SortPooling(nn.Block):
graph : DGLGraph
The graph.
feat : mxnet.NDArray
The input feature with shape :math:`(N, D)` where
The input node feature with shape :math:`(N, D)` where
:math:`N` is the number of nodes in the graph.
Returns
......@@ -161,8 +161,8 @@ class SortPooling(nn.Block):
class GlobalAttentionPooling(nn.Block):
r"""Apply Global Attention Pooling (`Gated Graph Sequence Neural Networks
<https://arxiv.org/pdf/1511.05493.pdf>`__) over the nodes in the graph.
r"""Global Attention Pooling layer from `Gated Graph Sequence Neural Networks
<https://arxiv.org/pdf/1511.05493.pdf>`__
.. math::
r^{(i)} = \sum_{k=1}^{N_i}\mathrm{softmax}\left(f_{gate}
......@@ -190,7 +190,7 @@ class GlobalAttentionPooling(nn.Block):
graph : DGLGraph
The graph.
feat : mxnet.NDArray
The input feature with shape :math:`(N, D)` where
The input node feature with shape :math:`(N, D)` where
:math:`N` is the number of nodes in the graph.
Returns
......@@ -214,8 +214,8 @@ class GlobalAttentionPooling(nn.Block):
class Set2Set(nn.Block):
r"""Apply Set2Set (`Order Matters: Sequence to sequence for sets
<https://arxiv.org/pdf/1511.06391.pdf>`__) over the nodes in the graph.
r"""Set2Set operator from `Order Matters: Sequence to sequence for sets
<https://arxiv.org/pdf/1511.06391.pdf>`__
For each individual graph in the batch, set2set computes
......@@ -257,7 +257,7 @@ class Set2Set(nn.Block):
graph : DGLGraph
The graph.
feat : mxnet.NDArray
The input feature with shape :math:`(N, D)` where
The input node feature with shape :math:`(N, D)` where
:math:`N` is the number of nodes in the graph.
Returns
......
......@@ -5,7 +5,7 @@ from mxnet.gluon import nn
__all__ = ['HeteroGraphConv']
class HeteroGraphConv(nn.Block):
r"""A generic module for computing convolution on heterogeneous graphs.
r"""A generic module for computing convolution on heterogeneous graphs
The heterograph convolution applies sub-modules on their associating
relation graphs, which reads the features from source nodes and writes the
......
......@@ -108,7 +108,7 @@ def normalize(x, p=2, axis=1, eps=1e-12):
return x / denom
class Sequential(gluon.nn.Sequential):
r"""A sequential container for stacking graph neural network blocks.
r"""A sequential container for stacking graph neural network blocks
We support two modes: sequentially apply GNN blocks on the same graph or
a list of given graphs. In the second case, the number of graphs equals the
......
......@@ -11,13 +11,8 @@ from ....utils import expand_as_pair
class AGNNConv(nn.Module):
r"""
Description
-----------
Attention-based Graph Neural Network layer from paper `Attention-based
Graph Neural Network for Semi-Supervised Learning
<https://arxiv.org/abs/1803.03735>`__.
r"""Attention-based Graph Neural Network layer from `Attention-based Graph Neural Network for
Semi-Supervised Learning <https://arxiv.org/abs/1803.03735>`__
.. math::
H^{l+1} = P H^{l}
......
......@@ -8,13 +8,9 @@ from .graphconv import EdgeWeightNorm
class APPNPConv(nn.Module):
r"""
Description
-----------
Approximate Personalized Propagation of Neural Predictions
layer from paper `Predict then Propagate: Graph Neural Networks
meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__.
r"""Approximate Personalized Propagation of Neural Predictions layer from `Predict then
Propagate: Graph Neural Networks meet Personalized PageRank
<https://arxiv.org/pdf/1810.05997.pdf>`__
.. math::
H^{0} &= X
......
......@@ -5,12 +5,8 @@ import torch as th
import torch.nn as nn
class RadialPooling(nn.Module):
r"""
Description
-----------
Radial pooling from paper `Atomic Convolutional Networks for
Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
r"""Radial pooling from `Atomic Convolutional Networks for
Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__
We denote the distance between atom :math:`i` and :math:`j` by :math:`r_{ij}`.
......@@ -132,12 +128,8 @@ def reduce_func(nodes):
return {'hv_new': nodes.mailbox['m'].sum(1)}
class AtomicConv(nn.Module):
r"""
Description
-----------
Atomic Convolution Layer from paper `Atomic Convolutional Networks for
Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
r"""Atomic Convolution Layer from `Atomic Convolutional Networks for
Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__
Denoting the type of atom :math:`i` by :math:`z_i` and the distance between atom
:math:`i` and :math:`j` by :math:`r_{ij}`.
......
......@@ -6,11 +6,7 @@ import torch.nn as nn
from .... import function as fn
class ShiftedSoftplus(nn.Module):
r"""
Description
-----------
Applies the element-wise function:
r"""Applies the element-wise function:
.. math::
\text{SSP}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) - \log(\text{shift})
......@@ -48,14 +44,8 @@ class ShiftedSoftplus(nn.Module):
return self.softplus(inputs) - np.log(float(self.shift))
class CFConv(nn.Module):
r"""
Description
-----------
CFConv in SchNet.
SchNet is introduced in `SchNet: A continuous-filter convolutional neural network for
modeling quantum interactions <https://arxiv.org/abs/1706.08566>`__.
r"""CFConv from `SchNet: A continuous-filter convolutional neural network for
modeling quantum interactions <https://arxiv.org/abs/1706.08566>`__
It combines node and edge features in message passing and updates node representations.
......
......@@ -9,13 +9,9 @@ from .... import broadcast_nodes, function as fn
class ChebConv(nn.Module):
r"""
Description
-----------
Chebyshev Spectral Graph Convolution layer from paper `Convolutional
r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
Neural Networks on Graphs with Fast Localized Spectral Filtering
<https://arxiv.org/pdf/1606.09375.pdf>`__.
<https://arxiv.org/pdf/1606.09375.pdf>`__
.. math::
h_i^{l+1} &= \sum_{k=0}^{K-1} W^{k, l}z_i^{k, l}
......@@ -78,11 +74,7 @@ class ChebConv(nn.Module):
self.linear = nn.Linear(k * in_feats, out_feats, bias)
def forward(self, graph, feat, lambda_max=None):
r"""
Description
-----------
Compute ChebNet layer.
r"""Compute ChebNet layer.
Parameters
----------
......
......@@ -6,13 +6,9 @@ from torch.nn import init
class DenseChebConv(nn.Module):
r"""
Description
-----------
Chebyshev Spectral Graph Convolution layer from paper `Convolutional
r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
Neural Networks on Graphs with Fast Localized Spectral Filtering
<https://arxiv.org/pdf/1606.09375.pdf>`__.
<https://arxiv.org/pdf/1606.09375.pdf>`__
We recommend to use this module when applying ChebConv on dense graphs.
......@@ -81,11 +77,7 @@ class DenseChebConv(nn.Module):
init.xavier_normal_(self.W[i], init.calculate_gain('relu'))
def forward(self, adj, feat, lambda_max=None):
r"""
Description
-----------
Compute (Dense) Chebyshev Spectral Graph Convolution layer.
r"""Compute (Dense) Chebyshev Spectral Graph Convolution layer
Parameters
----------
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment