Unverified Commit 6d9433b0 authored by Mufei Li, committed by GitHub

[Transform] [Doc] Rename transform to transforms and update doc (#3765)

* Update

* Update

* Update

* Fix

* Update

* Update

* Update

* Fix
parent ccaa0bf2
......@@ -6,12 +6,9 @@ from torch.nn import init
class DenseGraphConv(nn.Module):
"""
"""Graph Convolutional layer from `Semi-Supervised Classification with Graph
Convolutional Networks <https://arxiv.org/abs/1609.02907>`__
Description
-----------
Graph Convolutional Network layer where the graph structure
is given by an adjacency matrix.
We recommend user to use this module when applying graph convolution on
dense graphs.
......@@ -92,11 +89,7 @@ class DenseGraphConv(nn.Module):
init.zeros_(self.bias)
def forward(self, adj, feat):
r"""
Description
-----------
Compute (Dense) Graph Convolution layer.
r"""Compute (Dense) Graph Convolution layer.
Parameters
----------
......
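A minimal usage sketch for the dense variant, assuming the usual (in_feats, out_feats) constructor and a forward that takes a dense adjacency matrix:

import torch
from dgl.nn import DenseGraphConv

adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])   # dense adjacency matrix of a 3-node path graph
feat = torch.randn(3, 10)            # one 10-dim feature vector per node
conv = DenseGraphConv(10, 16)
out = conv(adj, feat)                # shape: (3, 16)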
......@@ -5,12 +5,9 @@ from ....utils import check_eq_shape
class DenseSAGEConv(nn.Module):
"""
"""GraphSAGE layer from `Inductive Representation Learning on Large Graphs
<https://arxiv.org/abs/1706.02216>`__
Description
-----------
GraphSAGE layer where the graph structure is given by an
adjacency matrix.
We recommend to use this module when appying GraphSAGE on dense graphs.
Note that we only support gcn aggregator in DenseSAGEConv.
......
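A minimal usage sketch, again assuming an (in_feats, out_feats) constructor and a dense adjacency input:

import torch
from dgl.nn import DenseSAGEConv

adj = torch.ones(4, 4)               # fully connected 4-node graph
feat = torch.randn(4, 10)
conv = DenseSAGEConv(10, 16)         # only the gcn aggregator is supported
out = conv(adj, feat)                # shape: (4, 16)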
......@@ -9,11 +9,8 @@ from ....utils import expand_as_pair
class DotGatConv(nn.Module):
r"""
Description
-----------
Apply dot product version of self attention in GCN.
r"""Apply dot product version of self attention in `Graph Attention Network
<https://arxiv.org/pdf/1710.10903.pdf>`__
.. math::
h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i, j} h_j^{(l)}
......
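A usage sketch, assuming the (in_feats, out_feats, num_heads) constructor used by DGL's other attention layers:

import dgl
import torch
from dgl.nn import DotGatConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))   # a 4-node cycle
feat = torch.randn(4, 10)
conv = DotGatConv(10, 8, num_heads=4)
out = conv(g, feat)                  # shape: (4, 4, 8), one output per head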
......@@ -8,14 +8,10 @@ from ....utils import expand_as_pair
class EdgeConv(nn.Module):
r"""
r"""EdgeConv layer from `Dynamic Graph CNN for Learning on Point Clouds
<https://arxiv.org/pdf/1801.07829>`__
Description
-----------
EdgeConv layer.
Introduced in "`Dynamic Graph CNN for Learning on Point Clouds
<https://arxiv.org/pdf/1801.07829>`__". Can be described as follows:
It can be described as follows:
.. math::
h_i^{(l+1)} = \max_{j \in \mathcal{N}(i)} (
......
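A usage sketch on a point-cloud style k-NN graph, assuming an (in_feat, out_feat) constructor:

import dgl
import torch
from dgl.nn import EdgeConv

points = torch.randn(20, 3)          # 20 points in 3-D space
g = dgl.knn_graph(points, 5)         # connect each point to its 5 nearest neighbors
conv = EdgeConv(3, 16)
out = conv(g, points)                # shape: (20, 16)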
......@@ -10,15 +10,10 @@ from ....base import DGLError
# pylint: enable=W0235
class EGATConv(nn.Module):
r"""
Description
-----------
Apply Graph Attention Layer over input graph. EGAT is an extension
of regular `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
handling edge features, detailed description is available in `Rossmann-Toolbox
<https://pubmed.ncbi.nlm.nih.gov/34571541/>`__ (see supplementary data).
The difference appears in the method how unnormalized attention scores :math:`e_{ij}`
are obtained:
r"""Graph attention layer that handles edge features from `Rossmann-Toolbox
<https://pubmed.ncbi.nlm.nih.gov/34571541/>`__ (see supplementary data)
The difference lies in how unnormalized attention scores :math:`e_{ij}` are obtained:
.. math::
e_{ij} &= \vec{F} (f_{ij}^{\prime})
......@@ -27,7 +22,7 @@ class EGATConv(nn.Module):
where :math:`f_{ij}^{\prime}` are edge features, :math:`\mathrm{A}` is a weight matrix and
:math:`\vec{F}` is a weight vector. After that, the resulting node features
:math:`h_{i}^{\prime}` are updated in the same way as in a regular GAT.
Parameters
......
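A usage sketch; the constructor keywords (in_node_feats, in_edge_feats, out_node_feats, out_edge_feats, num_heads) and the pair of return values are assumptions from the description above:

import dgl
import torch
from dgl.nn import EGATConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))         # 3-node cycle
nfeat = torch.randn(3, 10)                    # node features
efeat = torch.randn(3, 5)                     # one feature vector per edge
conv = EGATConv(in_node_feats=10, in_edge_feats=5,
                out_node_feats=8, out_edge_feats=8, num_heads=4)
new_nfeat, new_efeat = conv(g, nfeat, efeat)  # shapes: (3, 4, 8) and (3, 4, 8)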
......@@ -11,12 +11,8 @@ from ....utils import expand_as_pair
# pylint: enable=W0235
class GATConv(nn.Module):
r"""
Description
-----------
Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
over an input signal.
r"""Graph attention layer from `Graph Attention Network
<https://arxiv.org/pdf/1710.10903.pdf>`__
.. math::
h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}
......
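A minimal usage sketch; note that multi-head attention adds a heads dimension to the output:

import dgl
import torch
from dgl.nn import GATConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
g = dgl.add_self_loop(g)             # GATConv rejects 0-in-degree nodes by default
feat = torch.randn(4, 10)
conv = GATConv(10, 8, num_heads=3)
out = conv(g, feat)                  # shape: (4, 3, 8)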
......@@ -8,12 +8,8 @@ from .... import function as fn
class GatedGraphConv(nn.Module):
r"""
Description
-----------
Gated Graph Convolution layer from paper `Gated Graph Sequence
Neural Networks <https://arxiv.org/pdf/1511.05493.pdf>`__.
r"""Gated Graph Convolution layer from `Gated Graph Sequence
Neural Networks <https://arxiv.org/pdf/1511.05493.pdf>`__
.. math::
h_{i}^{0} &= [ x_i \| \mathbf{0} ]
......
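A usage sketch, assuming the layer takes the number of propagation steps and edge types, plus a per-edge type tensor in forward:

import dgl
import torch
from dgl.nn import GatedGraphConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = torch.randn(3, 10)
conv = GatedGraphConv(in_feats=10, out_feats=10, n_steps=3, n_etypes=1)
etypes = torch.zeros(g.num_edges(), dtype=torch.long)  # single edge type
out = conv(g, feat, etypes)          # shape: (3, 10)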
......@@ -11,13 +11,8 @@ from ....utils import expand_as_pair
# pylint: enable=W0235
class GATv2Conv(nn.Module):
r"""
Description
-----------
Apply GATv2 from
`How Attentive are Graph Attention Networks? <https://arxiv.org/pdf/2105.14491.pdf>`__
over an input signal.
r"""GATv2 from `How Attentive are Graph Attention Networks?
<https://arxiv.org/pdf/2105.14491.pdf>`__
.. math::
h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{ij}^{(l)} W^{(l)}_{right} h_j^{(l)}
......
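A usage sketch, mirroring the GATConv call pattern:

import dgl
import torch
from dgl.nn import GATv2Conv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 10)
conv = GATv2Conv(10, 8, num_heads=3)
out = conv(g, feat)                  # shape: (3, 3, 8)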
......@@ -12,14 +12,10 @@ from .graphconv import EdgeWeightNorm
class GCN2Conv(nn.Module):
r"""Graph Convolutional Network via Initial residual
and Identity mapping (GCNII) from `Simple and Deep Graph Convolutional
Networks <https://arxiv.org/abs/2007.02133>`__
r"""
Description
-----------
The Graph Convolutional Network via Initial residual
and Identity mapping (GCNII) was introduced in `"Simple and Deep Graph Convolutional
Networks" <https://arxiv.org/abs/2007.02133>`_ paper.
It is mathematically is defined as follows:
.. math::
......
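A usage sketch; the layer index argument and the extra initial-feature input to forward are assumptions based on the GCNII formulation, which reuses the layer-0 representation at every depth:

import dgl
import torch
from dgl.nn import GCN2Conv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 10)            # GCNII keeps the feature size fixed across layers
conv1 = GCN2Conv(10, layer=1, alpha=0.5)
conv2 = GCN2Conv(10, layer=2, alpha=0.5)
h = conv1(g, feat, feat)             # forward also takes the initial features
h = conv2(g, h, feat)                # shape: (3, 10)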
......@@ -8,12 +8,8 @@ from ....utils import expand_as_pair
class GINConv(nn.Module):
r"""
Description
-----------
Graph Isomorphism Network layer from paper `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
r"""Graph Isomorphism Network layer from `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
......
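A minimal usage sketch; GINConv wraps a learnable module that plays the role of :math:`f_\Theta`:

import dgl
import torch
from dgl.nn import GINConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = torch.randn(3, 10)
lin = torch.nn.Linear(10, 16)        # the learnable function f_Theta
conv = GINConv(lin, 'sum')
out = conv(g, feat)                  # shape: (3, 16)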
......@@ -11,13 +11,9 @@ from ....utils import expand_as_pair
class GMMConv(nn.Module):
r"""
Description
-----------
The Gaussian Mixture Model Convolution layer from `Geometric Deep
r"""Gaussian Mixture Model Convolution layer from `Geometric Deep
Learning on Graphs and Manifolds using Mixture Model CNNs
<http://openaccess.thecvf.com/content_cvpr_2017/papers/Monti_Geometric_Deep_Learning_CVPR_2017_paper.pdf>`__.
<https://arxiv.org/abs/1611.08402>`__
.. math::
u_{ij} &= f(x_i, x_j), x_j \in \mathcal{N}(i)
......
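A usage sketch; the per-edge pseudo-coordinate tensor supplies the :math:`u_{ij}` values in the formula above:

import dgl
import torch
from dgl.nn import GMMConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = torch.randn(3, 10)
pseudo = torch.randn(g.num_edges(), 2)  # 2-dim pseudo-coordinates u_ij per edge
conv = GMMConv(10, 16, dim=2, n_kernels=3)
out = conv(g, feat, pseudo)          # shape: (3, 16)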
......@@ -7,16 +7,12 @@ from torch.nn import init
from .... import function as fn
from ....base import DGLError
from ....utils import expand_as_pair
from ....transforms import reverse
from ....convert import block_to_graph
from ....heterograph import DGLBlock
class EdgeWeightNorm(nn.Module):
r"""
Description
-----------
This module normalizes positive scalar edge weights on a graph
r"""This module normalizes positive scalar edge weights on a graph
following the form in `GCN <https://arxiv.org/abs/1609.02907>`__.
Mathematically, setting ``norm='both'`` yields the following normalization term:
......@@ -139,12 +135,10 @@ class EdgeWeightNorm(nn.Module):
# pylint: disable=W0235
class GraphConv(nn.Module):
r"""
r"""Graph convolutional layer from `Semi-Supervised Classification with Graph Convolutional
Networks <https://arxiv.org/abs/1609.02907>`__
Description
-----------
Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
and mathematically is defined as follows:
Mathematically it is defined as follows:
.. math::
h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ji}}h_j^{(l)}W^{(l)})
......
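A usage sketch combining the two modules in this file: EdgeWeightNorm precomputes GCN-style normalized weights, which are then fed to a GraphConv whose own normalization is turned off so it is not applied twice:

import dgl
import torch
from dgl.nn import EdgeWeightNorm, GraphConv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 10)
weight = torch.rand(g.num_edges()) + 0.1      # positive scalar edge weights
norm = EdgeWeightNorm(norm='both')            # the GCN-style normalization above
norm_weight = norm(g, weight)
conv = GraphConv(10, 16, norm='none')         # normalization already folded into the weights
out = conv(g, feat, edge_weight=norm_weight)  # shape: (3, 16)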
......@@ -9,9 +9,9 @@ from ..linear import TypedLinear
from ..softmax import edge_softmax
class HGTConv(nn.Module):
r"""Heterogeneous graph transformer convolution.
r"""Heterogeneous graph transformer convolution from `Heterogeneous Graph Transformer
<https://arxiv.org/abs/2003.01332>`__
Introduced in "`Heterogeneous Graph Transformer <https://arxiv.org/abs/2003.01332>`__".
Given a graph :math:`G(V, E)` and input node features :math:`H^{(l-1)}`,
it computes the new node features as follows:
......
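A usage sketch; HGTConv here operates on a homogeneous graph carrying node-type and edge-type tensors, and the constructor keywords are assumptions from the description above:

import dgl
import torch
from dgl.nn import HGTConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
x = torch.randn(4, 10)
ntype = torch.tensor([0, 0, 1, 1])   # two node types
etype = torch.tensor([0, 1, 0, 1])   # two edge types
conv = HGTConv(10, head_size=8, num_heads=4, num_ntypes=2, num_etypes=2)
out = conv(g, x, ntype, etype)       # shape: (4, 32), i.e. head_size * num_heads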
......@@ -10,12 +10,8 @@ from ....utils import expand_as_pair
class NNConv(nn.Module):
r"""
Description
-----------
Graph Convolution layer introduced in `Neural Message Passing
for Quantum Chemistry <https://arxiv.org/pdf/1704.01212.pdf>`__.
r"""Graph Convolution layer from `Neural Message Passing
for Quantum Chemistry <https://arxiv.org/pdf/1704.01212.pdf>`__
.. math::
h_{i}^{l+1} = h_{i}^{l} + \mathrm{aggregate}\left(\left\{
......
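A usage sketch; the edge network maps each edge feature vector to a flattened per-edge weight matrix, as the MPNN formulation requires:

import dgl
import torch
from dgl.nn import NNConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = torch.randn(3, 10)
efeat = torch.randn(g.num_edges(), 4)   # 4-dim edge features
edge_func = torch.nn.Linear(4, 10 * 16) # maps an edge feature to a (10 x 16) matrix
conv = NNConv(10, 16, edge_func, aggregator_type='mean')
out = conv(g, feat, efeat)           # shape: (3, 16)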
......@@ -7,11 +7,10 @@ from .... import function as fn
from ..linear import TypedLinear
class RelGraphConv(nn.Module):
r"""Relational graph convolution layer.
r"""Relational graph convolution layer from `Modeling Relational Data with Graph
Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
Relational graph convolution is introduced in "`Modeling Relational Data with Graph
Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
and can be described in DGL as below:
It can be described in as below:
.. math::
......
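A usage sketch; each edge carries a relation id, and basis regularization shares parameters across relations:

import dgl
import torch
from dgl.nn import RelGraphConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = torch.randn(4, 10)
etypes = torch.tensor([0, 1, 0, 1])  # relation id of each edge
conv = RelGraphConv(10, 16, num_rels=2, regularizer='basis', num_bases=2)
out = conv(g, feat, etypes)          # shape: (4, 16)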
......@@ -10,12 +10,8 @@ from ....utils import expand_as_pair, check_eq_shape, dgl_warning
class SAGEConv(nn.Module):
r"""
Description
-----------
GraphSAGE layer from paper `Inductive Representation Learning on
Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__.
r"""GraphSAGE layer from `Inductive Representation Learning on
Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__
.. math::
h_{\mathcal{N}(i)}^{(l+1)} &= \mathrm{aggregate}
......
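A minimal usage sketch; the aggregator implements the aggregate term in the formula above:

import dgl
import torch
from dgl.nn import SAGEConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = torch.randn(4, 10)
conv = SAGEConv(10, 16, aggregator_type='mean')  # other choices: gcn, pool, lstm
out = conv(g, feat)                  # shape: (4, 16)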
......@@ -9,12 +9,8 @@ from .graphconv import EdgeWeightNorm
class SGConv(nn.Module):
r"""
Description
-----------
Simplifying Graph Convolution layer from paper `Simplifying Graph
Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__.
r"""SGC layer from `Simplifying Graph
Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__
.. math::
H^{K} = (\tilde{D}^{-1/2} \tilde{A} \tilde{D}^{-1/2})^K X \Theta
......
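A minimal usage sketch; k controls the power of the normalized adjacency applied in a single layer:

import dgl
import torch
from dgl.nn import SGConv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 10)
conv = SGConv(10, 16, k=2)           # applies the K=2 propagation from the formula above
out = conv(g, feat)                  # shape: (3, 16)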
......@@ -8,12 +8,8 @@ from .graphconv import EdgeWeightNorm
class TAGConv(nn.Module):
r"""
Description
-----------
Topology Adaptive Graph Convolutional layer from paper `Topology
Adaptive Graph Convolutional Networks <https://arxiv.org/pdf/1710.10370.pdf>`__.
r"""Topology Adaptive Graph Convolutional layer from `Topology
Adaptive Graph Convolutional Networks <https://arxiv.org/pdf/1710.10370.pdf>`__
.. math::
H^{K} = {\sum}_{k=0}^K (D^{-1/2} A D^{-1/2})^{k} X {\Theta}_{k},
......
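A minimal usage sketch; unlike SGC, TAGConv learns a separate weight for each power of the normalized adjacency:

import dgl
import torch
from dgl.nn import TAGConv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 10)
conv = TAGConv(10, 16, k=2)          # mixes powers 0..k of the normalized adjacency
out = conv(g, feat)                  # shape: (3, 16)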
......@@ -7,12 +7,9 @@ import torch.nn.functional as F
from .... import function as fn
class TWIRLSConv(nn.Module):
r"""
Description
-----------
Together with iteratively reweighting least squre from paper `Graph Neural Networks Inspired
by Classical Iterative Algorithms <https://arxiv.org/pdf/2103.06064.pdf>`__.
r"""Convolution together with iteratively reweighting least squre from
`Graph Neural Networks Inspired by Classical Iterative Algorithms
<https://arxiv.org/pdf/2103.06064.pdf>`__
Parameters
----------
......
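A usage sketch; the constructor argument names (input_d, output_d, hidden_d, prop_step) are assumptions and not confirmed by the diff shown here:

import dgl
import torch
from dgl.nn import TWIRLSConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
feat = torch.randn(3, 10)
conv = TWIRLSConv(input_d=10, output_d=16, hidden_d=32, prop_step=4)
out = conv(g, feat)                  # shape: (3, 16)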
......@@ -10,13 +10,10 @@ from ....base import NID, EID
from ....subgraph import khop_in_subgraph
class GNNExplainer(nn.Module):
r"""
r"""GNNExplainer model from `GNNExplainer: Generating Explanations for
Graph Neural Networks <https://arxiv.org/abs/1903.03894>`__
Description
-----------
GNNExplainer model from paper `GNNExplainer: Generating Explanations for
Graph Neural Networks <https://arxiv.org/abs/1903.03894>`__ for identifying
compact subgraph structures and small subsets of node features that play a
It identifies compact subgraph structures and small subsets of node features that play a
critical role in GNN-based node classification and graph classification.
Parameters
......
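A usage sketch; GNNExplainer wraps a trained model whose forward is assumed to accept (graph, feat, eweight=None), since the explainer optimizes an edge mask by reweighting edges:

import dgl
import torch
from dgl.nn import GNNExplainer, GraphConv

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = GraphConv(10, 2)
    def forward(self, g, feat, eweight=None):
        # eweight carries the explainer's soft edge mask during optimization
        return self.conv(g, feat, edge_weight=eweight)

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 10)
explainer = GNNExplainer(Model(), num_hops=1)
# masks indicate which features and edges matter for node 0's prediction
new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat)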