"tests/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "d96cbacacdb1e0a49ab436c3f647d002e0f411bf"
Unverified Commit aa06e457, authored by Zihao Ye, committed by GitHub

[Doc] Improve the docstring of several nn modules. (#1187)



* upd

* upd

* upd

* upd

* lint

* upd

* upd

* upd

* upd

* upd
Co-authored-by: VoVAllen <VoVAllen@users.noreply.github.com>
parent 3ef757db
@@ -11,8 +11,7 @@ class DenseChebConv(nn.Block):
     Neural Networks on Graphs with Fast Localized Spectral Filtering
     <https://arxiv.org/pdf/1606.09375.pdf>`__.
 
-    We recommend to use this module when inducing ChebConv operations on dense
-    graphs / k-hop graphs.
+    We recommend using this module when applying ChebConv on dense graphs.
 
     Parameters
     ----------
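The deleted wording mentions k-hop graphs, which are the typical source of the dense adjacency matrices these modules expect. As a minimal illustration (a hypothetical helper, not part of DGL or this commit), a dense k-hop adjacency can be built from matrix powers:

```python
import numpy as np

def k_hop_adjacency(adj, k):
    """Dense 0/1 matrix whose (i, j) entry is 1 iff j is within k hops of i."""
    n = adj.shape[0]
    reach = np.linalg.matrix_power(adj + np.eye(n), k)  # (A + I)^k counts walks of length <= k
    return (reach > 0).astype(np.float32)
```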
@@ -9,8 +9,8 @@ from mxnet.gluon import nn
 class DenseGraphConv(nn.Block):
     """Graph Convolutional Network layer where the graph structure
     is given by an adjacency matrix.
-    We recommend user to use this module when inducing graph convolution
-    on dense graphs / k-hop graphs.
+    We recommend using this module when applying graph convolution on
+    dense graphs.
 
     Parameters
     ----------
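A minimal usage sketch for the MXNet module above, assuming the constructor takes (in_feats, out_feats) and forward takes (adj, feat), as in DGL releases of this period; verify against the installed version:

```python
import mxnet as mx
from dgl.nn.mxnet.conv import DenseGraphConv

adj = mx.nd.array([[0, 1, 0],
                   [1, 0, 1],
                   [0, 1, 0]])       # dense adjacency matrix of a 3-node path
feat = mx.nd.random.randn(3, 4)      # one 4-dim feature vector per node

conv = DenseGraphConv(4, 2)
conv.initialize()
out = conv(adj, feat)                # expected shape: (3, 2)
```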
@@ -9,8 +9,7 @@ from mxnet.gluon import nn
 class DenseSAGEConv(nn.Block):
     """GraphSAGE layer where the graph structure is given by an
     adjacency matrix.
 
-    We recommend to use this module when inducing GraphSAGE operations
-    on dense graphs / k-hop graphs.
+    We recommend using this module when applying GraphSAGE on dense graphs.
 
     Note that we only support gcn aggregator in DenseSAGEConv.
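For reference, the gcn aggregator the note mentions reduces to a mean over a node and its neighbors before the linear projection. An illustrative numpy sketch of that aggregation (not the module's actual code):

```python
import numpy as np

def gcn_aggregate(adj, feat):
    adj_hat = adj + np.eye(adj.shape[0])       # add self-loops
    degs = adj_hat.sum(axis=1, keepdims=True)  # |N(v)| + 1
    return adj_hat @ feat / degs               # mean over {v} union N(v)
```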
@@ -108,7 +108,7 @@ def normalize(x, p=2, axis=1, eps=1e-12):
     return x / denom
 
 class Sequential(gluon.nn.Sequential):
-    """A sequential container for stacking graph neural network blocks.
+    r"""A sequential container for stacking graph neural network blocks.
 
     We support two modes: sequentially apply GNN blocks on the same graph or
     a list of given graphs. In the second case, the number of graphs equals the
@@ -147,7 +147,6 @@ class Sequential(gluon.nn.Sequential):
     >>> n_feat = nd.random.randn(3, 4)
     >>> e_feat = nd.random.randn(9, 4)
     >>> net(g, n_feat, e_feat)
-
     (
     [[ 12.412863 99.61184 21.472883 -57.625923 ]
     [ 10.08097 100.68611 20.627377 -60.13458 ]
@@ -192,7 +191,6 @@ class Sequential(gluon.nn.Sequential):
     >>> net.initialize()
     >>> n_feat = nd.random.randn(32, 4)
     >>> net([g1, g2, g3], n_feat)
-
     [[-101.289566 -22.584694 -89.25348 -151.6447 ]
     [-130.74239 -49.494812 -120.250854 -199.81546 ]
     [-112.32089 -50.036713 -116.13266 -190.38638 ]
@@ -203,15 +201,17 @@ class Sequential(gluon.nn.Sequential):
         super(Sequential, self).__init__(prefix=prefix, params=params)
 
     def forward(self, graph, *feats):
-        """Sequentially apply modules to the input.
+        r"""Sequentially apply modules to the input.
 
         Parameters
         ----------
-        graph: a DGLGraph or a list of DGLGraphs.
-        *feats: input features.
-            The output of i-th block should match that of the input
-            of (i+1)-th block.
+        graph : DGLGraph or list of DGLGraphs
+            The graph(s) to apply modules on.
+        *feats :
+            Input features.
+            The output of :math:`i`-th block should match that of the input
+            of :math:`(i+1)`-th block.
         """
         if isinstance(graph, list):
             for graph_i, module in zip(graph, self):
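The hunk is truncated after the isinstance check. A plausible reconstruction of the dispatch logic implementing the two modes the docstring describes (a sketch, not the verbatim source):

```python
def forward(self, graph, *feats):
    if isinstance(graph, list):
        # Mode 2: one graph per block, consumed in order.
        for graph_i, module in zip(graph, self):
            feats = module(graph_i, *feats)
            if not isinstance(feats, tuple):
                feats = (feats,)
    else:
        # Mode 1: every block runs on the same graph.
        for module in self:
            feats = module(graph, *feats)
            if not isinstance(feats, tuple):
                feats = (feats,)
    return feats[0] if len(feats) == 1 else feats
```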
@@ -10,8 +10,7 @@ class DenseChebConv(nn.Module):
     Neural Networks on Graphs with Fast Localized Spectral Filtering
     <https://arxiv.org/pdf/1606.09375.pdf>`__.
 
-    We recommend to use this module when inducing ChebConv operations on dense
-    graphs / k-hop graphs.
+    We recommend using this module when applying ChebConv on dense graphs.
 
     Parameters
     ----------
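A minimal PyTorch sketch for the module above, assuming an (in_feats, out_feats, k) constructor and an (adj, feat) forward, as in DGL of this era; check the installed version:

```python
import torch
from dgl.nn.pytorch.conv import DenseChebConv

adj = torch.tensor([[0., 1., 1.],
                    [1., 0., 1.],
                    [1., 1., 0.]])   # dense adjacency of a triangle
feat = torch.rand(3, 4)

conv = DenseChebConv(4, 2, 2)        # Chebyshev filters up to order k = 2
out = conv(adj, feat)                # expected shape: (3, 2)
```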
@@ -8,8 +8,8 @@ from torch.nn import init
 class DenseGraphConv(nn.Module):
     """Graph Convolutional Network layer where the graph structure
     is given by an adjacency matrix.
-    We recommend user to use this module when inducing graph convolution
-    on dense graphs / k-hop graphs.
+    We recommend using this module when applying graph convolution on
+    dense graphs.
 
     Parameters
     ----------
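The dense layer applies the familiar GCN propagation rule H' = D^{-1/2} A D^{-1/2} X W directly to the adjacency matrix. An illustrative torch sketch of that rule (not the module's code, which also handles bias and activation options):

```python
import torch

def dense_gcn_propagate(adj, feat, weight):
    deg = adj.sum(dim=1).clamp(min=1)                     # guard isolated nodes
    norm = deg.pow(-0.5)
    adj_hat = norm.unsqueeze(1) * adj * norm.unsqueeze(0) # D^-1/2 A D^-1/2
    return adj_hat @ feat @ weight
```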
@@ -6,8 +6,7 @@ from torch import nn
 class DenseSAGEConv(nn.Module):
     """GraphSAGE layer where the graph structure is given by an
     adjacency matrix.
 
-    We recommend to use this module when inducing GraphSAGE operations
-    on dense graphs / k-hop graphs.
+    We recommend using this module when applying GraphSAGE on dense graphs.
 
     Note that we only support gcn aggregator in DenseSAGEConv.
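And a matching PyTorch usage sketch, under the same signature assumptions as the examples above (only the gcn aggregator is supported, per the note):

```python
import torch
from dgl.nn.pytorch.conv import DenseSAGEConv

adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])
feat = torch.rand(3, 4)

conv = DenseSAGEConv(4, 2)
out = conv(adj, feat)                # expected shape: (3, 2)
```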
@@ -104,7 +104,7 @@ class Identity(nn.Module):
         return x
 
 class Sequential(nn.Sequential):
-    """A sequential container for stacking graph neural network modules.
+    r"""A sequential container for stacking graph neural network modules.
 
     We support two modes: sequentially apply GNN modules on the same graph or
     a list of given graphs. In the second case, the number of graphs equals the
@@ -112,7 +112,8 @@ class Sequential(nn.Sequential):
     Parameters
     ----------
-    *args : sub-modules of type torch.nn.Module, will be added to the container in
+    *args :
+        Sub-modules of type torch.nn.Module, will be added to the container in
         the order they are passed in the constructor.
 
     Examples
@@ -144,7 +145,6 @@ class Sequential(nn.Sequential):
     >>> n_feat = torch.rand(3, 4)
     >>> e_feat = torch.rand(9, 4)
     >>> net(g, n_feat, e_feat)
-
     (tensor([[39.8597, 45.4542, 25.1877, 30.8086],
             [40.7095, 45.3985, 25.4590, 30.0134],
             [40.7894, 45.2556, 25.5221, 30.4220]]), tensor([[80.3772, 89.7752, 50.7762, 60.5520],
@@ -181,7 +181,6 @@ class Sequential(nn.Sequential):
     >>> net = Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer())
     >>> n_feat = torch.rand(32, 4)
     >>> net([g1, g2, g3], n_feat)
-
     tensor([[209.6221, 225.5312, 193.8920, 220.1002],
             [250.0169, 271.9156, 240.2467, 267.7766],
             [220.4007, 239.7365, 213.8648, 234.9637],
@@ -191,15 +190,17 @@ class Sequential(nn.Sequential):
         super(Sequential, self).__init__(*args)
 
     def forward(self, graph, *feats):
-        """Sequentially apply modules to the input.
+        r"""Sequentially apply modules to the input.
 
         Parameters
        ----------
-        graph: a DGLGraph or a list of DGLGraphs.
-        *feats: input features.
-            The output of i-th block should match that of the input
-            of (i+1)-th block.
+        graph : DGLGraph or list of DGLGraphs
+            The graph(s) to apply modules on.
+        *feats :
+            Input features.
+            The output of :math:`i`-th block should match that of the input
+            of :math:`(i+1)`-th block.
         """
         if isinstance(graph, list):
             for graph_i, module in zip(graph, self):
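To round off, a sketch of the second mode on the PyTorch side, with one graph per module. ExampleLayer here is a trivial stand-in (the docstring's own example defines a richer one), the import path assumes Sequential lives in dgl.nn.pytorch.utils as this hunk suggests, and the DGLGraph construction follows the API of this DGL generation:

```python
import dgl
import torch
import torch.nn as nn
from dgl.nn.pytorch.utils import Sequential

class ExampleLayer(nn.Module):
    def forward(self, graph, feat):
        return feat                  # identity stand-in for a real GNN module

g1, g2, g3 = dgl.DGLGraph(), dgl.DGLGraph(), dgl.DGLGraph()
for g in (g1, g2, g3):
    g.add_nodes(32)

net = Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer())
n_feat = torch.rand(32, 4)
out = net([g1, g2, g3], n_feat)      # each module consumes its own graph
```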