Unverified commit 31772b14 authored by blokhinnv, committed by GitHub

fixed typos in docs and docstring (#3231)


Co-authored-by: Quan (Andy) Gan <coin2028@hotmail.com>
parent 6f36dd63
@@ -35,7 +35,7 @@ The construction function performs the following steps:
 In construction function, one first needs to set the data dimensions. For
 general PyTorch module, the dimensions are usually input dimension,
-output dimension and hidden dimensions. For graph neural, the input
+output dimension and hidden dimensions. For graph neural networks, the input
 dimension can be split into source node dimension and destination node
 dimension.
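For context on the source/destination split mentioned in this hunk: many DGL modules accept a pair of input dimensions for bipartite inputs. Below is a minimal sketch (not part of this commit) using ``dglnn.SAGEConv``; all names and sizes are illustrative.

.. code:: python

    import torch
    import dgl
    import dgl.nn.pytorch as dglnn

    # Source and destination nodes may have different feature sizes,
    # so the input dimension is given as a (src, dst) pair.
    conv = dglnn.SAGEConv(in_feats=(10, 5), out_feats=7,
                          aggregator_type='mean')

    # A tiny 'user' -> 'item' bipartite graph for illustration.
    g = dgl.heterograph({('user', 'clicks', 'item'): ([0, 1], [0, 1])})
    h_src = torch.randn(2, 10)  # 'user' (source) features
    h_dst = torch.randn(2, 5)   # 'item' (destination) features
    out = conv(g, (h_src, h_dst))  # shape: (2, 7)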
...
@@ -69,13 +69,13 @@ e.g. as logits of a categorical distribution.
         def __init__(self, in_features, out_classes):
             super().__init__()
             self.W = nn.Linear(in_features * 2, out_classes)

         def apply_edges(self, edges):
             h_u = edges.src['h']
             h_v = edges.dst['h']
             score = self.W(torch.cat([h_u, h_v], 1))
             return {'score': score}

         def forward(self, graph, h):
             # h contains the node representations computed from the GNN defined
             # in the node classification section (Section 5.1).
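The hunk ends before the body of ``forward``. As a point of reference, the usual DGL pattern here is to materialize ``h`` on the graph and call ``apply_edges`` inside a local scope; the following completion is a sketch, not text from the diff.

.. code:: python

        def forward(self, graph, h):
            with graph.local_scope():
                graph.ndata['h'] = h
                # Compute a score for every edge with the UDF above.
                graph.apply_edges(self.apply_edges)
                return graph.edata['score']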
@@ -156,17 +156,17 @@ You can similarly write a ``HeteroMLPPredictor``.
 .. code:: python

-    class MLPPredictor(nn.Module):
+    class HeteroMLPPredictor(nn.Module):
         def __init__(self, in_features, out_classes):
             super().__init__()
             self.W = nn.Linear(in_features * 2, out_classes)

         def apply_edges(self, edges):
             h_u = edges.src['h']
             h_v = edges.dst['h']
             score = self.W(torch.cat([h_u, h_v], 1))
             return {'score': score}

         def forward(self, graph, h, etype):
             # h contains the node representations for each edge type computed from
             # the GNN for heterogeneous graphs defined in the node classification
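Likewise, the heterogeneous ``forward`` body is cut off by the hunk. A sketch of its likely shape, restricting ``apply_edges`` to the requested edge type:

.. code:: python

        def forward(self, graph, h, etype):
            # h is a dict mapping node type -> representations.
            with graph.local_scope():
                graph.ndata['h'] = h
                graph.apply_edges(self.apply_edges, etype=etype)
                return graph.edges[etype].data['score']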
@@ -271,12 +271,12 @@ can write your predictor module as follows.
         def __init__(self, in_dims, n_classes):
             super().__init__()
             self.W = nn.Linear(in_dims * 2, n_classes)

         def apply_edges(self, edges):
             x = torch.cat([edges.src['h'], edges.dst['h']], 1)
             y = self.W(x)
             return {'score': y}

         def forward(self, graph, h):
             # h contains the node representations for each edge type computed from
             # the GNN for heterogeneous graphs defined in the node classification
@@ -308,7 +308,7 @@ The training loop then simply be the following:
     user_feats = hetero_graph.nodes['user'].data['feature']
     item_feats = hetero_graph.nodes['item'].data['feature']
     node_features = {'user': user_feats, 'item': item_feats}

     opt = torch.optim.Adam(model.parameters())
     for epoch in range(10):
         logits = model(hetero_graph, node_features, dec_graph)
...
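The training loop is truncated mid-iteration by this hunk. For orientation, the remainder of a typical step would compute a loss on the edge-type logits and update the model; ``edge_label`` below is a hypothetical ground-truth tensor, not a name taken from the diff.

.. code:: python

    opt = torch.optim.Adam(model.parameters())
    for epoch in range(10):
        logits = model(hetero_graph, node_features, dec_graph)
        # edge_label: hypothetical tensor of ground-truth edge types.
        loss = F.cross_entropy(logits, edge_label)
        opt.zero_grad()
        loss.backward()
        opt.step()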
@@ -101,7 +101,7 @@ where :math:`h_g` is the representation of :math:`g`, :math:`\mathcal{V}` is
 the set of nodes in :math:`g`, :math:`h_v` is the feature of node :math:`v`.

 DGL provides built-in support for common readout operations. For example,
-:func:`dgl.readout_nodes` implements the above readout operation.
+:func:`dgl.mean_nodes` implements the above readout operation.

 Once :math:`h_g` is available, one can pass it through an MLP layer for
 classification output.
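Since the fix redirects readers from :func:`dgl.readout_nodes` to :func:`dgl.mean_nodes`, a quick check of what the latter computes on a single graph:

.. code:: python

    import dgl
    import torch

    g = dgl.graph(([0, 1], [1, 2]))  # 3 nodes
    g.ndata['h'] = torch.tensor([1., 2., 3.])
    dgl.mean_nodes(g, 'h')
    # tensor([2.])  # (1 + 2 + 3) / 3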
@@ -132,10 +132,10 @@ readout result will be :math:`(B, D)`.
     g1.ndata['h'] = torch.tensor([1., 2.])
     g2 = dgl.graph(([0, 1], [1, 2]))
     g2.ndata['h'] = torch.tensor([1., 2., 3.])

     dgl.readout_nodes(g1, 'h')
     # tensor([3.])  # 1 + 2

     bg = dgl.batch([g1, g2])
     dgl.readout_nodes(bg, 'h')
     # tensor([3., 6.])  # [1 + 2, 1 + 2 + 3]
@@ -164,7 +164,7 @@ Being aware of the above computation rules, one can define a model as follows.
             self.conv1 = dglnn.GraphConv(in_dim, hidden_dim)
             self.conv2 = dglnn.GraphConv(hidden_dim, hidden_dim)
             self.classify = nn.Linear(hidden_dim, n_classes)

         def forward(self, g, h):
             # Apply graph convolution and activation.
             h = F.relu(self.conv1(g, h))
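The model's ``forward`` is cut off after the first convolution. Its natural completion applies the second convolution and a mean readout before classification; this is a sketch consistent with the surrounding code, not the committed text.

.. code:: python

            h = F.relu(self.conv2(g, h))
            with g.local_scope():
                g.ndata['h'] = h
                # Average node representations into one graph representation.
                hg = dgl.mean_nodes(g, 'h')
                return self.classify(hg)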
@@ -229,7 +229,7 @@ updating the model.
         opt.step()

 For an end-to-end example of graph classification, see
 `DGL's GIN example <https://github.com/dmlc/dgl/tree/master/examples/pytorch/gin>`__.
 The training loop is inside the
 function ``train`` in
 `main.py <https://github.com/dmlc/dgl/blob/master/examples/pytorch/gin/main.py>`__.
@@ -255,28 +255,28 @@ representations for each node type.
     class RGCN(nn.Module):
         def __init__(self, in_feats, hid_feats, out_feats, rel_names):
             super().__init__()

             self.conv1 = dglnn.HeteroGraphConv({
                 rel: dglnn.GraphConv(in_feats, hid_feats)
                 for rel in rel_names}, aggregate='sum')
             self.conv2 = dglnn.HeteroGraphConv({
                 rel: dglnn.GraphConv(hid_feats, out_feats)
                 for rel in rel_names}, aggregate='sum')

         def forward(self, graph, inputs):
             # inputs is features of nodes
             h = self.conv1(graph, inputs)
             h = {k: F.relu(v) for k, v in h.items()}
             h = self.conv2(graph, h)
             return h

     class HeteroClassifier(nn.Module):
         def __init__(self, in_dim, hidden_dim, n_classes, rel_names):
             super().__init__()
             self.rgcn = RGCN(in_dim, hidden_dim, hidden_dim, rel_names)
             self.classify = nn.Linear(hidden_dim, n_classes)

         def forward(self, g):
             h = g.ndata['feat']
             h = self.rgcn(g, h)
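``HeteroClassifier.forward`` is likewise truncated by the hunk. On a heterogeneous graph the readout has to be taken per node type and combined; a plausible completion (a sketch, assuming a mean readout summed over types):

.. code:: python

            with g.local_scope():
                g.ndata['h'] = h
                # Read out each node type, then sum the per-type means.
                hg = 0
                for ntype in h:
                    hg = hg + dgl.mean_nodes(g, 'h', ntype=ntype)
                return self.classify(hg)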
...
@@ -108,7 +108,7 @@ class Sequential(nn.Sequential):
     Description
     -----------
-    A squential container for stacking graph neural network modules.
+    A sequential container for stacking graph neural network modules.

     DGL supports two modes: sequentially apply GNN modules on 1) the same graph or
     2) a list of given graphs. In the second case, the number of graphs equals the
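Beyond the typo fix, the same-graph mode this docstring describes works roughly as follows (a usage sketch, assuming each stacked module takes ``(graph, feat)`` as ``dglnn.GraphConv`` does):

.. code:: python

    import torch
    import dgl
    import dgl.nn.pytorch as dglnn

    # Stack two GraphConv layers; the container forwards
    # (graph, features) through each module in turn.
    seq = dglnn.Sequential(
        dglnn.GraphConv(5, 10, activation=torch.relu),
        dglnn.GraphConv(10, 2),
    )
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))  # a 4-node cycle
    feat = torch.randn(4, 5)
    out = seq(g, feat)  # shape: (4, 2)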
...