Unverified commit 6bc92068 authored by Quan (Andy) Gan, committed by GitHub

[Doc] fix minibatch user guide broken code (#2233)

parent 6b0d42db
@@ -188,7 +188,7 @@ classification/regression.
 .. code:: python

     class StochasticTwoLayerRGCN(nn.Module):
-        def __init__(self, in_feat, hidden_feat, out_feat):
+        def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
             super().__init__()
             self.conv1 = dglnn.HeteroGraphConv({
                     rel : dglnn.GraphConv(in_feat, hidden_feat, norm='right')
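
The hunk above shows only the first lines of the fixed class. For readers following along, here is a plausible completion of ``StochasticTwoLayerRGCN``; the second layer and the forward pass are inferred from the surrounding guide and are not part of this diff.

.. code:: python

    import torch.nn as nn
    import dgl.nn as dglnn

    class StochasticTwoLayerRGCN(nn.Module):
        def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
            super().__init__()
            # One GraphConv per relation, combined by HeteroGraphConv.
            self.conv1 = dglnn.HeteroGraphConv({
                rel: dglnn.GraphConv(in_feat, hidden_feat, norm='right')
                for rel in rel_names})
            self.conv2 = dglnn.HeteroGraphConv({
                rel: dglnn.GraphConv(hidden_feat, out_feat, norm='right')
                for rel in rel_names})

        def forward(self, blocks, x):
            # blocks are the message flow graphs produced by the sampler;
            # x maps each node type to its input features.
            x = self.conv1(blocks[0], x)
            x = self.conv2(blocks[1], x)
            return x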
@@ -226,6 +226,18 @@ over the edge types for :meth:`~dgl.DGLHeteroGraph.apply_edges`.
                     edge_subgraph.apply_edges(self.apply_edges, etype=etype)
                 return edge_subgraph.edata['score']
+
+    class Model(nn.Module):
+        def __init__(self, in_features, hidden_features, out_features, num_classes,
+                     etypes):
+            super().__init__()
+            self.rgcn = StochasticTwoLayerRGCN(
+                in_features, hidden_features, out_features, etypes)
+            self.pred = ScorePredictor(num_classes, out_features)
+
+        def forward(self, edge_subgraph, blocks, x):
+            x = self.rgcn(blocks, x)
+            return self.pred(edge_subgraph, x)
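
The new ``Model`` wrapper calls a ``ScorePredictor`` whose last two lines are the context at the top of this hunk. A hedged sketch of what that predictor plausibly looks like; the linear scoring layer and the ``'x'`` feature key are assumptions beyond the fragment shown.

.. code:: python

    import torch
    import torch.nn as nn

    class ScorePredictor(nn.Module):
        def __init__(self, num_classes, in_features):
            super().__init__()
            # Score each edge with a linear layer over the concatenated
            # endpoint representations (one possible choice of edge scorer).
            self.W = nn.Linear(2 * in_features, num_classes)

        def apply_edges(self, edges):
            data = torch.cat([edges.src['x'], edges.dst['x']], 1)
            return {'score': self.W(data)}

        def forward(self, edge_subgraph, x):
            with edge_subgraph.local_scope():
                edge_subgraph.ndata['x'] = x
                for etype in edge_subgraph.canonical_etypes:
                    edge_subgraph.apply_edges(self.apply_edges, etype=etype)
                return edge_subgraph.edata['score']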

 Data loader definition is also very similar to that of node
 classification. The only difference is that you need
 :class:`~dgl.dataloading.pytorch.EdgeDataLoader` instead of
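
As a minimal sketch of such a data loader, assuming ``g`` is the training heterograph and ``train_eid_dict`` maps each edge type to its training edge IDs (both names are placeholders, not part of this diff):

.. code:: python

    import dgl

    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(2)
    dataloader = dgl.dataloading.EdgeDataLoader(
        g, train_eid_dict, sampler,
        batch_size=1024, shuffle=True, drop_last=False, num_workers=4)

    # Each iteration yields the input nodes, the subgraph induced by the
    # minibatch edges, and the message flow graphs (blocks).
    for input_nodes, edge_subgraph, blocks in dataloader:
        ...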
@@ -279,7 +291,7 @@ dictionaries of node types and predictions here.
 .. code:: python

-    model = Model(in_features, hidden_features, out_features, num_classes)
+    model = Model(in_features, hidden_features, out_features, num_classes, etypes)
     model = model.cuda()
     opt = torch.optim.Adam(model.parameters())
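
The corrected call now needs the list of relation names. One way to obtain it, assuming ``g`` is the full training heterograph (a placeholder name):

.. code:: python

    # The graph's edge type names double as the relation names expected
    # by StochasticTwoLayerRGCN / HeteroGraphConv.
    etypes = g.etypes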
...
@@ -146,7 +146,7 @@ above.
         positive_graph = positive_graph.to(torch.device('cuda'))
         negative_graph = negative_graph.to(torch.device('cuda'))
         input_features = blocks[0].srcdata['features']
-        pos_score, neg_score = model(positive_graph, blocks, input_features)
+        pos_score, neg_score = model(positive_graph, negative_graph, blocks, input_features)
         loss = compute_loss(pos_score, neg_score)
         opt.zero_grad()
         loss.backward()
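
The loop relies on a ``compute_loss`` helper that this hunk does not show. A minimal sketch that treats link prediction as binary classification of edge existence, assuming ``pos_score`` and ``neg_score`` are plain score tensors (per-edge-type dictionaries would need to be concatenated first):

.. code:: python

    import torch
    import torch.nn.functional as F

    def compute_loss(pos_score, neg_score):
        # Positive edges are labeled 1, sampled negative edges 0.
        scores = torch.cat([pos_score, neg_score])
        labels = torch.cat([torch.ones_like(pos_score),
                            torch.zeros_like(neg_score)])
        return F.binary_cross_entropy_with_logits(scores, labels)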
@@ -166,7 +166,7 @@ classification/regression.
 .. code:: python

     class StochasticTwoLayerRGCN(nn.Module):
-        def __init__(self, in_feat, hidden_feat, out_feat):
+        def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
             super().__init__()
             self.conv1 = dglnn.HeteroGraphConv({
                     rel : dglnn.GraphConv(in_feat, hidden_feat, norm='right')
@@ -197,6 +197,20 @@ over the edge types for :meth:`dgl.DGLHeteroGraph.apply_edges`.
                         dgl.function.u_dot_v('x', 'x', 'score'), etype=etype)
                 return edge_subgraph.edata['score']
+
+    class Model(nn.Module):
+        def __init__(self, in_features, hidden_features, out_features, num_classes,
+                     etypes):
+            super().__init__()
+            self.rgcn = StochasticTwoLayerRGCN(
+                in_features, hidden_features, out_features, etypes)
+            self.pred = ScorePredictor()
+
+        def forward(self, positive_graph, negative_graph, blocks, x):
+            x = self.rgcn(blocks, x)
+            pos_score = self.pred(positive_graph, x)
+            neg_score = self.pred(negative_graph, x)
+            return pos_score, neg_score
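
Here the score predictor takes no constructor arguments because it is parameter-free; only its ``u_dot_v`` line is visible at the top of this hunk. A hedged reconstruction for context (the ``'x'`` feature key follows the fragment shown):

.. code:: python

    import dgl
    import torch.nn as nn

    class ScorePredictor(nn.Module):
        def forward(self, edge_subgraph, x):
            with edge_subgraph.local_scope():
                edge_subgraph.ndata['x'] = x
                for etype in edge_subgraph.canonical_etypes:
                    # Score an edge by the dot product of its endpoint features.
                    edge_subgraph.apply_edges(
                        dgl.function.u_dot_v('x', 'x', 'score'), etype=etype)
                return edge_subgraph.edata['score']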

 Data loader definition is also very similar to that of edge
 classification/regression. The only difference is that you need to give
 the negative sampler and you will be supplying a dictionary of edge
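
A minimal sketch, again with placeholder names ``g`` and ``train_eid_dict``, showing the extra ``negative_sampler`` argument and the additional negative graph produced each iteration:

.. code:: python

    import dgl

    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(2)
    dataloader = dgl.dataloading.EdgeDataLoader(
        g, train_eid_dict, sampler,
        negative_sampler=dgl.dataloading.negative_sampler.Uniform(5),
        batch_size=1024, shuffle=True, drop_last=False, num_workers=4)

    # Link prediction minibatches also carry a negative graph holding
    # the sampled non-existent edges.
    for input_nodes, positive_graph, negative_graph, blocks in dataloader:
        ...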
@@ -252,7 +266,7 @@ dictionaries of node types and predictions here.
 .. code:: python

-    model = Model(in_features, hidden_features, out_features, num_classes)
+    model = Model(in_features, hidden_features, out_features, num_classes, etypes)
     model = model.cuda()
     opt = torch.optim.Adam(model.parameters())
@@ -261,9 +275,8 @@ dictionaries of node types and predictions here.
         positive_graph = positive_graph.to(torch.device('cuda'))
         negative_graph = negative_graph.to(torch.device('cuda'))
         input_features = blocks[0].srcdata['features']
-        edge_labels = edge_subgraph.edata['labels']
-        edge_predictions = model(edge_subgraph, blocks, input_features)
-        loss = compute_loss(edge_labels, edge_predictions)
+        pos_score, neg_score = model(positive_graph, negative_graph, blocks, input_features)
+        loss = compute_loss(pos_score, neg_score)
         opt.zero_grad()
         loss.backward()
         opt.step()
...
@@ -184,7 +184,7 @@ removed for simplicity):
 .. code:: python

     class StochasticTwoLayerRGCN(nn.Module):
-        def __init__(self, in_feat, hidden_feat, out_feat):
+        def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
             super().__init__()
             self.conv1 = dglnn.HeteroGraphConv({
                     rel : dglnn.GraphConv(in_feat, hidden_feat, norm='right')
@@ -224,7 +224,7 @@ dictionaries of node types and predictions here.
 .. code:: python

-    model = StochasticTwoLayerRGCN(in_features, hidden_features, out_features)
+    model = StochasticTwoLayerRGCN(in_features, hidden_features, out_features, etypes)
     model = model.cuda()
     opt = torch.optim.Adam(model.parameters())
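
This last hunk instantiates the RGCN directly, without a separate score predictor, which points to node-level prediction. A hedged sketch of a matching minibatch loop; the ``NodeDataLoader`` usage, the ``train_nid_dict`` name, and the ``'features'``/``'label'`` keys are assumptions, and ``compute_loss`` here would have to accept per-node-type dictionaries:

.. code:: python

    import dgl
    import torch

    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(2)
    dataloader = dgl.dataloading.NodeDataLoader(
        g, train_nid_dict, sampler,
        batch_size=1024, shuffle=True, drop_last=False, num_workers=4)

    for input_nodes, output_nodes, blocks in dataloader:
        blocks = [b.to(torch.device('cuda')) for b in blocks]
        input_features = blocks[0].srcdata['features']   # ntype -> tensor
        output_labels = blocks[-1].dstdata['label']      # ntype -> tensor
        output_predictions = model(blocks, input_features)
        loss = compute_loss(output_labels, output_predictions)
        opt.zero_grad()
        loss.backward()
        opt.step()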
...