"vscode:/vscode.git/clone" did not exist on "b02e2113ff4625100a4412abd1ae0392ee415364"
Commit c87564d5 authored by Lingfan Yu

minor bug fixes

parent 2daba976
@@ -126,6 +126,7 @@ def main(args):
     # convert labels and masks to tensor
     labels = torch.FloatTensor(y_train)
     mask = torch.FloatTensor(train_mask.astype(np.float32))
+    n_train = torch.sum(mask)
     for epoch in range(args.epochs):
         # reset grad
@@ -141,7 +142,7 @@ def main(args):
         # masked cross entropy loss
         # TODO: (lingfan) use gather to speed up
         logp = F.log_softmax(logits, 1)
-        loss = torch.mean(logp * labels * mask.view(-1, 1))
+        loss = -torch.sum(logp * labels * mask.view(-1, 1)) / n_train
         print("epoch {} loss: {}".format(epoch, loss.item()))
         loss.backward()
...
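For context on the loss change: the old expression took a positive mean over every node and class entry, so its sign pointed the wrong way and masked-out nodes diluted the value; the new expression is the negative log-likelihood of the true class, summed over masked nodes and divided by the number of training nodes. A minimal sketch of the two variants on toy tensors (shapes and values are illustrative assumptions, not taken from the repo):

```python
import torch
import torch.nn.functional as F

# Toy setup: 4 nodes, 3 classes, only the first 2 nodes are in the training set.
logits = torch.randn(4, 3)
labels = torch.eye(3)[[0, 2, 1, 1]]      # one-hot labels, shape (4, 3)
mask = torch.tensor([1., 1., 0., 0.])    # training mask
n_train = torch.sum(mask)

logp = F.log_softmax(logits, 1)

# Old expression: positive mean over every entry, so minimizing it pushes the
# true-class log-probabilities down, and the value is diluted by masked-out
# nodes and non-label classes.
old_loss = torch.mean(logp * labels * mask.view(-1, 1))

# Fixed expression: negative log-likelihood of the true class, averaged over
# the training nodes only.
new_loss = -torch.sum(logp * labels * mask.view(-1, 1)) / n_train
print(old_loss.item(), new_loss.item())
```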
@@ -16,9 +16,10 @@ class NodeUpdateModule(nn.Module):
     def forward(self, node, msgs):
         h = node['h']
+        # (lingfan): how to write dropout, is the following correct?
         if self.p is not None:
             h = F.dropout(h, p=self.p)
-        # aggregator messages
+        # aggregate messages
         for msg in msgs:
             h += msg
         h = self.linear(h)
@@ -29,7 +30,7 @@ class NodeUpdateModule(nn.Module):
 class GCN(nn.Module):
-    def __init__(self, input_dim, num_hidden, num_classes, num_layers, activation, dropout):
+    def __init__(self, input_dim, num_hidden, num_classes, num_layers, activation, dropout=None, output_projection=True):
         super(GCN, self).__init__()
         self.layers = nn.ModuleList()
         # hidden layers
@@ -39,6 +40,7 @@ class GCN(nn.Module):
                 NodeUpdateModule(last_dim, num_hidden, act=activation, p=dropout))
             last_dim = num_hidden
         # output layer
+        if output_projection:
             self.layers.append(NodeUpdateModule(num_hidden, num_classes, p=dropout))
     def forward(self, g):
@@ -72,6 +74,7 @@ def main(args):
     # convert labels and masks to tensor
     labels = torch.FloatTensor(y_train)
     mask = torch.FloatTensor(train_mask.astype(np.float32))
+    n_train = torch.sum(mask)
     for epoch in range(args.epochs):
         # reset grad
@@ -87,7 +90,7 @@ def main(args):
         # masked cross entropy loss
         # TODO: (lingfan) use gather to speed up
         logp = F.log_softmax(logits, 1)
-        loss = torch.mean(logp * labels * mask.view(-1, 1))
+        loss = -torch.sum(logp * labels * mask.view(-1, 1)) / n_train
         print("epoch {} loss: {}".format(epoch, loss.item()))
         loss.backward()
...
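The constructor change makes dropout optional (NodeUpdateModule skips F.dropout when p is None) and adds an output_projection switch so the final classification layer can be left out, letting the same module act as a classifier or a feature encoder. Below is a self-contained sketch of that pattern using plain linear layers; TinyGCN and its layer contents are hypothetical stand-ins, not the repo's GCN or NodeUpdateModule, and graph message passing is omitted:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyGCN(nn.Module):
    def __init__(self, input_dim, num_hidden, num_classes, num_layers,
                 dropout=None, output_projection=True):
        super(TinyGCN, self).__init__()
        self.p = dropout
        self.layers = nn.ModuleList()
        last_dim = input_dim
        # hidden layers
        for _ in range(num_layers):
            self.layers.append(nn.Linear(last_dim, num_hidden))
            last_dim = num_hidden
        # output layer is optional, mirroring the output_projection flag
        if output_projection:
            self.layers.append(nn.Linear(num_hidden, num_classes))

    def forward(self, h):
        for layer in self.layers:
            # dropout is skipped entirely when no rate was given
            if self.p is not None:
                h = F.dropout(h, p=self.p, training=self.training)
            h = layer(h)
        return h

# Usage: a classifier (with projection) vs. a feature encoder (without).
clf = TinyGCN(16, 32, 7, num_layers=2)
enc = TinyGCN(16, 32, 7, num_layers=2, dropout=0.5, output_projection=False)
x = torch.randn(4, 16)
print(clf(x).shape, enc(x).shape)  # torch.Size([4, 7]) torch.Size([4, 32])
```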