Commit 378c2645 authored by Lingfan Yu, committed by Minjie Wang

[Model] Fix GCN Normalization (#249)

* WIP

* lr -> 0.01

* new cora dataset

* normalization code

* minor format change

* normalization factor for deg bucket
parent 21255b65
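
For reference, the normalization this commit fixes is the symmetric propagation rule from Kipf & Welling's GCN, with self loops added to the adjacency matrix:

$$H^{(l+1)} = \sigma\left(\tilde{D}^{-1/2}\,\tilde{A}\,\tilde{D}^{-1/2} H^{(l)} W^{(l)}\right), \qquad \tilde{A} = A + I, \quad \tilde{D}_{ii} = \sum_j \tilde{A}_{ij}$$

In the diffs below, one factor of $\tilde{D}^{-1/2}$ is applied to source features before message passing and the other to the aggregated features before the linear layer, so the product of the two per-node scalings realizes the full symmetric normalization.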
@@ -27,7 +27,9 @@ class NodeApplyModule(nn.Module):
         self.activation = activation

     def forward(self, nodes):
-        h = self.linear(nodes.data['h'])
+        # normalization by square root of dst degree
+        h = nodes.data['h'] * nodes.data['norm']
+        h = self.linear(h)
         if self.activation:
             h = self.activation(h)
         return {'h' : h}
@@ -49,8 +51,10 @@ class GCN(nn.Module):
         else:
             self.dropout = 0.
+        self.layers = nn.ModuleList()
         # input layer
-        self.layers = nn.ModuleList([NodeApplyModule(in_feats, n_hidden, activation)])
+        self.layers.append(NodeApplyModule(in_feats, n_hidden, activation))
         # hidden layers
         for i in range(n_layers - 1):
@@ -62,22 +66,34 @@ class GCN(nn.Module):
     def forward(self, features):
         self.g.ndata['h'] = features
-        for layer in self.layers:
+        for idx, layer in enumerate(self.layers):
             # apply dropout
-            if self.dropout:
-                self.g.apply_nodes(apply_node_func=
-                        lambda nodes: {'h': self.dropout(nodes.data['h'])})
+            if idx > 0 and self.dropout:
+                self.g.ndata['h'] = self.dropout(self.g.ndata['h'])
+            # normalization by square root of src degree
+            self.g.ndata['h'] = self.g.ndata['h'] * self.g.ndata['norm']
             self.g.update_all(gcn_msg, gcn_reduce, layer)
         return self.g.ndata.pop('h')
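
The forward pass now scales features by 'norm' on the source side before update_all, while NodeApplyModule scales the aggregated result on the destination side. A minimal dense-PyTorch sketch, independent of DGL and using a hypothetical toy graph, checking that this two-sided scaling reproduces the symmetric normalization:

# Dense-PyTorch check (toy graph, hypothetical values) that scaling node
# features by deg^-0.5 before aggregation and again after it reproduces
# D^-1/2 (A + I) D^-1/2 H.
import torch

N, F = 4, 3
A = torch.tensor([[0., 1., 1., 0.],
                  [1., 0., 1., 0.],
                  [1., 1., 0., 1.],
                  [0., 0., 1., 0.]])
A = A + torch.eye(N)                 # add self loops, as the commit does
H = torch.randn(N, F)

deg = A.sum(dim=1)                   # row sums; A is symmetric here
norm = deg.pow(-0.5).unsqueeze(1)    # (N, 1), same shape as g.ndata['norm']

two_sided = norm * (A @ (norm * H))  # src-side scale, aggregate, dst-side scale
ref = torch.diag(deg.pow(-0.5)) @ A @ torch.diag(deg.pow(-0.5)) @ H
assert torch.allclose(two_sided, ref, atol=1e-6)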
+
+def evaluate(model, features, labels, mask):
+    model.eval()
+    with torch.no_grad():
+        logits = model(features)
+        logits = logits[mask]
+        labels = labels[mask]
+        _, indices = torch.max(logits, dim=1)
+        correct = torch.sum(indices == labels)
+        return correct.item() * 1.0 / len(labels)
+
 def main(args):
     # load and preprocess dataset
-    # Todo: adjacency normalization
     data = load_data(args)
     features = torch.FloatTensor(data.features)
     labels = torch.LongTensor(data.labels)
-    mask = torch.ByteTensor(data.train_mask)
+    train_mask = torch.ByteTensor(data.train_mask)
+    val_mask = torch.ByteTensor(data.val_mask)
+    test_mask = torch.ByteTensor(data.test_mask)
     in_feats = features.shape[1]
     n_classes = data.num_labels
     n_edges = data.graph.number_of_edges()
@@ -89,10 +105,24 @@ def main(args):
         torch.cuda.set_device(args.gpu)
         features = features.cuda()
         labels = labels.cuda()
-        mask = mask.cuda()
+        train_mask = train_mask.cuda()
+        val_mask = val_mask.cuda()
+        test_mask = test_mask.cuda()

-    # create GCN model
+    # graph preprocess and calculate normalization factor
     g = DGLGraph(data.graph)
+    n_edges = g.number_of_edges()
+    # add self loop
+    g.add_edges(g.nodes(), g.nodes())
+    # normalization
+    degs = g.in_degrees().float()
+    norm = torch.pow(degs, -0.5)
+    norm[torch.isinf(norm)] = 0
+    if cuda:
+        norm = norm.cuda()
+    g.ndata['norm'] = norm.unsqueeze(1)
+
+    # create GCN model
     model = GCN(g,
                 in_feats,
                 args.n_hidden,
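
Because a self loop is added to every node just above, no node in this example ends up with degree zero, but the isinf guard keeps the preprocessing safe for graphs without self loops. A small standalone illustration (hypothetical degrees) of what the guard prevents:

# An isolated node has degree 0, and 0 ** -0.5 is inf, which would poison
# every feature it multiplies; the guard zeroes those entries instead.
import torch

degs = torch.tensor([2., 3., 0.])
norm = torch.pow(degs, -0.5)     # tensor([0.7071, 0.5774, inf])
norm[torch.isinf(norm)] = 0      # isolated nodes simply contribute nothing
norm = norm.unsqueeze(1)         # (N, 1) broadcasts against (N, F) features

feats = torch.ones(3, 4)
print(norm * feats)              # last row is zeros rather than inf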
@@ -105,17 +135,20 @@ def main(args):
         model.cuda()

     # use optimizer
-    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
+    optimizer = torch.optim.Adam(model.parameters(),
+                                 lr=args.lr,
+                                 weight_decay=args.weight_decay)

     # initialize graph
     dur = []
     for epoch in range(args.n_epochs):
+        model.train()
         if epoch >= 3:
             t0 = time.time()
         # forward
         logits = model(features)
         logp = F.log_softmax(logits, 1)
-        loss = F.nll_loss(logp[mask], labels[mask])
+        loss = F.nll_loss(logp[train_mask], labels[train_mask])

         optimizer.zero_grad()
         loss.backward()
@@ -124,24 +157,33 @@ def main(args):
         if epoch >= 3:
             dur.append(time.time() - t0)

-        print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | ETputs(KTEPS) {:.2f}".format(
-            epoch, loss.item(), np.mean(dur), n_edges / np.mean(dur) / 1000))
+        acc = evaluate(model, features, labels, val_mask)
+        print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
+              "ETputs(KTEPS) {:.2f}".format(epoch, np.mean(dur), loss.item(),
+                                            acc, n_edges / np.mean(dur) / 1000))
+
+    print()
+    acc = evaluate(model, features, labels, test_mask)
+    print("Test Accuracy {:.4f}".format(acc))

 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='GCN')
     register_data_args(parser)
-    parser.add_argument("--dropout", type=float, default=0,
+    parser.add_argument("--dropout", type=float, default=0.5,
             help="dropout probability")
     parser.add_argument("--gpu", type=int, default=-1,
             help="gpu")
-    parser.add_argument("--lr", type=float, default=1e-3,
+    parser.add_argument("--lr", type=float, default=1e-2,
             help="learning rate")
-    parser.add_argument("--n-epochs", type=int, default=20,
+    parser.add_argument("--n-epochs", type=int, default=200,
             help="number of training epochs")
     parser.add_argument("--n-hidden", type=int, default=16,
             help="number of hidden gcn units")
     parser.add_argument("--n-layers", type=int, default=1,
             help="number of hidden gcn layers")
+    parser.add_argument("--weight-decay", type=float, default=5e-4,
+            help="Weight for L2 loss")
     args = parser.parse_args()
     print(args)
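
With the new defaults, a typical invocation of this example (assuming the script is saved as gcn.py and that register_data_args adds the usual --dataset flag) is:

    python gcn.py --dataset cora --gpu 0

which trains for 200 epochs with lr=1e-2, dropout 0.5, and weight decay 5e-4, printing validation accuracy each epoch and test accuracy at the end.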
...
@@ -18,15 +18,16 @@ from dgl.data import register_data_args, load_data

 class NodeApplyModule(nn.Module):
     def __init__(self, in_feats, out_feats, activation=None):
         super(NodeApplyModule, self).__init__()
         self.linear = nn.Linear(in_feats, out_feats)
+        nn.init.xavier_normal_(self.linear.weight)
         self.activation = activation

     def forward(self, nodes):
-        h = self.linear(nodes.data['h'])
+        # normalization by square root of dst degree
+        h = nodes.data['h'] * nodes.data['norm']
+        h = self.linear(h)
         if self.activation:
             h = self.activation(h)
         return {'h': h}

 class GCN(nn.Module):
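
This second file also adds Xavier initialization for the linear weights. For reference, nn.init.xavier_normal_ (with its default gain of 1) draws weights from a zero-mean normal whose variance balances fan-in and fan-out:

$$\mathrm{Var}(W_{ij}) = \frac{2}{\mathrm{fan_{in}} + \mathrm{fan_{out}}}$$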
@@ -46,8 +47,10 @@ class GCN(nn.Module):
         else:
             self.dropout = 0.
+        self.layers = nn.ModuleList()
         # input layer
-        self.layers = nn.ModuleList([NodeApplyModule(in_feats, n_hidden, activation)])
+        self.layers.append(NodeApplyModule(in_feats, n_hidden, activation))
         # hidden layers
         for i in range(n_layers - 1):
@@ -59,24 +62,35 @@ class GCN(nn.Module):
     def forward(self, features):
         self.g.ndata['h'] = features
-        for layer in self.layers:
+        for idx, layer in enumerate(self.layers):
             # apply dropout
-            if self.dropout:
-                self.g.apply_nodes(apply_node_func=
-                        lambda nodes: {'h': self.dropout(nodes.data['h'])})
+            if idx > 0 and self.dropout:
+                self.g.ndata['h'] = self.dropout(self.g.ndata['h'])
+            # normalization by square root of src degree
+            self.g.ndata['h'] = self.g.ndata['h'] * self.g.ndata['norm']
             self.g.update_all(fn.copy_src(src='h', out='m'),
                               fn.sum(msg='m', out='h'),
                               layer)
         return self.g.pop_n_repr('h')
+
+def evaluate(model, features, labels, mask):
+    model.eval()
+    with torch.no_grad():
+        logits = model(features)
+        logits = logits[mask]
+        labels = labels[mask]
+        _, indices = torch.max(logits, dim=1)
+        correct = torch.sum(indices == labels)
+        return correct.item() * 1.0 / len(labels)
+
 def main(args):
     # load and preprocess dataset
-    # Todo: adjacency normalization
     data = load_data(args)
     features = torch.FloatTensor(data.features)
     labels = torch.LongTensor(data.labels)
-    mask = torch.ByteTensor(data.train_mask)
+    train_mask = torch.ByteTensor(data.train_mask)
+    val_mask = torch.ByteTensor(data.val_mask)
+    test_mask = torch.ByteTensor(data.test_mask)
     in_feats = features.shape[1]
     n_classes = data.num_labels
     n_edges = data.graph.number_of_edges()
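
Unlike the first file, this variant relies on DGL's built-in message and reduce functions, which DGL can fuse into a sparse matrix-vector product (spmv) instead of calling per-edge and per-node Python functions. A sketch of the correspondence, assuming the same 'h' and 'm' field names (the user-defined pair mirrors the gcn_msg/gcn_reduce used in the first file):

# The built-in pair used above is equivalent to these user-defined message
# and reduce functions, but built-ins let DGL lower update_all to one fused
# spmv instead of per-edge/per-node Python callbacks.
import dgl.function as fn

def gcn_msg(edges):
    # same role as fn.copy_src(src='h', out='m')
    return {'m': edges.src['h']}

def gcn_reduce(nodes):
    # same role as fn.sum(msg='m', out='h')
    return {'h': nodes.mailbox['m'].sum(dim=1)}

# g.update_all(gcn_msg, gcn_reduce, layer)          # per-edge/per-node Python
# g.update_all(fn.copy_src(src='h', out='m'),
#              fn.sum(msg='m', out='h'), layer)     # fused spmv path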
@@ -88,10 +102,24 @@ def main(args):
         torch.cuda.set_device(args.gpu)
         features = features.cuda()
         labels = labels.cuda()
-        mask = mask.cuda()
+        train_mask = train_mask.cuda()
+        val_mask = val_mask.cuda()
+        test_mask = test_mask.cuda()

-    # create GCN model
+    # graph preprocess and calculate normalization factor
     g = DGLGraph(data.graph)
+    n_edges = g.number_of_edges()
+    # add self loop
+    g.add_edges(g.nodes(), g.nodes())
+    # normalization
+    degs = g.in_degrees().float()
+    norm = torch.pow(degs, -0.5)
+    norm[torch.isinf(norm)] = 0
+    if cuda:
+        norm = norm.cuda()
+    g.ndata['norm'] = norm.unsqueeze(1)
+
+    # create GCN model
     model = GCN(g,
                 in_feats,
                 args.n_hidden,
@@ -104,17 +132,20 @@ def main(args):
         model.cuda()

     # use optimizer
-    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
+    optimizer = torch.optim.Adam(model.parameters(),
+                                 lr=args.lr,
+                                 weight_decay=args.weight_decay)

     # initialize graph
     dur = []
     for epoch in range(args.n_epochs):
+        model.train()
         if epoch >= 3:
             t0 = time.time()
         # forward
         logits = model(features)
         logp = F.log_softmax(logits, 1)
-        loss = F.nll_loss(logp[mask], labels[mask])
+        loss = F.nll_loss(logp[train_mask], labels[train_mask])

         optimizer.zero_grad()
         loss.backward()
@@ -123,24 +154,33 @@ def main(args):
         if epoch >= 3:
             dur.append(time.time() - t0)

-        print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | ETputs(KTEPS) {:.2f}".format(
-            epoch, loss.item(), np.mean(dur), n_edges / np.mean(dur) / 1000))
+        acc = evaluate(model, features, labels, val_mask)
+        print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
+              "ETputs(KTEPS) {:.2f}".format(epoch, np.mean(dur), loss.item(),
+                                            acc, n_edges / np.mean(dur) / 1000))
+
+    print()
+    acc = evaluate(model, features, labels, test_mask)
+    print("Test Accuracy {:.4f}".format(acc))

 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='GCN')
     register_data_args(parser)
-    parser.add_argument("--dropout", type=float, default=0,
+    parser.add_argument("--dropout", type=float, default=0.5,
             help="dropout probability")
     parser.add_argument("--gpu", type=int, default=-1,
             help="gpu")
-    parser.add_argument("--lr", type=float, default=1e-3,
+    parser.add_argument("--lr", type=float, default=1e-2,
             help="learning rate")
-    parser.add_argument("--n-epochs", type=int, default=20,
+    parser.add_argument("--n-epochs", type=int, default=200,
             help="number of training epochs")
     parser.add_argument("--n-hidden", type=int, default=16,
             help="number of hidden gcn units")
     parser.add_argument("--n-layers", type=int, default=1,
             help="number of hidden gcn layers")
+    parser.add_argument("--weight-decay", type=float, default=5e-4,
+            help="Weight for L2 loss")
     args = parser.parse_args()
     print(args)
...
@@ -15,7 +15,7 @@ import dgl
 from .utils import download, extract_archive, get_download_dir, _get_dgl_url

 _urls = {
-    'cora' : 'dataset/cora.zip',
+    'cora' : 'dataset/cora_raw.zip',
     'citeseer' : 'dataset/citeseer.zip',
     'pubmed' : 'dataset/pubmed.zip',
     'cora_binary' : 'dataset/cora_binary.zip',
@@ -140,7 +140,7 @@ def _sample_mask(idx, l):
     return mask

 def load_cora():
-    data = CitationGraphDataset('cora')
+    data = CoraDataset()
     return data

 def load_citeseer():
@@ -164,7 +164,7 @@ class GCNSyntheticDataset(object):
         # generate graph
         self.graph = graph_generator(seed)
         num_nodes = self.graph.number_of_nodes()
         # generate features
         #self.features = rng.randn(num_nodes, num_feats).astype(np.float32)
         self.features = np.zeros((num_nodes, num_feats), dtype=np.float32)
@@ -335,3 +335,64 @@ class CoraBinary(object):
         batched_pmpds = sp.block_diag(pmpds)
         batched_labels = np.concatenate(labels, axis=0)
         return batched_graphs, batched_pmpds, batched_labels
+
+class CoraDataset(object):
+    def __init__(self):
+        self.name = 'cora'
+        self.dir = get_download_dir()
+        self.zip_file_path = '{}/{}.zip'.format(self.dir, self.name)
+        download(_get_dgl_url(_urls[self.name]), path=self.zip_file_path)
+        extract_archive(self.zip_file_path,
+                        '{}/{}'.format(self.dir, self.name))
+        self._load()
+
+    def _load(self):
+        idx_features_labels = np.genfromtxt("{}/cora/cora.content".
+                                            format(self.dir),
+                                            dtype=np.dtype(str))
+        features = sp.csr_matrix(idx_features_labels[:, 1:-1],
+                                 dtype=np.float32)
+        labels = _encode_onehot(idx_features_labels[:, -1])
+        self.num_labels = labels.shape[1]
+
+        # build graph
+        idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
+        idx_map = {j: i for i, j in enumerate(idx)}
+        edges_unordered = np.genfromtxt("{}/cora/cora.cites".format(self.dir),
+                                        dtype=np.int32)
+        edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
+                         dtype=np.int32).reshape(edges_unordered.shape)
+        adj = sp.coo_matrix((np.ones(edges.shape[0]),
+                             (edges[:, 0], edges[:, 1])),
+                            shape=(labels.shape[0], labels.shape[0]),
+                            dtype=np.float32)
+
+        # build symmetric adjacency matrix
+        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
+        self.graph = nx.from_scipy_sparse_matrix(adj, create_using=nx.DiGraph())
+
+        features = _normalize(features)
+        self.features = np.array(features.todense())
+        self.labels = np.where(labels)[1]
+
+        self.train_mask = _sample_mask(range(140), labels.shape[0])
+        self.val_mask = _sample_mask(range(200, 500), labels.shape[0])
+        self.test_mask = _sample_mask(range(500, 1500), labels.shape[0])
+
+def _normalize(mx):
+    """Row-normalize sparse matrix"""
+    rowsum = np.array(mx.sum(1))
+    r_inv = np.power(rowsum, -1).flatten()
+    r_inv[np.isinf(r_inv)] = 0.
+    r_mat_inv = sp.diags(r_inv)
+    mx = r_mat_inv.dot(mx)
+    return mx
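
_normalize implements standard row normalization: each feature row is scaled by the inverse of its sum, and rows summing to zero are left at zero (the isinf guard zeroes their scaling factor, paralleling the torch.isinf guard in the examples above). In matrix form:

$$\tilde{X} = D_r^{-1} X, \qquad (D_r)_{ii} = \sum_j X_{ij}$$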
+
+def _encode_onehot(labels):
+    classes = set(labels)
+    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
+                    enumerate(classes)}
+    labels_onehot = np.array(list(map(classes_dict.get, labels)),
+                             dtype=np.int32)
+    return labels_onehot
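
A short usage sketch for the new loader; treat it as illustrative rather than part of the commit (the module path is assumed, and the printed shapes are the standard Cora figures: 2708 nodes, 1433 bag-of-words features, 7 classes):

# Illustrative only: exercise the new CoraDataset through load_cora.
# Module path assumed to be dgl.data.citation_graph.
from dgl.data.citation_graph import load_cora

data = load_cora()               # downloads cora_raw.zip on first use
print(data.features.shape)       # (2708, 1433): row-normalized features
print(data.num_labels)           # 7 classes
print(data.train_mask.sum(),     # 140 training nodes,
      data.val_mask.sum(),       # 300 validation nodes,
      data.test_mask.sum())      # 1000 test nodes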