"""
This code was modified from the GCN implementation in DGL examples.
Simplifying Graph Convolutional Networks
Paper: https://arxiv.org/abs/1902.07153
Code: https://github.com/Tiiiger/SGC
SGC implementation in DGL.
"""
import argparse
import time

import mxnet as mx
import numpy as np
from mxnet import gluon

import dgl
from dgl.data import (CiteseerGraphDataset, CoraGraphDataset,
                      PubmedGraphDataset, register_data_args)
from dgl.nn.mxnet.conv import SGConv


def evaluate(model, g, features, labels, mask):
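    """Accuracy of `model` on the nodes selected by the boolean mask."""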
    pred = model(g, features).argmax(axis=1)
    accuracy = ((pred == labels) * mask).sum() / mask.sum().asscalar()
    return accuracy.asscalar()


def main(args):
    # load and preprocess dataset
    if args.dataset == "cora":
        data = CoraGraphDataset()
    elif args.dataset == "citeseer":
        data = CiteseerGraphDataset()
    elif args.dataset == "pubmed":
        data = PubmedGraphDataset()
    else:
        raise ValueError("Unknown dataset: {}".format(args.dataset))

    g = data[0]
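    # The dataset holds a single graph; node features, labels and the
    # train/val/test masks are read from g.ndata below.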
    if args.gpu < 0:
        cuda = False
        ctx = mx.cpu(0)
    else:
        cuda = True
        ctx = mx.gpu(args.gpu)
        g = g.int().to(ctx)

    features = g.ndata["feat"]
    labels = mx.nd.array(g.ndata["label"], dtype="float32", ctx=ctx)
    train_mask = g.ndata["train_mask"]
    val_mask = g.ndata["val_mask"]
    test_mask = g.ndata["test_mask"]
    in_feats = features.shape[1]
    n_classes = data.num_classes
    n_edges = g.number_of_edges()
    print(
        """----Data statistics------'
      #Edges %d
      #Classes %d
      #Train samples %d
      #Val samples %d
      #Test samples %d"""
        % (
            n_edges,
            n_classes,
            train_mask.sum().asscalar(),
            val_mask.sum().asscalar(),
            test_mask.sum().asscalar(),
        )
    )

    # add self loop
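    # SGC propagates over the augmented adjacency A + I; removing any existing
    # self-loops first avoids duplicating them.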
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)

    # create SGC model
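    # k=2 smooths features over 2-hop neighborhoods before the single linear
    # layer; cached=True stores the propagated features after the first
    # forward pass (transductive setting), so later epochs only train the
    # linear layer.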
    model = SGConv(in_feats, n_classes, k=2, cached=True, bias=args.bias)

    model.initialize(ctx=ctx)
    n_train_samples = train_mask.sum().asscalar()
    loss_fcn = gluon.loss.SoftmaxCELoss()

    # Adam optimizer; wd applies L2 weight decay to the linear layer
    print(model.collect_params())
    trainer = gluon.Trainer(
        model.collect_params(),
        "adam",
        {"learning_rate": args.lr, "wd": args.weight_decay},
    )

    # training loop
    dur = []
    for epoch in range(args.n_epochs):
        if epoch >= 3:
            t0 = time.time()
        # forward
        with mx.autograd.record():
            pred = model(g, features)
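            # train_mask is passed as sample_weight, so only training nodes
            # contribute to the cross-entropy; dividing by n_train_samples
            # then gives their mean loss.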
            loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
            loss = loss.sum() / n_train_samples

        loss.backward()
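        # The loss is already averaged over training nodes, so no extra
        # gradient rescaling is needed (batch_size=1).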
        trainer.step(batch_size=1)

        if epoch >= 3:
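            # asscalar() blocks until MXNet's asynchronous computation has
            # finished, so the recorded epoch time is accurate.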
            loss.asscalar()
            dur.append(time.time() - t0)
            acc = evaluate(model, g, features, labels, val_mask)
            print(
                "Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
                "ETputs(KTEPS) {:.2f}".format(
                    epoch,
                    np.mean(dur),
                    loss.asscalar(),
                    acc,
                    n_edges / np.mean(dur) / 1000,
                )
            )

    # test set accuracy
    acc = evaluate(model, g, features, labels, test_mask)
    print("Test accuracy {:.2%}".format(acc))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="SGC")
    register_data_args(parser)
    parser.add_argument("--gpu", type=int, default=-1, help="gpu")
    parser.add_argument("--lr", type=float, default=0.2, help="learning rate")
    parser.add_argument(
        "--bias", action="store_true", default=False, help="flag to use bias"
    )
    parser.add_argument(
        "--n-epochs", type=int, default=100, help="number of training epochs"
    )
    parser.add_argument(
        "--weight-decay", type=float, default=5e-6, help="Weight for L2 loss"
    )
    args = parser.parse_args()
    print(args)

    main(args)