"""
This code was modified from the GCN implementation in DGL examples.
Simplifying Graph Convolutional Networks
Paper: https://arxiv.org/abs/1902.07153
Code: https://github.com/Tiiiger/SGC
SGC implementation in DGL.
"""
import argparse
import math
import time

import dgl

import mxnet as mx
import numpy as np
from dgl.data import (
    CiteseerGraphDataset,
    CoraGraphDataset,
    PubmedGraphDataset,
    register_data_args,
)
from dgl.nn.mxnet.conv import SGConv
from mxnet import gluon, nd
from mxnet.gluon import nn


def evaluate(model, g, features, labels, mask):
    # Accuracy over the nodes selected by the boolean mask.
    pred = model(g, features).argmax(axis=1)
    accuracy = ((pred == labels) * mask).sum() / mask.sum().asscalar()
    return accuracy.asscalar()


def main(args):
    # load and preprocess dataset
    if args.dataset == "cora":
        data = CoraGraphDataset()
    elif args.dataset == "citeseer":
        data = CiteseerGraphDataset()
    elif args.dataset == "pubmed":
        data = PubmedGraphDataset()
    else:
        raise ValueError("Unknown dataset: {}".format(args.dataset))

    g = data[0]
    if args.gpu < 0:
        cuda = False
        ctx = mx.cpu(0)
    else:
        cuda = True
        ctx = mx.gpu(args.gpu)
        g = g.int().to(ctx)

    features = g.ndata["feat"]
    labels = mx.nd.array(g.ndata["label"], dtype="float32", ctx=ctx)
    train_mask = g.ndata["train_mask"]
    val_mask = g.ndata["val_mask"]
    test_mask = g.ndata["test_mask"]
    in_feats = features.shape[1]
    n_classes = data.num_labels
    n_edges = g.number_of_edges()
    print(
        """----Data statistics------
      #Edges %d
      #Classes %d
      #Train samples %d
      #Val samples %d
      #Test samples %d"""
        % (
            n_edges,
            n_classes,
            train_mask.sum().asscalar(),
            val_mask.sum().asscalar(),
            test_mask.sum().asscalar(),
        )
    )

    # ensure exactly one self loop per node (SGC normalizes A + I)
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)

    # create SGC model: k=2 hops of feature propagation followed by a single
    # linear layer; cached=True reuses the propagated features across epochs
    model = SGConv(in_feats, n_classes, k=2, cached=True, bias=args.bias)

    model.initialize(ctx=ctx)
    n_train_samples = train_mask.sum().asscalar()
    loss_fcn = gluon.loss.SoftmaxCELoss()

    # set up the optimizer (Adam with weight decay)
    print(model.collect_params())
    trainer = gluon.Trainer(
        model.collect_params(),
        "adam",
        {"learning_rate": args.lr, "wd": args.weight_decay},
    )

    # training loop; the first 3 epochs are excluded from timing as warm-up
    dur = []
    for epoch in range(args.n_epochs):
        if epoch >= 3:
            t0 = time.time()
        # forward
        with mx.autograd.record():
            pred = model(g, features)
            loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
            loss = loss.sum() / n_train_samples

        loss.backward()
        trainer.step(batch_size=1)

        if epoch >= 3:
            loss.asscalar()  # block until the update finishes so the epoch time is accurate
            dur.append(time.time() - t0)
            acc = evaluate(model, g, features, labels, val_mask)
            print(
                "Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
                "ETputs(KTEPS) {:.2f}".format(
                    epoch,
                    np.mean(dur),
                    loss.asscalar(),
                    acc,
                    n_edges / np.mean(dur) / 1000,
                )
            )

    # test set accuracy
    acc = evaluate(model, g, features, labels, test_mask)
    print("Test accuracy {:.2%}".format(acc))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="SGC")
    register_data_args(parser)
133
134
135
136
137
138
139
140
141
142
143
    parser.add_argument("--gpu", type=int, default=-1, help="gpu")
    parser.add_argument("--lr", type=float, default=0.2, help="learning rate")
    parser.add_argument(
        "--bias", action="store_true", default=False, help="flag to use bias"
    )
    parser.add_argument(
        "--n-epochs", type=int, default=100, help="number of training epochs"
    )
    parser.add_argument(
        "--weight-decay", type=float, default=5e-6, help="Weight for L2 loss"
    )
    args = parser.parse_args()
    print(args)

    main(args)
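
# Example invocation (CPU run on Cora; the --dataset flag is added by
# register_data_args):
#     python sgc.py --dataset cora --gpu -1 --n-epochs 100 --lr 0.2 --bias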