"""
Inductive Representation Learning on Large Graphs
Paper: http://papers.nips.cc/paper/6703-inductive-representation-learning-on-large-graphs.pdf
Code: https://github.com/williamleif/graphsage-simple
Simple reference implementation of GraphSAGE.
"""
import argparse
import time

import mxnet as mx
import networkx as nx
import numpy as np
from mxnet import gluon, nd
from mxnet.gluon import nn

import dgl
from dgl.data import (CiteseerGraphDataset, CoraGraphDataset,
                      PubmedGraphDataset, register_data_args)
from dgl.nn.mxnet.conv import SAGEConv


class GraphSAGE(nn.Block):
    """Full-graph GraphSAGE node classifier built from stacked SAGEConv layers.

    Layer layout: one input convolution (in_feats -> n_hidden), n_layers - 1
    hidden convolutions (n_hidden -> n_hidden), and an output convolution
    (n_hidden -> n_classes) with no activation, so the caller can apply its
    own softmax / cross-entropy on the raw scores.
    """

    def __init__(
        self,
        g,
        in_feats,
        n_hidden,
        n_classes,
        n_layers,
        activation,
        dropout,
        aggregator_type,
    ):
        super(GraphSAGE, self).__init__()
        # Graph is fixed at construction time; forward() only takes features.
        self.g = g

        with self.name_scope():
            self.layers = nn.Sequential()
            # Input projection.
            self.layers.add(
                SAGEConv(
                    in_feats,
                    n_hidden,
                    aggregator_type,
                    feat_drop=dropout,
                    activation=activation,
                )
            )
            # Hidden stack.
            for _ in range(n_layers - 1):
                self.layers.add(
                    SAGEConv(
                        n_hidden,
                        n_hidden,
                        aggregator_type,
                        feat_drop=dropout,
                        activation=activation,
                    )
                )
            # Output head — deliberately linear (activation=None).
            self.layers.add(
                SAGEConv(
                    n_hidden,
                    n_classes,
                    aggregator_type,
                    feat_drop=dropout,
                    activation=None,
                )
            )

    def forward(self, features):
        """Propagate `features` through every layer on the stored graph.

        Returns per-node class scores (logits).
        """
        h = features
        for conv in self.layers:
            h = conv(self.g, h)
        return h

def evaluate(model, features, labels, mask):
    """Return masked classification accuracy of `model` on `features`.

    Only nodes where `mask` is nonzero contribute to the score; the result
    is returned as a plain Python float.
    """
    logits = model(features)
    predictions = logits.argmax(axis=1)
    correct = (predictions == labels) * mask
    accuracy = correct.sum() / mask.sum().asscalar()
    return accuracy.asscalar()

def main(args):
    """Train and evaluate GraphSAGE on a citation-graph dataset.

    Expects `args` to provide: dataset, gpu, n_hidden, n_layers, dropout,
    aggregator_type, lr, weight_decay, n_epochs (see the argparse setup).
    Raises ValueError for an unknown dataset name.
    """
    # load and preprocess dataset
    if args.dataset == "cora":
        data = CoraGraphDataset()
    elif args.dataset == "citeseer":
        data = CiteseerGraphDataset()
    elif args.dataset == "pubmed":
        data = PubmedGraphDataset()
    else:
        raise ValueError("Unknown dataset: {}".format(args.dataset))

    g = data[0]
    if args.gpu < 0:
        ctx = mx.cpu(0)
    else:
        ctx = mx.gpu(args.gpu)
        g = g.int().to(ctx)

    features = g.ndata["feat"]
    # SoftmaxCELoss wants float labels; also place them on the chosen device.
    labels = mx.nd.array(g.ndata["label"], dtype="float32", ctx=ctx)
    train_mask = g.ndata["train_mask"]
    val_mask = g.ndata["val_mask"]
    test_mask = g.ndata["test_mask"]
    in_feats = features.shape[1]
    n_classes = data.num_labels
    # BUGFIX: read the edge count from the loaded graph `g` instead of the
    # deprecated `data.graph` attribute (absent on newer DGL datasets and
    # inconsistent with how every other tensor above is fetched).
    n_edges = g.number_of_edges()
    print(
        """----Data statistics------'
      #Edges %d
      #Classes %d
      #Train samples %d
      #Val samples %d
      #Test samples %d"""
        % (
            n_edges,
            n_classes,
            train_mask.sum().asscalar(),
            val_mask.sum().asscalar(),
            test_mask.sum().asscalar(),
        )
    )

    # add self loop (remove first so every node ends up with exactly one)
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)
    n_edges = g.number_of_edges()

    # create GraphSAGE model
    model = GraphSAGE(
        g,
        in_feats,
        args.n_hidden,
        n_classes,
        args.n_layers,
        nd.relu,
        args.dropout,
        args.aggregator_type,
    )

    model.initialize(ctx=ctx)
    n_train_samples = train_mask.sum().asscalar()
    loss_fcn = gluon.loss.SoftmaxCELoss()

    print(model.collect_params())
    trainer = gluon.Trainer(
        model.collect_params(),
        "adam",
        {"learning_rate": args.lr, "wd": args.weight_decay},
    )

    # initialize graph
    dur = []
    for epoch in range(args.n_epochs):
        # Skip the first 3 epochs when timing: lets lazy init/warmup settle.
        if epoch >= 3:
            t0 = time.time()
        # forward
        with mx.autograd.record():
            pred = model(features)
            # Third argument is a sample-weight mask: only training nodes
            # contribute to the loss.
            loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
            loss = loss.sum() / n_train_samples

        loss.backward()
        trainer.step(batch_size=1)

        if epoch >= 3:
            # asscalar() forces a sync so the wall-clock time is accurate.
            loss.asscalar()
            dur.append(time.time() - t0)
            acc = evaluate(model, features, labels, val_mask)
            print(
                "Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
                "ETputs(KTEPS) {:.2f}".format(
                    epoch,
                    np.mean(dur),
                    loss.asscalar(),
                    acc,
                    n_edges / np.mean(dur) / 1000,
                )
            )

    # test set accuracy
    acc = evaluate(model, features, labels, test_mask)
    print("Test accuracy {:.2%}".format(acc))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="GraphSAGE")
    # Dataset-selection flags are contributed by DGL's shared helper.
    register_data_args(parser)
    # (flag, type, default, help) for every tunable hyperparameter.
    for flag, flag_type, default, help_text in [
        ("--dropout", float, 0.5, "dropout probability"),
        ("--gpu", int, -1, "gpu"),
        ("--lr", float, 1e-2, "learning rate"),
        ("--n-epochs", int, 200, "number of training epochs"),
        ("--n-hidden", int, 16, "number of hidden gcn units"),
        ("--n-layers", int, 1, "number of hidden gcn layers"),
        ("--weight-decay", float, 5e-4, "Weight for L2 loss"),
        ("--aggregator-type", str, "gcn", "Aggregator type: mean/gcn/pool/lstm"),
    ]:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)

    args = parser.parse_args()
    print(args)

    main(args)