"""
Inductive Representation Learning on Large Graphs
Paper: http://papers.nips.cc/paper/6703-inductive-representation-learning-on-large-graphs.pdf
Code: https://github.com/williamleif/graphsage-simple
Simple reference implementation of GraphSAGE.
"""
import argparse
import time
import dgl

import mxnet as mx
import networkx as nx
import numpy as np
from dgl.data import (
    CiteseerGraphDataset,
    CoraGraphDataset,
    PubmedGraphDataset,
    register_data_args,
)
from dgl.nn.mxnet.conv import SAGEConv
from mxnet import gluon, nd
from mxnet.gluon import nn

class GraphSAGE(nn.Block):
    """GraphSAGE model: a stack of SAGEConv layers over a fixed graph.

    The graph is captured at construction time and reused on every
    forward pass; `forward` only receives the node feature matrix.
    """

    def __init__(
        self,
        g,
        in_feats,
        n_hidden,
        n_classes,
        n_layers,
        activation,
        dropout,
        aggregator_type,
    ):
        super(GraphSAGE, self).__init__()
        self.g = g

        with self.name_scope():
            self.layers = nn.Sequential()

            def make_conv(n_in, n_out, act):
                # One SAGEConv layer; dropout is applied to its input features.
                return SAGEConv(
                    n_in,
                    n_out,
                    aggregator_type,
                    feat_drop=dropout,
                    activation=act,
                )

            # Input layer: in_feats -> n_hidden.
            self.layers.add(make_conv(in_feats, n_hidden, activation))
            # Hidden layers: n_hidden -> n_hidden, (n_layers - 1) of them.
            for _ in range(n_layers - 1):
                self.layers.add(make_conv(n_hidden, n_hidden, activation))
            # Output layer: n_hidden -> n_classes, no activation (raw logits).
            self.layers.add(make_conv(n_hidden, n_classes, None))

    def forward(self, features):
        """Run every layer on the stored graph; returns per-node logits."""
        h = features
        for conv in self.layers:
            h = conv(self.g, h)
        return h

def evaluate(model, features, labels, mask):
    """Return the accuracy of `model` on the nodes selected by `mask`."""
    predictions = model(features).argmax(axis=1)
    # Count correct predictions only where the mask is set, then normalize
    # by the number of masked nodes.
    correct_in_mask = ((predictions == labels) * mask).sum()
    accuracy = correct_in_mask / mask.sum().asscalar()
    return accuracy.asscalar()

def main(args):
    """Train and evaluate GraphSAGE on one of the citation datasets.

    Expects ``args`` to provide: dataset, gpu, dropout, lr, n_epochs,
    n_hidden, n_layers, weight_decay, aggregator_type.
    """
    # Load and preprocess dataset.
    if args.dataset == "cora":
        data = CoraGraphDataset()
    elif args.dataset == "citeseer":
        data = CiteseerGraphDataset()
    elif args.dataset == "pubmed":
        data = PubmedGraphDataset()
    else:
        raise ValueError("Unknown dataset: {}".format(args.dataset))

    g = data[0]
    if args.gpu < 0:
        ctx = mx.cpu(0)
    else:
        ctx = mx.gpu(args.gpu)
        g = g.int().to(ctx)

    features = g.ndata["feat"]
    labels = mx.nd.array(g.ndata["label"], dtype="float32", ctx=ctx)
    train_mask = g.ndata["train_mask"]
    val_mask = g.ndata["val_mask"]
    test_mask = g.ndata["test_mask"]
    in_feats = features.shape[1]
    n_classes = data.num_labels
    # BUG FIX: the loaded graph is `g` (data[0]); `data.graph` is a
    # deprecated attribute that is absent on recent DGL dataset objects.
    n_edges = g.number_of_edges()
    print(
        """----Data statistics------
      #Edges %d
      #Classes %d
      #Train samples %d
      #Val samples %d
      #Test samples %d"""
        % (
            n_edges,
            n_classes,
            train_mask.sum().asscalar(),
            val_mask.sum().asscalar(),
            test_mask.sum().asscalar(),
        )
    )

    # Add self loops so every node aggregates its own features
    # (remove first to avoid duplicating pre-existing self loops).
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)
    n_edges = g.number_of_edges()

    # Create the GraphSAGE model.
    model = GraphSAGE(
        g,
        in_feats,
        args.n_hidden,
        n_classes,
        args.n_layers,
        nd.relu,
        args.dropout,
        args.aggregator_type,
    )

    model.initialize(ctx=ctx)
    n_train_samples = train_mask.sum().asscalar()
    loss_fcn = gluon.loss.SoftmaxCELoss()

    print(model.collect_params())
    trainer = gluon.Trainer(
        model.collect_params(),
        "adam",
        {"learning_rate": args.lr, "wd": args.weight_decay},
    )

    # Training loop; timing starts at epoch 3 to skip warm-up noise.
    dur = []
    for epoch in range(args.n_epochs):
        if epoch >= 3:
            t0 = time.time()
        # Forward pass under autograd; the sample-weight mask restricts
        # the loss to training nodes.
        with mx.autograd.record():
            pred = model(features)
            loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
            loss = loss.sum() / n_train_samples

        loss.backward()
        trainer.step(batch_size=1)

        if epoch >= 3:
            loss.asscalar()  # Block until compute finishes so timing is real.
            dur.append(time.time() - t0)
            acc = evaluate(model, features, labels, val_mask)
            print(
                "Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
                "ETputs(KTEPS) {:.2f}".format(
                    epoch,
                    np.mean(dur),
                    loss.asscalar(),
                    acc,
                    n_edges / np.mean(dur) / 1000,
                )
            )

    # Test set accuracy.
    acc = evaluate(model, features, labels, test_mask)
    print("Test accuracy {:.2%}".format(acc))


if __name__ == "__main__":
    # Command-line interface; defaults reproduce the reference settings.
    parser = argparse.ArgumentParser(description="GraphSAGE")
    register_data_args(parser)

    # (flag, type, default, help) — registered in this exact order.
    _flags = [
        ("--dropout", float, 0.5, "dropout probability"),
        ("--gpu", int, -1, "gpu"),
        ("--lr", float, 1e-2, "learning rate"),
        ("--n-epochs", int, 200, "number of training epochs"),
        ("--n-hidden", int, 16, "number of hidden gcn units"),
        ("--n-layers", int, 1, "number of hidden gcn layers"),
        ("--weight-decay", float, 5e-4, "Weight for L2 loss"),
        ("--aggregator-type", str, "gcn", "Aggregator type: mean/gcn/pool/lstm"),
    ]
    for _flag, _type, _default, _help in _flags:
        parser.add_argument(_flag, type=_type, default=_default, help=_help)

    args = parser.parse_args()
    print(args)
    main(args)