"""
This script trains and tests a GraphSAGE model for node classification
on large graphs using the GraphBolt dataloader.

Paper: [Inductive Representation Learning on Large Graphs]
(https://arxiv.org/abs/1706.02216)

Unlike previous DGL examples, this one uses the newly defined dataloader
from GraphBolt. It will help you grasp how to build an end-to-end
training pipeline using GraphBolt.

Before reading this example, please familiarize yourself with GraphSAGE
node classification by reading the example in
`examples/core/graphsage/node_classification.py`. This introduction,
[A Blitz Introduction to Node Classification with DGL]
(https://docs.dgl.ai/tutorials/blitz/1_introduction.html), might be helpful.

If you want to train GraphSAGE on a large graph in a distributed fashion,
please read the example in `examples/distributed/graphsage/`.

This flowchart describes the main functional sequence of the provided example:
main
│
├───> OnDiskDataset pre-processing
│
├───> Instantiate SAGE model
│
├───> train
│     │
│     ├───> Get graphbolt dataloader (HIGHLIGHT)
│     │
│     └───> Training loop
│           │
│           ├───> SAGE.forward
│           │
│           └───> Validation set evaluation
│
└───> All nodes set inference & Test set evaluation
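
Example usage (all flags are defined in `parse_args()` below; a typical GPU
run might look like this):

    python node_classification.py --device cuda --fanout 15,10,5 --epochs 10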
"""
import argparse

import dgl.graphbolt as gb
import dgl.nn as dglnn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics.functional as MF
from tqdm import tqdm


def create_dataloader(args, graph, features, itemset, job):
    """
    [HIGHLIGHT]
    Get a GraphBolt version of a dataloader for node classification tasks.
    This function demonstrates how to utilize functional forms of datapipes in
    GraphBolt.
    Alternatively, you can create a datapipe using its class constructor.

    Parameters
    ----------
    args : Namespace
        The arguments parsed by `parser.parse_args()`.
    graph : SamplingGraph
        The network topology for sampling.
    features : FeatureStore
        The node features.
    itemset : Union[ItemSet, ItemSetDict]
        Data to be sampled.
    job : str
        The stage for which the dataloader is created, one of "train",
        "evaluate" and "infer".

    Returns
    -------
    gb.MultiProcessDataLoader
        The fully-initialized dataloader.
    """

    ############################################################################
    # [Step-1]:
    # gb.ItemSampler()
    # [Input]:
    # 'itemset': The current dataset. (e.g. `train_set` or `valid_set`)
    # 'args.batch_size': Specify the number of samples to be processed together,
    # referred to as a 'mini-batch'. (The term 'mini-batch' is used here to
    # indicate a subset of the entire dataset that is processed together. This
    # is in contrast to processing the entire dataset, known as a 'full batch'.)
    # 'job': Determines whether data should be shuffled. (Shuffling is
    # generally used only in training to improve model generalization. It's
    # not used in validation and testing as the focus there is to evaluate
    # performance rather than to learn from the data.)
    # [Output]:
    # An ItemSampler object for handling mini-batch sampling.
    # [Role]:
    # Initialize the ItemSampler to sample mini-batches from the dataset.
    ############################################################################
    datapipe = gb.ItemSampler(
        itemset, batch_size=args.batch_size, shuffle=(job == "train")
    )

    ############################################################################
    # [Step-2]:
    # datapipe.sample_neighbor()
    # [Input]:
    # 'graph': The network topology for sampling.
    # 'args.fanout or [-1]': The number of neighbors to sample per node. In
    # training and validation, the length of args.fanout should equal the
    # number of layers in the model. In inference, this parameter is set to
    # [-1], indicating that all neighbors of a node are sampled.
    # [Output]:
    # A NeighborSampler object to sample neighbors.
    # [Role]:
    # Initialize a neighbor sampler for sampling the neighborhoods of nodes.
    ############################################################################
    datapipe = datapipe.sample_neighbor(
        graph, args.fanout if job != "infer" else [-1]
    )

    ############################################################################
    # [Step-3]:
    # datapipe.fetch_feature()
    # [Input]:
    # 'features': The node features.
    # 'node_feature_keys': The keys of the node features to be fetched.
    # [Output]:
    # A FeatureFetcher object to fetch node features.
    # [Role]:
    # Initialize a feature fetcher for fetching features of the sampled
    # subgraphs. This step is skipped in inference because inference keeps
    # the full feature tensor and updates it layer by layer, so storing
    # features per mini-batch is unnecessary.
    ############################################################################
    if job != "infer":
        datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])

    ############################################################################
    # [Step-4]:
    # datapipe.to_dgl()
    # [Input]:
    # 'datapipe': The previous datapipe object.
    # [Output]:
    # A DGLMiniBatch used for computing.
    # [Role]:
    # Convert a mini-batch to a DGLMiniBatch.
    ############################################################################
    datapipe = datapipe.to_dgl()

    ############################################################################
    # [Step-5]:
    # datapipe.copy_to()
    # [Input]:
    # 'device': The device to copy the data to.
    # [Output]:
    # A CopyTo object to copy the data to the specified device.
    # [Role]:
    # Copy the mini-batch to the device the model will run on.
    ############################################################################
    datapipe = datapipe.copy_to(device=args.device)

    ############################################################################
    # [Step-6]:
    # gb.MultiProcessDataLoader()
    # [Input]:
    # 'datapipe': The datapipe object to be used for data loading.
    # 'args.num_workers': The number of processes to be used for data loading.
    # [Output]:
    # A MultiProcessDataLoader object to handle data loading.
    # [Role]:
    # Initialize a multi-process dataloader to load the data in parallel.
    ############################################################################
    dataloader = gb.MultiProcessDataLoader(
        datapipe, num_workers=args.num_workers
    )

    # Return the fully-initialized DataLoader object.
    return dataloader
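
# For reference, the same pipeline can be assembled from the datapipe class
# constructors instead of the functional forms used above. This is only a
# sketch: it assumes the constructor counterparts (gb.NeighborSampler,
# gb.FeatureFetcher, gb.CopyTo) take the previous datapipe as their first
# argument, mirroring how the functional forms chain, and it omits the
# to_dgl() step, whose class-constructor counterpart is not shown here.
#
#     datapipe = gb.ItemSampler(itemset, batch_size=batch_size, shuffle=True)
#     datapipe = gb.NeighborSampler(datapipe, graph, fanouts)
#     datapipe = gb.FeatureFetcher(datapipe, features, node_feature_keys=["feat"])
#     datapipe = gb.CopyTo(datapipe, device)
#     dataloader = gb.MultiProcessDataLoader(datapipe, num_workers=num_workers)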


class SAGE(nn.Module):
    def __init__(self, in_size, hidden_size, out_size):
        super().__init__()
        self.layers = nn.ModuleList()
        # Three-layer GraphSAGE-mean.
        self.layers.append(dglnn.SAGEConv(in_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, out_size, "mean"))
        self.dropout = nn.Dropout(0.5)
        self.hidden_size = hidden_size
        self.out_size = out_size
        # Set the dtype for the layers manually.
        self.set_layer_dtype(torch.float64)

    def set_layer_dtype(self, _dtype):
        for layer in self.layers:
            for param in layer.parameters():
                param.data = param.data.to(_dtype)

    def forward(self, blocks, x):
        hidden_x = x
        for layer_idx, (layer, block) in enumerate(zip(self.layers, blocks)):
            hidden_x = layer(block, hidden_x)
            is_last_layer = layer_idx == len(self.layers) - 1
            if not is_last_layer:
                hidden_x = F.relu(hidden_x)
                hidden_x = self.dropout(hidden_x)
        return hidden_x

    def inference(self, graph, features, dataloader, device):
        """Conduct layer-wise inference to get all the node embeddings."""
        feature = features.read("node", None, "feat")

        buffer_device = torch.device("cpu")
        # Enable pin_memory for faster CPU to GPU data transfer if the
        # model is running on a GPU.
        pin_memory = buffer_device != device

        for layer_idx, layer in enumerate(self.layers):
            is_last_layer = layer_idx == len(self.layers) - 1

            y = torch.empty(
                graph.total_num_nodes,
                self.out_size if is_last_layer else self.hidden_size,
                dtype=torch.float64,
                device=buffer_device,
                pin_memory=pin_memory,
            )
            feature = feature.to(device)

            for data in tqdm(dataloader):
                x = feature[data.input_nodes]
                hidden_x = layer(data.blocks[0], x)  # len(blocks) = 1
                if not is_last_layer:
                    hidden_x = F.relu(hidden_x)
                    hidden_x = self.dropout(hidden_x)
                # By design, our output nodes are contiguous.
                y[
                    data.output_nodes[0] : data.output_nodes[-1] + 1
                ] = hidden_x.to(buffer_device)
            feature = y

        return y


@torch.no_grad()
def layerwise_infer(
    args, graph, features, test_set, all_nodes_set, model, num_classes
):
    model.eval()
    dataloader = create_dataloader(
        args, graph, features, all_nodes_set, job="infer"
    )
    pred = model.inference(graph, features, dataloader, args.device)
    # `test_set._items` stores (node IDs, labels): index the predictions by
    # the test node IDs and move the labels onto the predictions' device.
    pred = pred[test_set._items[0]]
    label = test_set._items[1].to(pred.device)

    return MF.accuracy(
        pred,
        label,
        task="multiclass",
        num_classes=num_classes,
    )


@torch.no_grad()
def evaluate(args, model, graph, features, itemset, num_classes):
    model.eval()
    y = []
    y_hats = []
    dataloader = create_dataloader(
        args, graph, features, itemset, job="evaluate"
    )

    for data in tqdm(dataloader):
        x = data.node_features["feat"]
        y.append(data.labels)
        y_hats.append(model(data.blocks, x))

    return MF.accuracy(
        torch.cat(y_hats),
        torch.cat(y),
        task="multiclass",
        num_classes=num_classes,
    )


def train(args, graph, features, train_set, valid_set, num_classes, model):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    dataloader = create_dataloader(
        args, graph, features, train_set, job="train"
    )

    for epoch in range(args.epochs):
        model.train()
        total_loss = 0
        for step, data in tqdm(enumerate(dataloader)):
            # The input features from the source nodes in the first layer's
            # computation graph.
            x = data.node_features["feat"]

            # The ground truth labels from the destination nodes
            # in the last layer's computation graph.
            y = data.labels

            y_hat = model(data.blocks, x)

            # Compute loss.
            loss = F.cross_entropy(y_hat, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        # Evaluate the model.
        acc = evaluate(args, model, graph, features, valid_set, num_classes)
        print(
            f"Epoch {epoch:05d} | Loss {total_loss / (step + 1):.4f} | "
            f"Accuracy {acc.item():.4f} "
        )


def parse_args():
    parser = argparse.ArgumentParser(
        description="A script trains and tests a GraphSAGE model "
        "for node classification using GraphBolt dataloader."
    )
    parser.add_argument(
        "--epochs", type=int, default=10, help="Number of training epochs."
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.0005,
        help="Learning rate for optimization.",
    )
    parser.add_argument(
        "--batch-size", type=int, default=256, help="Batch size for training."
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of workers for data loading.",
    )
    parser.add_argument(
        "--fanout",
        type=str,
        default="15,10,5",
        help="Fan-out of neighbor sampling. It is IMPORTANT to keep len(fanout)"
        " identical with the number of layers in your model. Default: 15,10,5",
    )
    parser.add_argument(
        "--device",
        default="cpu",
        choices=["cpu", "cuda"],
        help="Train device: 'cpu' for CPU, 'cuda' for GPU.",
    )
    return parser.parse_args()


def main(args):
    if not torch.cuda.is_available():
        args.device = "cpu"
    print(f"Training in {args.device} mode.")
    args.device = torch.device(args.device)

    # Load and preprocess dataset.
    print("Loading data...")
    dataset = gb.BuiltinDataset("ogbn-products").load()

    graph = dataset.graph
    # Currently the neighbor-sampling process can only be done on the CPU,
    # therefore there is no need to copy the graph to the GPU.
    features = dataset.feature
    train_set = dataset.tasks[0].train_set
    valid_set = dataset.tasks[0].validation_set
    test_set = dataset.tasks[0].test_set
    all_nodes_set = dataset.all_nodes_set
    # Convert the comma-separated fanout string into a list of ints,
    # e.g. "15,10,5" -> [15, 10, 5].
    args.fanout = list(map(int, args.fanout.split(",")))

    num_classes = dataset.tasks[0].metadata["num_classes"]

    # The input size is the dimensionality of the node features.
    in_size = features.size("node", None, "feat")[0]
    hidden_size = 128
    out_size = num_classes

    model = SAGE(in_size, hidden_size, out_size)
    assert len(args.fanout) == len(model.layers)
    model = model.to(args.device)

    # Model training.
    print("Training...")
    train(args, graph, features, train_set, valid_set, num_classes, model)

    # Test the model.
    print("Testing...")
    test_acc = layerwise_infer(
        args,
        graph,
        features,
        test_set,
        all_nodes_set,
        model,
        num_classes,
    )
    print(f"Test Accuracy is {test_acc.item():.4f}")


if __name__ == "__main__":
    args = parse_args()
    main(args)