"""
This script trains and tests a GraphSAGE model for node classification
on large graphs using the GraphBolt dataloader.

Paper: [Inductive Representation Learning on Large Graphs]
(https://arxiv.org/abs/1706.02216)

Unlike previous DGL examples, we utilize the newly defined dataloader from
GraphBolt. This example will help you grasp how to build an end-to-end
training pipeline using GraphBolt.

Before reading this example, please familiarize yourself with GraphSAGE node
classification by reading the example in
`examples/core/graphsage/node_classification.py`. This introduction,
[A Blitz Introduction to Node Classification with DGL]
(https://docs.dgl.ai/tutorials/blitz/1_introduction.html), might be helpful.

If you want to train GraphSAGE on a large graph in a distributed fashion,
please read the example in `examples/distributed/graphsage/`.

This flowchart describes the main functional sequence of the provided example:

main
│
├───> OnDiskDataset pre-processing
│
├───> Instantiate SAGE model
│
├───> train
│     │
│     ├───> Get graphbolt dataloader (HIGHLIGHT)
│     │
│     └───> Training loop
│           │
│           ├───> SAGE.forward
│           │
│           └───> Validation set evaluation
│
└───> All nodes set inference & Test set evaluation
"""
import argparse
import time

import dgl.graphbolt as gb
import dgl.nn as dglnn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics.functional as MF
from tqdm import tqdm


def create_dataloader(
    graph, features, itemset, batch_size, fanout, device, num_workers, job
):
    """
    [HIGHLIGHT]
    Get a GraphBolt version of a dataloader for node classification tasks.
    This function demonstrates how to utilize functional forms of datapipes in
    GraphBolt. For a more detailed tutorial, please read the examples in
    `dgl/notebooks/graphbolt/walkthrough.ipynb`.
    Alternatively, you can create a datapipe using its class constructor.
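    For example (a sketch; exact constructor signatures may vary across DGL
    versions), the functional form

        datapipe = datapipe.sample_neighbor(graph, fanout)

    is equivalent to constructing the datapipe directly:

        datapipe = gb.NeighborSampler(datapipe, graph, fanout)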

    Parameters
    ----------
    job : one of ["train", "evaluate", "infer"]
        The stage where the dataloader is created, with options "train",
        "evaluate" and "infer".
    Other parameters are explained in the comments below.
    """

    ############################################################################
    # [Step-1]:
    # gb.ItemSampler()
    # [Input]:
    # 'itemset': The current dataset. (e.g. `train_set` or `valid_set`)
    # 'batch_size': Specify the number of samples to be processed together,
    # referred to as a 'mini-batch'. (The term 'mini-batch' is used here to
    # indicate a subset of the entire dataset that is processed together. This
    # is in contrast to processing the entire dataset, known as a 'full batch'.)
    # 'job': Determines whether data should be shuffled. (Shuffling is
    # generally used only in training to improve model generalization. It's
    # not used in validation and testing as the focus there is to evaluate
    # performance rather than to learn from the data.)
    # [Output]:
    # An ItemSampler object for handling mini-batch sampling.
    # [Role]:
    # Initialize the ItemSampler to sample mini-batches from the dataset.
    ############################################################################
    datapipe = gb.ItemSampler(
        itemset, batch_size=batch_size, shuffle=(job == "train")
    )
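    # Each iteration over this datapipe yields a `MiniBatch` holding up to
    # `batch_size` seed node IDs (plus their labels, when the itemset provides
    # them), consumed later in this file via `data.seed_nodes` / `data.labels`.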

    ############################################################################
    # [Step-2]:
    # self.copy_to()
    # [Input]:
    # 'device': The device to copy the data to.
    # 'extra_attrs': The extra attributes to copy.
    # [Output]:
    # A CopyTo object to copy the data to the specified device. Copying here
    # ensures that the rest of the operations run on the GPU.
    ############################################################################
    if args.storage_device != "cpu":
        datapipe = datapipe.copy_to(device=device, extra_attrs=["seed_nodes"])

    ############################################################################
    # [Step-3]:
    # self.sample_neighbor()
    # [Input]:
    # 'graph': The network topology for sampling.
    # '[-1] or fanout': Number of neighbors to sample per node. In
    # training or validation, the length of `fanout` should be equal to the
    # number of layers in the model. In inference, this parameter is set to
    # [-1], indicating that all neighbors of a node are sampled.
    # [Output]:
    # A NeighborSampler object to sample neighbors.
    # [Role]:
    # Initialize a neighbor sampler for sampling the neighborhoods of nodes.
    ############################################################################
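    # `getattr(datapipe, args.sample_mode)` resolves to either
    # `datapipe.sample_neighbor` or `datapipe.sample_layer_neighbor`
    # (the two choices of --sample-mode), so the call below is equivalent to,
    # e.g.:
    #     datapipe = datapipe.sample_neighbor(graph, fanout)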
    datapipe = getattr(datapipe, args.sample_mode)(
        graph, fanout if job != "infer" else [-1]
    )

    ############################################################################
    # [Step-4]:
    # self.fetch_feature()
    # [Input]:
    # 'features': The node features.
    # 'node_feature_keys': The keys of the node features to be fetched.
    # [Output]:
    # A FeatureFetcher object to fetch node features.
    # [Role]:
    # Initialize a feature fetcher for fetching features of the sampled
    # subgraphs.
    ############################################################################
    datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])
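    # After this step, each MiniBatch carries the fetched features, accessed
    # during training and evaluation as `data.node_features["feat"]`.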

    ############################################################################
    # [Step-5]:
    # self.copy_to()
    # [Input]:
    # 'device': The device to copy the data to.
    # [Output]:
    # A CopyTo object to copy the data to the specified device.
    ############################################################################
    if args.storage_device == "cpu":
        datapipe = datapipe.copy_to(device=device)
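    # With CPU-resident storage, each MiniBatch is copied to the training
    # device only after sampling and feature fetching, so a batch crosses the
    # host-to-device boundary exactly once, fully formed.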

    ############################################################################
    # [Step-6]:
    # gb.DataLoader()
    # [Input]:
    # 'datapipe': The datapipe object to be used for data loading.
    # 'num_workers': The number of processes to be used for data loading.
    # [Output]:
    # A DataLoader object to handle data loading.
    # [Role]:
    # Initialize a multi-process dataloader to load the data in parallel.
    ############################################################################
    dataloader = gb.DataLoader(
        datapipe,
        num_workers=num_workers,
        overlap_graph_fetch=args.overlap_graph_fetch,
    )

    # Return the fully-initialized DataLoader object.
    return dataloader


class SAGE(nn.Module):
    def __init__(self, in_size, hidden_size, out_size):
        super().__init__()
        self.layers = nn.ModuleList()
        # Three-layer GraphSAGE-mean.
        self.layers.append(dglnn.SAGEConv(in_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, out_size, "mean"))
        self.dropout = nn.Dropout(0.5)
        self.hidden_size = hidden_size
        self.out_size = out_size
        # Set the dtype for the layers manually.
        self.set_layer_dtype(torch.float32)

    def set_layer_dtype(self, _dtype):
        for layer in self.layers:
            for param in layer.parameters():
                param.data = param.data.to(_dtype)

    def forward(self, blocks, x):
        hidden_x = x
        for layer_idx, (layer, block) in enumerate(zip(self.layers, blocks)):
            hidden_x = layer(block, hidden_x)
            is_last_layer = layer_idx == len(self.layers) - 1
            if not is_last_layer:
                hidden_x = F.relu(hidden_x)
                hidden_x = self.dropout(hidden_x)
        return hidden_x

    def inference(self, graph, features, dataloader, storage_device):
        """Conduct layer-wise inference to get all the node embeddings."""
        pin_memory = storage_device == "pinned"
        buffer_device = torch.device("cpu" if pin_memory else storage_device)

        for layer_idx, layer in enumerate(self.layers):
            is_last_layer = layer_idx == len(self.layers) - 1

            y = torch.empty(
                graph.total_num_nodes,
                self.out_size if is_last_layer else self.hidden_size,
                dtype=torch.float32,
                device=buffer_device,
                pin_memory=pin_memory,
            )
            for data in tqdm(dataloader):
                # len(blocks) = 1
                hidden_x = layer(data.blocks[0], data.node_features["feat"])
                if not is_last_layer:
                    hidden_x = F.relu(hidden_x)
                    hidden_x = self.dropout(hidden_x)
                # By design, our output nodes are contiguous.
                y[data.seed_nodes[0] : data.seed_nodes[-1] + 1] = hidden_x.to(
                    buffer_device
                )
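            # Feed this layer's full-graph output into the next layer by
            # overwriting the "feat" entry in the feature store.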
            if not is_last_layer:
                features.update("node", None, "feat", y)

        return y


@torch.no_grad()
def layerwise_infer(
    args, graph, features, test_set, all_nodes_set, model, num_classes
):
    model.eval()
    dataloader = create_dataloader(
        graph=graph,
        features=features,
        itemset=all_nodes_set,
        batch_size=4 * args.batch_size,
        fanout=[-1],
        device=args.device,
        num_workers=args.num_workers,
        job="infer",
    )
    pred = model.inference(graph, features, dataloader, args.storage_device)
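    # `test_set._items` packs (node IDs, labels): index the full-graph
    # predictions by the test node IDs and compare against the test labels.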
    pred = pred[test_set._items[0]]
    label = test_set._items[1].to(pred.device)

    return MF.accuracy(
        pred,
        label,
        task="multiclass",
        num_classes=num_classes,
    )


@torch.no_grad()
def evaluate(args, model, graph, features, itemset, num_classes):
    model.eval()
    y = []
    y_hats = []
    dataloader = create_dataloader(
        graph=graph,
        features=features,
        itemset=itemset,
        batch_size=args.batch_size,
        fanout=args.fanout,
        device=args.device,
        num_workers=args.num_workers,
        job="evaluate",
    )

    for step, data in tqdm(enumerate(dataloader), "Evaluating"):
        x = data.node_features["feat"]
        y.append(data.labels)
        y_hats.append(model(data.blocks, x))

    return MF.accuracy(
        torch.cat(y_hats),
        torch.cat(y),
        task="multiclass",
        num_classes=num_classes,
    )


def train(args, graph, features, train_set, valid_set, num_classes, model):
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=5e-4
    )
    dataloader = create_dataloader(
        graph=graph,
        features=features,
        itemset=train_set,
        batch_size=args.batch_size,
        fanout=args.fanout,
        device=args.device,
        num_workers=args.num_workers,
        job="train",
    )

    for epoch in range(args.epochs):
        t0 = time.time()
        model.train()
        total_loss = 0
        for step, data in tqdm(enumerate(dataloader), "Training"):
            # The input features from the source nodes in the first layer's
            # computation graph.
            x = data.node_features["feat"]

            # The ground truth labels from the destination nodes
            # in the last layer's computation graph.
            y = data.labels

            y_hat = model(data.blocks, x)

            # Compute loss.
            loss = F.cross_entropy(y_hat, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        t1 = time.time()
        # Evaluate the model.
        acc = evaluate(args, model, graph, features, valid_set, num_classes)
        print(
            f"Epoch {epoch:05d} | Loss {total_loss / (step + 1):.4f} | "
            f"Accuracy {acc.item():.4f} | Time {t1 - t0:.4f}"
        )


def parse_args():
    parser = argparse.ArgumentParser(
        description="A script trains and tests a GraphSAGE model "
        "for node classification using GraphBolt dataloader."
    )
    parser.add_argument(
        "--epochs", type=int, default=10, help="Number of training epochs."
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=1e-3,
        help="Learning rate for optimization.",
    )
    parser.add_argument(
        "--batch-size", type=int, default=1024, help="Batch size for training."
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=0,
        help="Number of workers for data loading.",
    )
    parser.add_argument(
        "--fanout",
        type=str,
        default="10,10,10",
        help="Fan-out of neighbor sampling. It is IMPORTANT to keep len(fanout)"
        " identical with the number of layers in your model. Default: 10,10,10",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        default="ogbn-products",
        choices=["ogbn-arxiv", "ogbn-products", "ogbn-papers100M"],
        help="The dataset we can use for node classification example. Currently"
370
        " ogbn-products, ogbn-arxiv, ogbn-papers100M datasets are supported.",
    )
    parser.add_argument(
        "--mode",
        default="pinned-cuda",
        choices=["cpu-cpu", "cpu-cuda", "pinned-cuda", "cuda-cuda"],
        help="Dataset storage placement and Train device: 'cpu' for CPU and RAM,"
        " 'pinned' for pinned memory in RAM, 'cuda' for GPU and GPU memory.",
    )
    parser.add_argument(
        "--sample-mode",
        default="sample_neighbor",
        choices=["sample_neighbor", "sample_layer_neighbor"],
        help="The sampling function when doing layerwise sampling.",
    )
    parser.add_argument(
        "--overlap-graph-fetch",
        action="store_true",
        help="An option for enabling overlap_graph_fetch in graphbolt dataloader."
        "If True, the data loader will overlap the UVA graph fetching operations"
        "with the rest of operations by using an alternative CUDA stream. Disabled"
        "by default.",
    )
    return parser.parse_args()


def main(args):
    if not torch.cuda.is_available():
        args.mode = "cpu-cpu"
    print(f"Training in {args.mode} mode.")
    args.storage_device, args.device = args.mode.split("-")
    args.device = torch.device(args.device)

    # Load and preprocess dataset.
    print("Loading data...")
    dataset = gb.BuiltinDataset(args.dataset).load()

    # Move the dataset to the selected storage.
    if args.storage_device == "pinned":
        graph = dataset.graph.pin_memory_()
        features = dataset.feature.pin_memory_()
    else:
        graph = dataset.graph.to(args.storage_device)
        features = dataset.feature.to(args.storage_device)

    train_set = dataset.tasks[0].train_set
    valid_set = dataset.tasks[0].validation_set
    test_set = dataset.tasks[0].test_set
    all_nodes_set = dataset.all_nodes_set
    args.fanout = list(map(int, args.fanout.split(",")))
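    # The fanout string, e.g. "10,10,10", is now [10, 10, 10]: one entry per
    # GNN layer, as checked by the assert below.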

    num_classes = dataset.tasks[0].metadata["num_classes"]

    in_size = features.size("node", None, "feat")[0]
    hidden_size = 256
    out_size = num_classes

    model = SAGE(in_size, hidden_size, out_size)
    assert len(args.fanout) == len(model.layers)
    model = model.to(args.device)

    # Model training.
    print("Training...")
    train(args, graph, features, train_set, valid_set, num_classes, model)

    # Test the model.
    print("Testing...")
    test_acc = layerwise_infer(
        args,
        graph,
        features,
        test_set,
        all_nodes_set,
        model,
        num_classes,
    )
    print(f"Test accuracy {test_acc.item():.4f}")


if __name__ == "__main__":
    args = parse_args()
    main(args)