"""
This script trains and tests a GraphSAGE model for node classification
on large graphs using GraphBolt dataloader.

Paper: [Inductive Representation Learning on Large Graphs]
(https://arxiv.org/abs/1706.02216)

Unlike previous DGL examples, this one uses the newly defined dataloader
from GraphBolt. This example will help you grasp how to build an end-to-end
training pipeline using GraphBolt.

Before reading this example, please familiarize yourself with GraphSAGE node
classification by reading the example in
`examples/core/graphsage/node_classification.py`. This introduction,
[A Blitz Introduction to Node Classification with DGL]
(https://docs.dgl.ai/tutorials/blitz/1_introduction.html), might be helpful.

If you want to train GraphSAGE on a large graph in a distributed fashion,
please read the example in `examples/distributed/graphsage/`.

This flowchart describes the main functional sequence of the provided example:
main

├───> OnDiskDataset pre-processing

├───> Instantiate SAGE model

├───> train
│     │
│     ├───> Get graphbolt dataloader (HIGHLIGHT)
│     │
│     └───> Training loop
│           │
│           ├───> SAGE.forward
│           │
│           └───> Validation set evaluation

└───> All nodes set inference & Test set evaluation
"""
import argparse
import time

import dgl.graphbolt as gb
import dgl.nn as dglnn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics.functional as MF
from tqdm import tqdm


def create_dataloader(
    graph, features, itemset, batch_size, fanout, device, num_workers, job
):
    """
    [HIGHLIGHT]
    Get a GraphBolt version of a dataloader for node classification tasks.
    This function demonstrates how to utilize functional forms of datapipes in
    GraphBolt. For a more detailed tutorial, please read the examples in
    `dgl/notebooks/graphbolt/walkthrough.ipynb`.
    Alternatively, you can create a datapipe using its class constructor.
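    For example, assuming the functional form simply wraps the corresponding
    datapipe class, the two spellings below should be equivalent (a sketch,
    not executed here):

        datapipe = datapipe.sample_neighbor(graph, fanout)
        datapipe = gb.NeighborSampler(datapipe, graph, fanout)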

    Parameters
    ----------
    job : one of ["train", "evaluate", "infer"]
        The stage at which the dataloader is created, with options "train",
        "evaluate" and "infer".
    Other parameters are explained in the comments below.
    """

    ############################################################################
    # [Step-1]:
    # gb.ItemSampler()
    # [Input]:
    # 'itemset': The current dataset. (e.g. `train_set` or `valid_set`)
    # 'batch_size': Specify the number of samples to be processed together,
    # referred to as a 'mini-batch'. (The term 'mini-batch' is used here to
    # indicate a subset of the entire dataset that is processed together. This
    # is in contrast to processing the entire dataset, known as a 'full batch'.)
    # 'job': Determines whether data should be shuffled. (Shuffling is
    # generally used only in training to improve model generalization. It's
    # not used in validation and testing as the focus there is to evaluate
    # performance rather than to learn from the data.)
    # [Output]:
    # An ItemSampler object for handling mini-batch sampling.
    # [Role]:
    # Initialize the ItemSampler to sample mini-batches from the dataset.
    ############################################################################
    datapipe = gb.ItemSampler(
        itemset, batch_size=batch_size, shuffle=(job == "train")
    )
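    # Illustration only (a hedged sketch, not executed here): iterating the
    # ItemSampler alone would yield plain mini-batches of seed items, e.g.
    #   for minibatch in datapipe:
    #       minibatch.seed_nodes  # roughly `batch_size` node IDs from itemset
    #       minibatch.labels      # the corresponding labels, when present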

    ############################################################################
    # [Step-2]:
    # datapipe.sample_neighbor()
    # [Input]:
    # 'graph': The network topology for sampling.
    # '[-1] or fanout': Number of neighbors to sample per node. In
    # training or validation, the length of `fanout` should be equal to the
    # number of layers in the model. In inference, this parameter is set to
    # [-1], indicating that all neighbors of a node are sampled.
    # [Output]:
    # A NeighborSampler object to sample neighbors.
    # [Role]:
    # Initialize a neighbor sampler for sampling the neighborhoods of nodes.
    ############################################################################
    datapipe = datapipe.sample_neighbor(
        graph, fanout if job != "infer" else [-1]
    )
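
    # Back-of-the-envelope bound: with the default fanout [10, 10, 10], one
    # seed node expands to at most 10 first-hop neighbors, 10 * 10 = 100
    # second-hop, and 10 * 10 * 10 = 1000 third-hop neighbors, so a mini-batch
    # of B seeds touches on the order of B * (1 + 10 + 100 + 1000) nodes
    # before deduplication.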

    ############################################################################
    # [Step-3]:
    # datapipe.fetch_feature()
    # [Input]:
    # 'features': The node features.
    # 'node_feature_keys': The keys of the node features to be fetched.
    # [Output]:
    # A FeatureFetcher object to fetch node features.
    # [Role]:
    # Initialize a feature fetcher for fetching features of the sampled
    # subgraphs. This step is skipped in inference because layer-wise
    # inference reads the features of all nodes up front, so storing features
    # in each mini-batch is unnecessary.
    ############################################################################
    if job != "infer":
        datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])
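        # After this step, each mini-batch carries its input features, which
        # the training loop below reads as `data.node_features["feat"]`.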

    ############################################################################
    # [Step-4]:
    # datapipe.copy_to()
    # [Input]:
    # 'device': The device to copy the data to.
    # [Output]:
    # A CopyTo object to copy the data to the specified device.
    ############################################################################
    datapipe = datapipe.copy_to(device=device)
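    # Note: placing copy_to after feature fetching means only the small
    # per-mini-batch tensors are moved to `device`, not the full feature store.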

    ############################################################################
    # [Step-5]:
    # gb.DataLoader()
    # [Input]:
    # 'datapipe': The datapipe object to be used for data loading.
    # 'num_workers': The number of processes to be used for data loading.
    # [Output]:
    # A DataLoader object to handle data loading.
    # [Role]:
    # Initialize a multi-process dataloader to load the data in parallel.
    ############################################################################
    dataloader = gb.DataLoader(datapipe, num_workers=num_workers)
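    # With the script's default of num_workers=0, loading runs in the main
    # process; a positive value spawns that many worker processes instead.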

    # Return the fully-initialized DataLoader object.
    return dataloader


class SAGE(nn.Module):
    def __init__(self, in_size, hidden_size, out_size):
        super().__init__()
        self.layers = nn.ModuleList()
        # Three-layer GraphSAGE-mean.
        self.layers.append(dglnn.SAGEConv(in_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, out_size, "mean"))
        self.dropout = nn.Dropout(0.5)
        self.hidden_size = hidden_size
        self.out_size = out_size
        # Set the dtype for the layers manually.
        self.set_layer_dtype(torch.float32)

    def set_layer_dtype(self, _dtype):
        for layer in self.layers:
            for param in layer.parameters():
                param.data = param.data.to(_dtype)

    def forward(self, blocks, x):
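        # `blocks` holds one sampled computation graph (block/MFG) per layer,
        # ordered from the input layer to the output layer; `x` holds the
        # features of the source nodes of the first block.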
        hidden_x = x
        for layer_idx, (layer, block) in enumerate(zip(self.layers, blocks)):
            hidden_x = layer(block, hidden_x)
            is_last_layer = layer_idx == len(self.layers) - 1
            if not is_last_layer:
                hidden_x = F.relu(hidden_x)
                hidden_x = self.dropout(hidden_x)
        return hidden_x

    def inference(self, graph, features, dataloader, device):
        """Conduct layer-wise inference to get all the node embeddings."""
        feature = features.read("node", None, "feat")

        buffer_device = torch.device("cpu")
        # Enable pin_memory for faster CPU to GPU data transfer if the
        # model is running on a GPU.
        pin_memory = buffer_device != device

        for layer_idx, layer in enumerate(self.layers):
            is_last_layer = layer_idx == len(self.layers) - 1

            y = torch.empty(
                graph.total_num_nodes,
                self.out_size if is_last_layer else self.hidden_size,
                dtype=torch.float32,
                device=buffer_device,
                pin_memory=pin_memory,
            )
            feature = feature.to(device)

            for step, data in tqdm(enumerate(dataloader)):
                x = feature[data.input_nodes]
                hidden_x = layer(data.blocks[0], x)  # len(blocks) = 1
                if not is_last_layer:
                    hidden_x = F.relu(hidden_x)
                    hidden_x = self.dropout(hidden_x)
                # By design, our output nodes are contiguous.
                y[data.seed_nodes[0] : data.seed_nodes[-1] + 1] = hidden_x.to(
                    buffer_device
                )
            feature = y
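            # This layer's embeddings for all nodes become the input of the
            # next layer: inference proceeds layer by layer over the whole
            # graph rather than seed by seed.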

        return y


@torch.no_grad()
def layerwise_infer(
    args, graph, features, test_set, all_nodes_set, model, num_classes
):
    model.eval()
    dataloader = create_dataloader(
        graph=graph,
        features=features,
        itemset=all_nodes_set,
        batch_size=4 * args.batch_size,
        fanout=[-1],
        device=args.device,
        num_workers=args.num_workers,
        job="infer",
    )
    pred = model.inference(graph, features, dataloader, args.device)
    pred = pred[test_set._items[0]]
    label = test_set._items[1].to(pred.device)

    return MF.accuracy(
        pred,
        label,
        task="multiclass",
        num_classes=num_classes,
    )


@torch.no_grad()
def evaluate(args, model, graph, features, itemset, num_classes):
    model.eval()
    y = []
    y_hats = []
    dataloader = create_dataloader(
        graph=graph,
        features=features,
        itemset=itemset,
        batch_size=args.batch_size,
        fanout=args.fanout,
        device=args.device,
        num_workers=args.num_workers,
        job="evaluate",
    )

    for step, data in tqdm(enumerate(dataloader)):
        x = data.node_features["feat"]
        y.append(data.labels)
        y_hats.append(model(data.blocks, x))

    return MF.accuracy(
        torch.cat(y_hats),
        torch.cat(y),
        task="multiclass",
        num_classes=num_classes,
    )


def train(args, graph, features, train_set, valid_set, num_classes, model):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    dataloader = create_dataloader(
        graph=graph,
        features=features,
        itemset=train_set,
        batch_size=args.batch_size,
        fanout=args.fanout,
        device=args.device,
        num_workers=args.num_workers,
        job="train",
    )

    for epoch in range(args.epochs):
        t0 = time.time()
        model.train()
        total_loss = 0
        for step, data in enumerate(dataloader):
            # The input features from the source nodes in the first layer's
            # computation graph.
            x = data.node_features["feat"]

            # The ground truth labels from the destination nodes
            # in the last layer's computation graph.
            y = data.labels

            y_hat = model(data.blocks, x)

            # Compute loss.
            loss = F.cross_entropy(y_hat, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        t1 = time.time()
        # Evaluate the model.
        acc = evaluate(args, model, graph, features, valid_set, num_classes)
        print(
            f"Epoch {epoch:05d} | Loss {total_loss / (step + 1):.4f} | "
            f"Accuracy {acc.item():.4f} | Time {t1 - t0:.4f}"
        )


def parse_args():
    parser = argparse.ArgumentParser(
        description="A script that trains and tests a GraphSAGE model "
        "for node classification using the GraphBolt dataloader."
    )
    parser.add_argument(
        "--epochs", type=int, default=10, help="Number of training epochs."
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.0005,
        help="Learning rate for optimization.",
    )
    parser.add_argument(
        "--batch-size", type=int, default=1024, help="Batch size for training."
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=0,
        help="Number of workers for data loading.",
    )
    parser.add_argument(
        "--fanout",
        type=str,
        default="10,10,10",
        help="Fan-out of neighbor sampling. It is IMPORTANT to keep len(fanout)"
349
        " identical with the number of layers in your model. Default: 10,10,10",
    )
    parser.add_argument(
        "--device",
        default="cpu",
        choices=["cpu", "cuda"],
        help="Train device: 'cpu' for CPU, 'cuda' for GPU.",
    )
    return parser.parse_args()


def main(args):
    if not torch.cuda.is_available():
        args.device = "cpu"
    print(f"Training in {args.device} mode.")
    args.device = torch.device(args.device)

    # Load and preprocess dataset.
    print("Loading data...")
    dataset = gb.BuiltinDataset("ogbn-products").load()

    graph = dataset.graph
    # Currently the neighbor-sampling process can only be done on the CPU;
    # therefore, there is no need to copy the graph to the GPU.
    features = dataset.feature
    train_set = dataset.tasks[0].train_set
    valid_set = dataset.tasks[0].validation_set
    test_set = dataset.tasks[0].test_set
    all_nodes_set = dataset.all_nodes_set
    args.fanout = list(map(int, args.fanout.split(",")))
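    # e.g. the default "10,10,10" becomes [10, 10, 10]: one fanout per layer.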

    num_classes = dataset.tasks[0].metadata["num_classes"]

    in_size = features.size("node", None, "feat")[0]
    hidden_size = 256
    out_size = num_classes

    model = SAGE(in_size, hidden_size, out_size)
    assert len(args.fanout) == len(model.layers)
    model = model.to(args.device)

    # Model training.
    print("Training...")
    train(args, graph, features, train_set, valid_set, num_classes, model)

    # Test the model.
    print("Testing...")
    test_acc = layerwise_infer(
        args,
        graph,
        features,
        test_set,
        all_nodes_set,
        model,
        num_classes,
    )
    print(f"Test accuracy {test_acc.item():.4f}")


if __name__ == "__main__":
    args = parse_args()
    main(args)