"""
This script trains and tests a GraphSAGE model for link prediction on
large graphs using the GraphBolt dataloader.

Paper: [Inductive Representation Learning on Large Graphs]
(https://arxiv.org/abs/1706.02216)

Unlike previous DGL examples, this one uses the newly defined dataloader
from GraphBolt. It will help you grasp how to build an end-to-end
training pipeline using GraphBolt.

While node classification predicts labels for nodes based on their
local neighborhoods, link prediction assesses the likelihood of an edge
existing between two nodes, necessitating different sampling strategies
that account for pairs of nodes and their joint neighborhoods.

TODO: Add the link_prediction.py example to core/graphsage.
Before reading this example, please familiarize yourself with GraphSAGE
link prediction by reading the example in
`examples/core/graphsage/link_prediction.py`.

If you want to train GraphSAGE on a large graph in a distributed fashion, see
the example in `examples/distributed/graphsage/`.

This flowchart describes the main functional sequence of the provided example.
main
│
├───> OnDiskDataset pre-processing
│
├───> Instantiate SAGE model
│
├───> train
│     │
│     ├───> Get graphbolt dataloader (HIGHLIGHT)
│     │
│     └───> Training loop
│           │
│           ├───> SAGE.forward
│           │
│           └───> Validation set evaluation
│
└───> Test set evaluation
"""
import argparse
import time
from functools import partial

import dgl.graphbolt as gb
import dgl.nn as dglnn
import torch
import torch.nn as nn
import torch.nn.functional as F
import tqdm
from ogb.linkproppred import Evaluator


class SAGE(nn.Module):
    def __init__(self, in_size, hidden_size):
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers.append(dglnn.SAGEConv(in_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, hidden_size, "mean"))
        self.hidden_size = hidden_size
        self.predictor = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
        )

    def forward(self, blocks, x):
        hidden_x = x
        for layer_idx, (layer, block) in enumerate(zip(self.layers, blocks)):
            hidden_x = layer(block, hidden_x)
            is_last_layer = layer_idx == len(self.layers) - 1
            if not is_last_layer:
                hidden_x = F.relu(hidden_x)
        return hidden_x

    def inference(self, graph, features, dataloader, device):
        """Conduct layer-wise inference to get all the node embeddings."""
        feature = features.read("node", None, "feat")

        buffer_device = torch.device("cpu")
        # Enable pin_memory for faster CPU to GPU data transfer if the
        # model is running on a GPU.
        pin_memory = buffer_device != device

        print("Start node embedding inference.")
        for layer_idx, layer in enumerate(self.layers):
            is_last_layer = layer_idx == len(self.layers) - 1

            y = torch.empty(
                graph.total_num_nodes,
                self.hidden_size,
                dtype=torch.float32,
                device=buffer_device,
                pin_memory=pin_memory,
            )
            feature = feature.to(device)
            for step, data in tqdm.tqdm(enumerate(dataloader)):
                x = feature[data.input_nodes]
                hidden_x = layer(data.blocks[0], x)  # len(blocks) = 1
                if not is_last_layer:
                    hidden_x = F.relu(hidden_x)
                # By design, our seed nodes are contiguous.
                y[data.seed_nodes[0] : data.seed_nodes[-1] + 1] = hidden_x.to(
                    buffer_device, non_blocking=True
                )
            feature = y

        return y


def create_dataloader(args, graph, features, itemset, is_train=True):
    """Get a GraphBolt version of a dataloader for link prediction tasks. This
    function demonstrates how to utilize functional forms of datapipes in
    GraphBolt. Alternatively, you can create a datapipe using its class
    constructor. For a more detailed tutorial, please read the examples in
    `dgl/notebooks/graphbolt/walkthrough.ipynb`.
    """

    ############################################################################
    # [Input]:
    # 'itemset': The current dataset.
    # 'args.batch_size': Specify the number of samples to be processed together,
    # referred to as a 'mini-batch'. (The term 'mini-batch' is used here to
    # indicate a subset of the entire dataset that is processed together. This
    # is in contrast to processing the entire dataset, known as a 'full batch'.)
    # 'is_train': Determines whether the data should be shuffled. (Shuffling is
    # generally used only in training to improve model generalization. It's
    # not used in validation and testing as the focus there is to evaluate
    # performance rather than to learn from the data.)
    # [Output]:
    # An ItemSampler object for handling mini-batch sampling.
    # [Role]:
    # Initialize the ItemSampler to sample mini-batches from the dataset.
    ############################################################################
    datapipe = gb.ItemSampler(
        itemset,
        batch_size=args.train_batch_size if is_train else args.eval_batch_size,
        shuffle=is_train,
    )
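
    # An illustrative note (not from the original): iterating over `datapipe`
    # at this point yields gb.MiniBatch objects, each carrying one batch of
    # seed node pairs (src, dst) drawn from `itemset`.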

    ############################################################################
    # [Input]:
    # 'args.neg_ratio': Specify the ratio of negative to positive samples.
    # (E.g., if neg_ratio is 1, for each positive sample there will be 1
    # negative sample.)
    # 'graph': The overall network topology for negative sampling.
    # [Output]:
    # A UniformNegativeSampler object that will handle the generation of
    # negative samples for link prediction tasks.
    # [Role]:
    # Initialize the UniformNegativeSampler for negative sampling in link
    # prediction.
    # [Note]:
    # If 'is_train' is False, the UniformNegativeSampler will not be used,
    # since in validation and testing the itemset already contains the
    # negative edge information.
    ############################################################################
    if is_train:
        datapipe = datapipe.sample_uniform_negative(graph, args.neg_ratio)
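
        # Illustration (hedged): with neg_ratio=1, each positive pair (u, v)
        # in a batch is matched by one corrupted pair (u, v') whose
        # destination v' is drawn uniformly from the graph's nodes.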

    ############################################################################
    # [Input]:
    # 'datapipe' is either 'ItemSampler' or 'UniformNegativeSampler' depending
    # on whether training is needed ('is_train'),
    # 'graph': The network topology for sampling.
    # 'args.fanout': Number of neighbors to sample per node.
    # [Output]:
    # A NeighborSampler object to sample neighbors.
    # [Role]:
    # Initialize a neighbor sampler for sampling the neighborhoods of nodes.
    ############################################################################
    datapipe = datapipe.sample_neighbor(
        graph,
        args.fanout,
        output_cscformat=(args.output_cscformat == "True"),
    )
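
    # Note (added for clarity): `args.fanout` has one entry per GNN layer; the
    # default "15,10,5" samples up to 15, 10, and 5 neighbors for the three
    # SAGEConv layers, respectively.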

    ############################################################################
    # [Input]:
    # 'gb.exclude_seed_edges': Function to exclude seed edges, optionally
    # including their reverse edges, from the sampled subgraphs in the
    # minibatch.
    # [Output]:
    # A MiniBatchTransformer object with excluded seed edges.
    # [Role]:
    # During the training phase of link prediction, negative edges are
    # sampled. It's essential to exclude the seed edges from the process
    # to ensure that positive samples are not inadvertently included within
    # the negative samples.
    ############################################################################
    if is_train and args.exclude_edges:
        datapipe = datapipe.transform(
            partial(gb.exclude_seed_edges, include_reverse_edges=True)
        )
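
        # A clarifying note: `partial(...)` above pre-binds
        # `include_reverse_edges=True`, yielding a one-argument callable that
        # the transform applies to every sampled minibatch.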

    ############################################################################
    # [Input]:
    # 'features': The node features.
    # 'node_feature_keys': The node feature keys (list) to be fetched.
    # [Output]:
    # A FeatureFetcher object to fetch node features.
    # [Role]:
    # Initialize a feature fetcher for fetching features of the sampled
    # subgraphs. This step is skipped in evaluation/inference, because there
    # the features are read for the whole graph at once, so storing them in
    # each minibatch is unnecessary.
    ############################################################################
    if is_train:
        datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])
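
        # After this step, each minibatch exposes the fetched features as
        # `data.node_features["feat"]`, which the training loop reads below.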

    ############################################################################
    # [Input]:
    # 'device': The device to copy the data to.
    # [Output]:
    # A CopyTo object to copy the data to the specified device.
    ############################################################################
    datapipe = datapipe.copy_to(device=args.device)

    ############################################################################
    # [Input]:
    # 'datapipe': The datapipe object to be used for data loading.
    # 'args.num_workers': The number of processes to be used for data loading.
    # [Output]:
    # A DataLoader object to handle data loading.
    # [Role]:
    # Initialize a multi-process dataloader to load the data in parallel.
    ############################################################################
    dataloader = gb.DataLoader(
        datapipe,
        num_workers=args.num_workers,
    )

    # Return the fully-initialized DataLoader object.
    return dataloader


@torch.no_grad()
def compute_mrr(args, model, evaluator, node_emb, src, dst, neg_dst):
    """Compute the Mean Reciprocal Rank (MRR) for given source and destination
    nodes.

    This function computes the MRR for a set of node pairs, dividing the task
    into batches to handle potentially large graphs.
    """
    rr = torch.zeros(src.shape[0])
    # Loop over node pairs in batches.
    for start in tqdm.trange(
        0, src.shape[0], args.eval_batch_size, desc="Evaluate"
    ):
        end = min(start + args.eval_batch_size, src.shape[0])

        # Concatenate positive and negative destination nodes.
        all_dst = torch.cat([dst[start:end, None], neg_dst[start:end]], 1)

        # Fetch embeddings for current batch of source and destination nodes.
        h_src = node_emb[src[start:end]][:, None, :].to(args.device)
        h_dst = (
            node_emb[all_dst.view(-1)].view(*all_dst.shape, -1).to(args.device)
        )

        # Compute prediction scores using the model.
        pred = model.predictor(h_src * h_dst).squeeze(-1)

        # Evaluate the predictions to obtain MRR values.
        input_dict = {"y_pred_pos": pred[:, 0], "y_pred_neg": pred[:, 1:]}
        rr[start:end] = evaluator.eval(input_dict)["mrr_list"]
    return rr.mean()
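
# Worked intuition for MRR (an added note): for each positive edge, the model
# ranks the true destination against its sampled negative destinations; if the
# true destination ranks 2nd, its reciprocal rank is 1/2, and the MRR is the
# mean of these reciprocal ranks over all evaluated edges.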


@torch.no_grad()
def evaluate(args, model, graph, features, all_nodes_set, valid_set, test_set):
    """Evaluate the model on validation and test sets."""
    model.eval()
    evaluator = Evaluator(name="ogbl-citation2")

    # Since we need to use all neighborhoods for evaluation, we set the fanout
    # to -1.
    args.fanout = [-1]
    dataloader = create_dataloader(
        args, graph, features, all_nodes_set, is_train=False
    )

    # Compute node embeddings for the entire graph.
    node_emb = model.inference(graph, features, dataloader, args.device)
    results = []

    # Loop over both validation and test sets.
    for split in [valid_set, test_set]:
        # Unpack the item set.
        src = split._items[0][:, 0].to(node_emb.device)
        dst = split._items[0][:, 1].to(node_emb.device)
        neg_dst = split._items[1].to(node_emb.device)

        # Compute MRR values for the current split.
        results.append(
            compute_mrr(args, model, evaluator, node_emb, src, dst, neg_dst)
        )
    return results


def train(args, model, graph, features, train_set):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    dataloader = create_dataloader(args, graph, features, train_set)

    for epoch in tqdm.trange(args.epochs):
        model.train()
        total_loss = 0
        start_epoch_time = time.time()
        for step, data in enumerate(dataloader):
            # Get node pairs with labels for loss calculation.
            compacted_pairs, labels = data.node_pairs_with_labels
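
            # A clarifying note: `compacted_pairs` holds the (src, dst)
            # indices of the positive and sampled negative edges, remapped to
            # the sampled subgraph's local node IDs, while `labels` is 1 for
            # positive edges and 0 for negatives.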

            node_feature = data.node_features["feat"]
            blocks = data.blocks

            # Get the embeddings of the input nodes.
            y = model(blocks, node_feature)
            logits = model.predictor(
                y[compacted_pairs[0]] * y[compacted_pairs[1]]
            ).squeeze()

            # Compute loss.
            loss = F.binary_cross_entropy_with_logits(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            if step + 1 == args.early_stop:
                break

        end_epoch_time = time.time()
        print(
            f"Epoch {epoch:05d} | "
            f"Loss {(total_loss) / (step + 1):.4f} | "
            f"Time {(end_epoch_time - start_epoch_time):.4f} s"
        )


def parse_args():
    parser = argparse.ArgumentParser(description="OGBL-Citation2 (GraphBolt)")
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--lr", type=float, default=0.0005)
    parser.add_argument("--neg-ratio", type=int, default=1)
    parser.add_argument("--train-batch-size", type=int, default=512)
    parser.add_argument("--eval-batch-size", type=int, default=1024)
    parser.add_argument("--num-workers", type=int, default=0)
    parser.add_argument(
        "--early-stop",
        type=int,
        default=0,
        help="0 means no early stop, otherwise stop at the input-th step",
    )
    parser.add_argument(
        "--fanout",
        type=str,
        default="15,10,5",
        help="Fan-out of neighbor sampling. Default: 15,10,5",
    )
    parser.add_argument(
        "--exclude-edges",
        type=int,
        default=1,
        help="Whether to exclude reverse edges during sampling. Default: 1",
    )
    parser.add_argument(
        "--device",
        default="cpu",
        choices=["cpu", "cuda"],
        help="Train device: 'cpu' for CPU, 'cuda' for GPU.",
    )
    parser.add_argument(
        "--output_cscformat",
        default="False",
        choices=["False", "True"],
        help="Output type of SampledSubgraph. True for csc_formats, False for node_pairs.",
    )
    return parser.parse_args()


def main(args):
    if not torch.cuda.is_available():
        args.device = "cpu"
    print(f"Training in {args.device} mode.")

    # Load and preprocess dataset.
    print("Loading data")
    dataset = gb.BuiltinDataset("ogbl-citation2").load()
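    # A loading note (hedged): gb.BuiltinDataset fetches "ogbl-citation2" on
    # first use and pre-processes it into GraphBolt's on-disk format; this is
    # the "OnDiskDataset pre-processing" step in the module docstring.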
    graph = dataset.graph
    features = dataset.feature
    train_set = dataset.tasks[0].train_set
    args.fanout = list(map(int, args.fanout.split(",")))

    in_size = features.size("node", None, "feat")[0]
    hidden_channels = 256
    args.device = torch.device(args.device)
    model = SAGE(in_size, hidden_channels).to(args.device)

    # Model training.
    print("Training...")
    train(args, model, graph, features, train_set)

    # Test the model.
    print("Testing...")
    test_set = dataset.tasks[0].test_set
    valid_set = dataset.tasks[0].validation_set
    all_nodes_set = dataset.all_nodes_set
    valid_mrr, test_mrr = evaluate(
        args, model, graph, features, all_nodes_set, valid_set, test_set
    )
    print(
        f"Validation MRR {valid_mrr.item():.4f}, "
        f"Test MRR {test_mrr.item():.4f}"
    )


if __name__ == "__main__":
    args = parse_args()
    main(args)