"""
This script, `hetero_rgcn.py`, trains and tests a Relational Graph
Convolutional Network (R-GCN) model for node classification on the
Open Graph Benchmark (OGB) dataset "ogbn-mag". For more details on
"ogbn-mag", please refer to the OGB website:
(https://ogb.stanford.edu/docs/nodeprop/)

Paper [Modeling Relational Data with Graph Convolutional Networks]
(https://arxiv.org/abs/1703.06103).

Generation of graph embeddings is the main difference between homograph
node classification and heterograph node classification:
- Homograph: Since all nodes and edges are of the same type, embeddings
  can be generated using a unified approach. Type-specific handling is
  typically not required.
- Heterograph: Due to the existence of multiple types of nodes and edges,
  specific embeddings need to be generated for each type. This allows for
  a more nuanced capture of the complex structure and semantic information
  within the heterograph.

This flowchart describes the main functional sequence of the provided example.
main
│
├───> prepare_data
│     │
│     └───> Load and preprocess dataset
│
├───> rel_graph_embed [HIGHLIGHT]
│     │
│     └───> Generate graph embeddings
│
├───> Instantiate RGCN model
│     │
│     ├───> RelGraphConvLayer (input to hidden)
│     │
│     └───> RelGraphConvLayer (hidden to output)
│
└───> train
      │
      └───> Training loop
            │
            ├───> EntityClassify.forward (RGCN model forward pass)
            │
            └───> test
                  │
                  └───> EntityClassify.evaluate
"""
import argparse
import itertools
import sys

import dgl
import dgl.nn as dglnn
import numpy as np

import psutil

import torch as th
import torch.nn as nn
import torch.nn.functional as F
from dgl import AddReverse, Compose, ToSimple
from dgl.nn import HeteroEmbedding
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
from ogb.nodeproppred import DglNodePropPredDataset, Evaluator
from tqdm import tqdm


def prepare_data(args, device):
    feats = {}
    if args.dataset == "ogbn-mag":
        dataset = DglNodePropPredDataset(name="ogbn-mag", root=args.rootdir)

        # - graph: dgl graph object.
        # - label: torch tensor of shape (num_nodes, num_tasks).
        g, labels = dataset[0]

        # Flatten the labels for "paper" type nodes. This step reduces the
        # dimensionality of the labels. We need to flatten the labels because
        # the model requires a 1-dimensional label tensor.
        labels = labels["paper"].flatten().long()

        # Apply transformation to the graph.
        # - "ToSimple()" removes multi-edge between two nodes.
        # - "AddReverse()" adds reverse edges to the graph.
        transform = Compose([ToSimple(), AddReverse()])
        g = transform(g)
    else:
        dataset = MAG240MDataset(root=args.rootdir)
        (g,), _ = dgl.load_graphs(args.graph_path)
        g = g.formats(["csc"])
        labels = th.as_tensor(dataset.paper_label).long()
        # As feature data is too large to fit in memory, we read it from disk.
        feats["paper"] = th.as_tensor(
            np.load(args.paper_feature_path, mmap_mode="r+")
        )
        feats["author"] = th.as_tensor(
            np.load(args.author_feature_path, mmap_mode="r+")
        )
        feats["institution"] = th.as_tensor(
            np.load(args.inst_feature_path, mmap_mode="r+")
        )
    print(f"Loaded graph: {g}")

    # Get train/valid/test index.
    split_idx = dataset.get_idx_split()
    if args.dataset == "ogb-lsc-mag240m":
        split_idx = {
            split_type: {"paper": split_idx[split_type]}
            for split_type in split_idx
        }

    # Initialize a train sampler that samples neighbors for multi-layer graph
    # convolution. It samples 25 and 10 neighbors for the first and second
    # layers respectively.
    sampler = dgl.dataloading.MultiLayerNeighborSampler([25, 10])
    num_workers = args.num_workers
    train_loader = dgl.dataloading.DataLoader(
        g,
        split_idx["train"],
        sampler,
        batch_size=1024,
        shuffle=True,
        num_workers=num_workers,
        device=device,
    )

    return g, labels, dataset.num_classes, split_idx, train_loader, feats


def extract_embed(node_embed, input_nodes):
    emb = node_embed(
        {ntype: input_nodes[ntype] for ntype in input_nodes if ntype != "paper"}
    )
    return emb


def rel_graph_embed(graph, embed_size):
    """Initialize a heterogenous embedding layer for all node types in the
    graph, except for the "paper" node type.

    The function constructs a dictionary 'node_num', where the keys are node
    types (ntype) and the values are the number of nodes for each type. This
    dictionary is used to create a HeteroEmbedding instance.

    (HIGHLIGHT)
    A HeteroEmbedding instance holds separate embedding layers for each node
    type, each with its own feature space of dimensionality
    (node_num[ntype], embed_size), where 'node_num[ntype]' is the number of
    nodes of type 'ntype' and 'embed_size' is the embedding dimension.

    The "paper" node type is specifically excluded, possibly because these nodes
    might already have predefined feature representations, and therefore, do not
    require an additional embedding layer.

    Parameters
    ----------
    graph : DGLGraph
        The graph for which to create the heterogeneous embedding layer.
    embed_size : int
        The size of the embedding vectors.

    Returns
    -------
    HeteroEmbedding
        A heterogeneous embedding layer for all node types in the graph, except
        for the "paper" node type.
    """
    node_num = {}
    for ntype in graph.ntypes:
        # Skip the "paper" node type.
        if ntype == "paper":
            continue
        node_num[ntype] = graph.num_nodes(ntype)
    return HeteroEmbedding(node_num, embed_size)
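
# A minimal usage sketch of the embedding layer created above (the node type
# and IDs are hypothetical, for illustration only):
#
#   embed_layer = rel_graph_embed(g, 128)
#   emb = embed_layer({"author": th.tensor([0, 1, 2])})
#   # emb["author"] is a learnable (3, 128) tensor, one row per requested node.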


class RelGraphConvLayer(nn.Module):
    def __init__(
        self,
        in_size,
        out_size,
        ntypes,
        relation_names,
        activation=None,
        dropout=0.0,
    ):
        super(RelGraphConvLayer, self).__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.ntypes = ntypes
        self.relation_names = relation_names
        self.activation = activation

        ########################################################################
        # (HIGHLIGHT) HeteroGraphConv is a graph convolution operator over
        # heterogeneous graphs. A dictionary is passed where the key is the
        # relation name and the value is the instance of GraphConv. norm="right"
        # is to divide the aggregated messages by each node’s in-degrees, which
        # is equivalent to averaging the received messages. weight=False and
        # bias=False as we will use our own weight matrices defined later.
        ########################################################################
        self.conv = dglnn.HeteroGraphConv(
            {
                rel: dglnn.GraphConv(
                    in_size, out_size, norm="right", weight=False, bias=False
                )
                for rel in relation_names
            }
        )

        # Create a separate Linear layer for each relationship. Each
        # relationship has its own weights which will be applied to the node
        # features before performing convolution.
        self.weight = nn.ModuleDict(
            {
                rel_name: nn.Linear(in_size, out_size, bias=False)
                for rel_name in self.relation_names
            }
        )

        # Create a separate Linear layer for each node type.
        # loop_weights are used to update the output embedding of each target node
        # based on its own features, thereby allowing the model to refine the node
        # representations. Note that this does not imply the existence of self-loop
        # edges in the graph. It is similar to a residual connection.
        self.loop_weights = nn.ModuleDict(
            {
                ntype: nn.Linear(in_size, out_size, bias=True)
                for ntype in self.ntypes
            }
        )

        self.dropout = nn.Dropout(dropout)
        # Initialize parameters of the model.
        self.reset_parameters()

    def reset_parameters(self):
        for layer in self.weight.values():
            layer.reset_parameters()

        for layer in self.loop_weights.values():
            layer.reset_parameters()

    def forward(self, g, inputs):
        """
        Parameters
        ----------
        g : DGLGraph
            Input graph.
        inputs : dict[str, torch.Tensor]
            Node feature for each node type.

        Returns
        -------
        dict[str, torch.Tensor]
            New node features for each node type.
        """
        # Work on a local variable scope of the graph so that feature data set
        # inside this function does not leak back to the original graph.
        g = g.local_var()

        # Create a dictionary of weights for each relationship. The weights
        # are retrieved from the Linear layers defined earlier.
        weight_dict = {
            rel_name: {"weight": self.weight[rel_name].weight.T}
            for rel_name in self.relation_names
        }

        # Create a dictionary of node features for the destination nodes in
        # the graph. We slice the node features according to the number of
        # destination nodes of each type. This is necessary because when
        # incorporating the effect of self-loop edges, we perform computations
        # only on the destination nodes' features. By doing so, we ensure the
        # feature dimensions match and prevent any misuse of incorrect node
        # features.
        inputs_dst = {
            k: v[: g.number_of_dst_nodes(k)] for k, v in inputs.items()
        }

        # Apply the convolution operation on the graph. mod_kwargs are
        # additional arguments for each relation function defined in the
        # HeteroGraphConv. In this case, it's the weights for each relation.
        hs = self.conv(g, inputs, mod_kwargs=weight_dict)

        def _apply(ntype, h):
            # Apply the `loop_weight` to the input node features, effectively
            # acting as a residual connection. This allows the model to refine
            # node embeddings based on its current features.
            h = h + self.loop_weights[ntype](inputs_dst[ntype])
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        # Apply the function defined above for each node type. This will update
        # the node features using the `loop_weights`, apply the activation
        # function and dropout.
        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}


class EntityClassify(nn.Module):
    def __init__(self, g, in_size, out_size):
        super(EntityClassify, self).__init__()
        self.in_size = in_size
        self.hidden_size = 64
        self.out_size = out_size

        # Generate and sort a list of unique edge types from the input graph,
        # e.g. ['cites', 'writes'].
        self.relation_names = list(set(g.etypes))
        self.relation_names.sort()
        self.dropout = 0.5

        self.layers = nn.ModuleList()

        # First layer: transform input features to hidden features. Use ReLU
        # as the activation function and apply dropout for regularization.
        self.layers.append(
            RelGraphConvLayer(
                self.in_size,
                self.hidden_size,
                g.ntypes,
                self.relation_names,
                activation=F.relu,
                dropout=self.dropout,
            )
        )

        # Second layer: transform hidden features to output features. No
        # activation function is applied at this stage.
        self.layers.append(
            RelGraphConvLayer(
                self.hidden_size,
                self.out_size,
                g.ntypes,
                self.relation_names,
                activation=None,
            )
        )

    def reset_parameters(self):
        # Reset the parameters of each layer.
        for layer in self.layers:
            layer.reset_parameters()

    def forward(self, h, blocks):
        for layer, block in zip(self.layers, blocks):
            h = layer(block, h)
        return h
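
# Shape sketch for EntityClassify.forward on ogbn-mag (illustrative, using the
# defaults in this script: 128-dim input features, 64 hidden units):
#   h              : {ntype: (num_src_nodes, 128)} features for block 0
#   after layer 0  : {ntype: (num_dst_nodes of block 0, 64)}
#   after layer 1  : {ntype: (num_seed_nodes, num_classes)} per-type logits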


class Logger(object):
    r"""
    This class was taken directly from the PyG implementation and can be found
    here:
    https://github.com/snap-stanford/ogb/blob/master/examples/nodeproppred/mag/logger.py

    This was done to ensure that performance was measured in precisely the same
    way.
    """

    def __init__(self, runs):
        self.results = [[] for _ in range(runs)]

    def add_result(self, run, result):
        assert len(result) == 3
        assert run >= 0 and run < len(self.results)
        self.results[run].append(result)

    def print_statistics(self, run=None):
        if run is not None:
            result = 100 * th.tensor(self.results[run])
            argmax = result[:, 1].argmax().item()
            print(f"Run {run + 1:02d}:")
            print(f"Highest Train: {result[:, 0].max():.2f}")
            print(f"Highest Valid: {result[:, 1].max():.2f}")
            print(f"  Final Train: {result[argmax, 0]:.2f}")
            print(f"   Final Test: {result[argmax, 2]:.2f}")
        else:
            result = 100 * th.tensor(self.results)

            best_results = []
            for r in result:
                train1 = r[:, 0].max().item()
                valid = r[:, 1].max().item()
                train2 = r[r[:, 1].argmax(), 0].item()
                test = r[r[:, 1].argmax(), 2].item()
                best_results.append((train1, valid, train2, test))

            best_result = th.tensor(best_results)

            print("All runs:")
            r = best_result[:, 0]
            print(f"Highest Train: {r.mean():.2f} ± {r.std():.2f}")
            r = best_result[:, 1]
            print(f"Highest Valid: {r.mean():.2f} ± {r.std():.2f}")
            r = best_result[:, 2]
            print(f"  Final Train: {r.mean():.2f} ± {r.std():.2f}")
            r = best_result[:, 3]
            print(f"   Final Test: {r.mean():.2f} ± {r.std():.2f}")


def extract_node_features(name, g, input_nodes, node_embed, feats, device):
    """Extract the node features from embedding layer or raw features."""
    if name == "ogbn-mag":
        # Extract node embeddings for the input nodes.
        node_features = extract_embed(node_embed, input_nodes)
        # Add the batch's raw "paper" features. Corresponds to the content
        # in the function `rel_graph_embed` comment.
        node_features.update(
            {"paper": g.ndata["feat"]["paper"][input_nodes["paper"].cpu()]}
        )
        node_features = {k: e.to(device) for k, e in node_features.items()}
    else:
        node_features = {
            ntype: feats[ntype][input_nodes[ntype].cpu()].to(device)
            for ntype in input_nodes
        }
        # Original feature data are stored in float16 while model weights are
        # float32, so we need to convert the features to float32.
        # [TODO] Enable mixed precision training on GPU.
        node_features = {k: v.float() for k, v in node_features.items()}
    return node_features


def train(
    dataset,
    g,
    feats,
    model,
    node_embed,
    optimizer,
    train_loader,
    split_idx,
    labels,
    logger,
    device,
    run,
):
    print("start training...")
    category = "paper"

    # Typically, the best Validation performance is obtained after
    # the 1st or 2nd epoch. This is why the max epoch is set to 3.
    for epoch in range(3):
        num_train = split_idx["train"][category].shape[0]
        model.train()

        total_loss = 0

        for input_nodes, seeds, blocks in tqdm(
            train_loader, desc=f"Epoch {epoch:02d}"
        ):
            # Move the input data onto the device.
            blocks = [blk.to(device) for blk in blocks]
            # We only predict the nodes with type "category".
            seeds = seeds[category]
            batch_size = seeds.shape[0]

            # Extract the node features from embedding layer or raw features.
            node_features = extract_node_features(
                dataset, g, input_nodes, node_embed, feats, device
            )
            lbl = labels[seeds.cpu()].to(device)

            # Reset gradients.
            optimizer.zero_grad()
            # Generate predictions.
            logits = model(node_features, blocks)[category]

            y_hat = logits.log_softmax(dim=-1)
            loss = F.nll_loss(y_hat, lbl)
            loss.backward()
            optimizer.step()

            total_loss += loss.item() * batch_size

        loss = total_loss / num_train

        # Evaluate the model on the train/val/test set.
        train_acc = evaluate(
            dataset,
            g,
            feats,
            model,
            node_embed,
            labels,
            device,
            split_idx["train"],
        )
        valid_acc = evaluate(
            dataset,
            g,
            feats,
            model,
            node_embed,
            labels,
            device,
            split_idx["valid"],
        )
        test_key = "test" if dataset == "ogbn-mag" else "test-dev"
        test_acc = evaluate(
            dataset,
            g,
            feats,
            model,
            node_embed,
            labels,
            device,
            split_idx[test_key],
            save_test_submission=(dataset == "ogb-lsc-mag240m"),
        )
        logger.add_result(run, (train_acc, valid_acc, test_acc))
        print(
            f"Run: {run + 1:02d}, "
            f"Epoch: {epoch +1 :02d}, "
            f"Loss: {loss:.4f}, "
            f"Train: {100 * train_acc:.2f}%, "
            f"Valid: {100 * valid_acc:.2f}%, "
            f"Test: {100 * test_acc:.2f}%"
        )

    return logger


@th.no_grad()
def evaluate(
    dataset,
    g,
    feats,
    model,
    node_embed,
    labels,
    device,
    idx,
    save_test_submission=False,
):
    # Switches the model to evaluation mode.
    model.eval()
    category = "paper"
    if dataset == "ogbn-mag":
        evaluator = Evaluator(name="ogbn-mag")
    else:
        evaluator = MAG240MEvaluator()

    sampler = dgl.dataloading.MultiLayerNeighborSampler([25, 10])
    dataloader = dgl.dataloading.DataLoader(
        g,
        idx,
        sampler,
        batch_size=4096,
        shuffle=False,
        num_workers=0,
        device=device,
    )

    # To store the predictions.
    y_hats = list()
    y_true = list()

    for input_nodes, seeds, blocks in tqdm(dataloader, desc="Inference"):
        blocks = [blk.to(device) for blk in blocks]
        # We only predict the nodes with type "category".
        node_features = extract_node_features(
            dataset, g, input_nodes, node_embed, feats, device
        )

        # Generate predictions.
        logits = model(node_features, blocks)[category]
        # Apply log-softmax to the logits and take the argmax as the predicted
        # class.
        y_hat = logits.log_softmax(dim=-1).argmax(dim=1, keepdims=True)
        y_hats.append(y_hat.cpu())
        y_true.append(labels[seeds["paper"].cpu()])

    y_pred = th.cat(y_hats, dim=0)
    y_true = th.cat(y_true, dim=0)
    y_true = th.unsqueeze(y_true, 1)

    if dataset == "ogb-lsc-mag240m":
        y_pred = y_pred.view(-1)
        y_true = y_true.view(-1)

    if save_test_submission:
        evaluator.save_test_submission(
            input_dict={"y_pred": y_pred}, dir_path=".", mode="test-dev"
        )
    return evaluator.eval({"y_true": y_true, "y_pred": y_pred})["acc"]


def main(args):
    device = "cuda:0" if th.cuda.is_available() and args.num_gpus > 0 else "cpu"

    # Initialize a logger.
    logger = Logger(args.runs)

    # Prepare the data.
    g, labels, num_classes, split_idx, train_loader, feats = prepare_data(
        args, device
    )

    feat_size = 128 if args.dataset == "ogbn-mag" else 768

    # Create the embedding layer and move it to the appropriate device.
    embed_layer = None
    if args.dataset == "ogbn-mag":
        embed_layer = rel_graph_embed(g, feat_size).to(device)
        print(
            "Number of embedding parameters: "
            f"{sum(p.numel() for p in embed_layer.parameters())}"
        )

    # Initialize the entity classification model.
    model = EntityClassify(g, feat_size, num_classes).to(device)

    print(
        "Number of model parameters: "
        f"{sum(p.numel() for p in model.parameters())}"
    )

    for run in range(args.runs):
        try:
            if embed_layer is not None:
                embed_layer.reset_parameters()
            model.reset_parameters()
        except:
            # Old pytorch version doesn't support reset_parameters() API.
            ##################################################################
            # [Why we need to reset the parameters?]
            # If parameters are not reset, the model will start with the
            # parameters learned from the last run, potentially resulting
            # in biased outcomes or sub-optimal performance if the model was
            # previously stuck in a poor local minimum.
            ##################################################################
            pass

        # `itertools.chain()` is a function in Python's itertools module.
        # It is used to flatten a list of iterables, making them act as
        # one big iterable.
        # In this context, the following code is used to create a single
        # iterable over the parameters of both the model and the embed_layer,
        # which is passed to the optimizer. The optimizer then updates all
        # these parameters during the training process.
        all_params = itertools.chain(
            model.parameters(),
            [] if embed_layer is None else embed_layer.parameters(),
        )
        optimizer = th.optim.Adam(all_params, lr=0.01)

        # `expected_max` is the number of physical cores on your machine.
        # The `logical` parameter, when set to False, ensures that the count
        # returned is the number of physical cores instead of logical cores
        # (which could be higher due to technologies like Hyper-Threading).
        expected_max = int(psutil.cpu_count(logical=False))
        if args.num_workers >= expected_max:
            print(
                "[ERROR] You specified num_workers are larger than physical"
                f"cores, please set any number less than {expected_max}",
                file=sys.stderr,
            )
        logger = train(
            args.dataset,
            g,
            feats,
            model,
            embed_layer,
            optimizer,
            train_loader,
            split_idx,
            labels,
            logger,
            device,
            run,
        )
        logger.print_statistics(run)

    print("Final performance: ")
    logger.print_statistics()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="RGCN")
    parser.add_argument(
        "--dataset",
        type=str,
        default="ogbn-mag",
        help="Dataset for train: ogbn-mag, ogb-lsc-mag240m",
    )
    parser.add_argument(
        "--num_gpus",
        type=int,
        default=0,
        help="Number of GPUs. Use 0 for CPU training.",
    )
    parser.add_argument(
        "--runs",
        type=int,
        default=5,
        help="Number of runs. Each run will train the model from scratch.",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=0,
        help="Number of worker processes for data loading.",
    )
    parser.add_argument(
        "--rootdir",
        type=str,
        default="./",
        help="Directory to download the OGB dataset.",
    )
    parser.add_argument(
        "--graph_path",
        type=str,
        default="./graph.dgl",
        help="Path to the graph file.",
    )
    parser.add_argument(
        "--paper_feature_path",
        type=str,
        default="./paper-feat.npy",
        help="Path to the features of paper nodes.",
    )
    parser.add_argument(
        "--author_feature_path",
        type=str,
        default="./author-feat.npy",
        help="Path to the features of author nodes.",
    )
    parser.add_argument(
        "--inst_feature_path",
        type=str,
        default="./inst-feat.npy",
        help="Path to the features of institution nodes.",
    )
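
    # Example invocations (a sketch; the file paths for ogb-lsc-mag240m are the
    # argument defaults above and must point to data you have prepared):
    #   python hetero_rgcn.py --dataset ogbn-mag
    #   python hetero_rgcn.py --dataset ogb-lsc-mag240m --graph_path ./graph.dgl \
    #       --paper_feature_path ./paper-feat.npy \
    #       --author_feature_path ./author-feat.npy \
    #       --inst_feature_path ./inst-feat.npy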

    args = parser.parse_args()

    main(args)