import torch
import torch.nn as nn
from classify import evaluate_embeds
from label_utils import (
    get_labeled_nodes_label_attribute,
    remove_unseen_classes_from_training,
)
from model import GCN, RECT_L
from utils import load_data, process_classids, svd_feature


def main(args):
    """Train the selected model and evaluate its node embeddings.

    Loads the dataset, removes the user-chosen unseen classes from the
    training set (the zero-shot label setting), trains the requested
    model ("RECT-L", "GCN", or "NodeFeats"), and prints the test
    accuracy of the resulting embeddings evaluated against the original
    balanced labels (as suggested in the paper).

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (model_opt, dataset, removed_class,
        n_hidden, lr, weight_decay, n_epochs, dropout, gpu).

    Raises
    ------
    ValueError
        If more unseen classes are requested than exist in the dataset,
        if a requested unseen class id does not occur in the labels, or
        if ``args.model_opt`` is not a known model option.
    """
    g, features, labels, train_mask, test_mask, n_classes, cuda = load_data(
        args
    )

    # Adopt any number of classes as the unseen classes
    # (the first three classes by default).
    removed_class = args.removed_class
    if len(removed_class) > n_classes:
        raise ValueError(
            "unseen number is greater than the number of classes: {}".format(
                len(removed_class)
            )
        )
    for i in removed_class:
        if i not in labels:
            raise ValueError("class out of bounds: {}".format(i))

    # Remove these unseen classes from the training set, to construct the
    # zero-shot label setting.
    train_mask_zs = remove_unseen_classes_from_training(
        train_mask=train_mask, labels=labels, removed_class=removed_class
    )
    print(
        "after removing the unseen classes, seen class labeled node num:",
        sum(train_mask_zs).item(),
    )

    if args.model_opt == "RECT-L":
        embeds = _train_rect_l(args, g, features, labels, train_mask_zs, cuda)
    elif args.model_opt == "GCN":
        embeds = _train_gcn(
            args, g, features, labels, train_mask_zs, n_classes,
            removed_class, cuda,
        )
    elif args.model_opt == "NodeFeats":
        embeds = svd_feature(features)
    else:
        # argparse `choices` normally prevents this; fail loudly here
        # instead of hitting a NameError on `embeds` below.
        raise ValueError("unknown model option: {}".format(args.model_opt))

    # Evaluate the quality of embedding results with the original balanced
    # labels, to assess the model performance (as suggested in the paper).
    res = evaluate_embeds(
        features=embeds,
        labels=labels,
        train_mask=train_mask,
        test_mask=test_mask,
        n_classes=n_classes,
        cuda=cuda,
    )
    print("Test Accuracy of {:s}: {:.4f}".format(args.model_opt, res))


def _train_rect_l(args, g, features, labels, train_mask_zs, cuda):
    """Train RECT-L on the seen classes and return its node embeddings."""
    model = RECT_L(
        g=g,
        in_feats=args.n_hidden,
        n_hidden=args.n_hidden,
        activation=nn.PReLU(),
    )
    if cuda:
        model.cuda()
    # RECT-L consumes SVD-reduced features of dimension n_hidden.
    features = svd_feature(features=features, d=args.n_hidden)
    # Regression targets (class-attribute vectors) for the labeled seen nodes.
    attribute_labels = get_labeled_nodes_label_attribute(
        train_mask_zs=train_mask_zs,
        labels=labels,
        features=features,
        cuda=cuda,
    )
    loss_fcn = nn.MSELoss(reduction="sum")
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay
    )
    for epoch in range(args.n_epochs):
        model.train()
        optimizer.zero_grad()
        logits = model(features)
        loss_train = loss_fcn(attribute_labels, logits[train_mask_zs])
        print(
            "Epoch {:d} | Train Loss {:.5f}".format(
                epoch + 1, loss_train.item()
            )
        )
        loss_train.backward()
        optimizer.step()
    model.eval()
    return model.embed(features)


def _train_gcn(
    args, g, features, labels, train_mask_zs, n_classes, removed_class, cuda
):
    """Train a GCN on the seen classes only and return its node embeddings."""
    model = GCN(
        g=g,
        in_feats=features.shape[1],
        n_hidden=args.n_hidden,
        n_classes=n_classes - len(removed_class),
        activation=nn.PReLU(),
        dropout=args.dropout,
    )
    if cuda:
        model.cuda()
    loss_fcn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay
    )
    # Remap the remaining (seen) class ids to a contiguous 0..k-1 range once:
    # the mapping is loop-invariant, so hoist it out of the epoch loop.
    labels_train = process_classids(labels_temp=labels[train_mask_zs])
    for epoch in range(args.n_epochs):
        model.train()
        logits = model(features)
        loss_train = loss_fcn(logits[train_mask_zs], labels_train)
        optimizer.zero_grad()
        print(
            "Epoch {:d} | Train Loss {:.5f}".format(
                epoch + 1, loss_train.item()
            )
        )
        loss_train.backward()
        optimizer.step()
    model.eval()
    return model.embed(features)


if __name__ == "__main__":
    import argparse

    # Command-line options, listed in the order they appear in --help.
    option_specs = [
        (
            "--model-opt",
            dict(
                type=str,
                default="RECT-L",
                choices=["RECT-L", "GCN", "NodeFeats"],
                help="model option",
            ),
        ),
        (
            "--dataset",
            dict(
                type=str,
                default="cora",
                choices=["cora", "citeseer"],
                help="dataset",
            ),
        ),
        (
            "--dropout",
            dict(type=float, default=0.0, help="dropout probability"),
        ),
        ("--gpu", dict(type=int, default=0, help="gpu")),
        (
            "--removed-class",
            dict(
                type=int,
                nargs="*",
                default=[0, 1, 2],
                help="remove the unseen classes",
            ),
        ),
        ("--lr", dict(type=float, default=1e-3, help="learning rate")),
        (
            "--n-epochs",
            dict(type=int, default=200, help="number of training epochs"),
        ),
        (
            "--n-hidden",
            dict(type=int, default=200, help="number of hidden gcn units"),
        ),
        (
            "--weight-decay",
            dict(type=float, default=5e-4, help="Weight for L2 loss"),
        ),
    ]

    parser = argparse.ArgumentParser(description="MODEL")
    for flag, spec in option_specs:
        parser.add_argument(flag, **spec)

    main(parser.parse_args())