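"""Train GRACE (Deep Graph Contrastive Representation Learning) and evaluate
the learned node embeddings with a linear classifier.

Example:
    python main.py --dataname cora --gpu 0
"""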
import argparse
import warnings

import numpy as np
import torch as th
import torch.nn as nn

from aug import aug
from dataset import load
from eval import label_classification
from model import Grace

warnings.filterwarnings('ignore')


def count_parameters(model):
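    """Count the trainable parameters of the model."""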
    return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])


parser = argparse.ArgumentParser()
parser.add_argument('--dataname', type=str, default='cora', help='Name of the dataset.')
parser.add_argument('--gpu', type=int, default=0, help='GPU index; set to -1 to run on CPU.')
parser.add_argument('--split', type=str, default='random', help='Evaluation data split.')

parser.add_argument('--epochs', type=int, default=500, help='Number of training epochs.')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate.')
parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay.')
parser.add_argument('--temp', type=float, default=1.0, help='Temperature of the contrastive loss.')

parser.add_argument('--act_fn', type=str, default='relu', choices=['relu', 'prelu'], help='Activation function.')

parser.add_argument("--hid_dim", type=int, default=256, help='Hidden layer dim.')
parser.add_argument("--out_dim", type=int, default=256, help='Output layer dim.')

parser.add_argument("--num_layers", type=int, default=2, help='Number of GNN layers.')
parser.add_argument('--der1', type=float, default=0.2, help='Drop edge ratio of the 1st augmentation.')
parser.add_argument('--der2', type=float, default=0.2, help='Drop edge ratio of the 2nd augmentation.')
parser.add_argument('--dfr1', type=float, default=0.2, help='Drop feature ratio of the 1st augmentation.')
parser.add_argument('--dfr2', type=float, default=0.2, help='Drop feature ratio of the 2nd augmentation.')

args = parser.parse_args()

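# Run on the requested GPU if one is available; otherwise fall back to CPU.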
if args.gpu != -1 and th.cuda.is_available():
    args.device = f'cuda:{args.gpu}'
else:
    args.device = 'cpu'

if __name__ == '__main__':

    # Step 1: Load hyperparameters =================================================================== #
    lr = args.lr
    hid_dim = args.hid_dim
    out_dim = args.out_dim

    num_layers = args.num_layers
    act_fn = {'relu': nn.ReLU(), 'prelu': nn.PReLU()}[args.act_fn]

    drop_edge_rate_1 = args.der1
    drop_edge_rate_2 = args.der2
    drop_feature_rate_1 = args.dfr1
    drop_feature_rate_2 = args.dfr2

    temp = args.temp
    epochs = args.epochs
    wd = args.wd

    # Step 2: Prepare data =================================================================== #
    graph, feat, labels, train_mask, test_mask = load(args.dataname)
    in_dim = feat.shape[1]

    # Step 3: Create model =================================================================== #
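    # GRACE pairs a GNN encoder with a projection head; `temp` is the
    # temperature of its contrastive (InfoNCE-style) objective.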
    model = Grace(in_dim, hid_dim, out_dim, num_layers, act_fn, temp)
    model = model.to(args.device)
    print(f'# params: {count_parameters(model)}')

    optimizer = th.optim.Adam(model.parameters(), lr=lr, weight_decay=wd)

    # Step 4: Training ======================================================================= #
    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
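        # Create two augmented views by randomly masking features and dropping edges.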
        graph1, feat1 = aug(graph, feat, drop_feature_rate_1, drop_edge_rate_1)
        graph2, feat2 = aug(graph, feat, drop_feature_rate_2, drop_edge_rate_2)

        graph1 = graph1.to(args.device)
        graph2 = graph2.to(args.device)

        feat1 = feat1.to(args.device)
        feat2 = feat2.to(args.device)

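        # The forward pass returns the contrastive loss between the two views.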
        loss = model(graph1, graph2, feat1, feat2)
        loss.backward()
        optimizer.step()

        print(f'Epoch={epoch:03d}, loss={loss.item():.4f}')

    # Step 5: Linear evaluation ============================================================== #
    print("=== Final ===")

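    # Add self-loops so every node aggregates its own features at inference time
    # (this also avoids zero-in-degree nodes in the GNN).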
    graph = graph.add_self_loop()
    graph = graph.to(args.device)
    feat = feat.to(args.device)
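    # Extract node embeddings from the trained encoder.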
    embeds = model.get_embedding(graph, feat)

    # Evaluate the embeddings by training a linear classifier on top of them.
    label_classification(embeds, labels, train_mask, test_mask, split=args.split)