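"""Train and evaluate a two-layer GAT on a citation dataset (Cora, Citeseer,
or Pubmed) using DGL's built-in GATConv module."""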
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F

import dgl.nn as dglnn
from dgl import AddSelfLoop
from dgl.data import CiteseerGraphDataset, CoraGraphDataset, PubmedGraphDataset

class GAT(nn.Module):
    def __init__(self, in_size, hid_size, out_size, heads):
        super().__init__()
        self.gat_layers = nn.ModuleList()
        # two-layer GAT: multi-head attention in the hidden layer,
        # head-averaged logits in the output layer
        self.gat_layers.append(dglnn.GATConv(
            in_size, hid_size, heads[0],
            feat_drop=0.6, attn_drop=0.6, activation=F.elu))
        self.gat_layers.append(dglnn.GATConv(
            hid_size * heads[0], out_size, heads[1],
            feat_drop=0.6, attn_drop=0.6, activation=None))

    def forward(self, g, inputs):
        h = inputs
        for i, layer in enumerate(self.gat_layers):
            h = layer(g, h)  # shape: (num_nodes, num_heads, out_dim)
            if i == len(self.gat_layers) - 1:  # last layer: average the heads
                h = h.mean(1)
            else:                              # hidden layer(s): concatenate the heads
                h = h.flatten(1)
        return h
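
# Quick shape walkthrough, assuming the default configuration used below
# (hid_size=8, heads=[8, 1]) and the Cora dataset (1433 input features,
# 7 classes), with N nodes:
#   layer 0: (N, 1433) --GATConv--> (N, 8, 8) --flatten--> (N, 64)
#   layer 1: (N, 64)   --GATConv--> (N, 1, 7) --mean-->    (N, 7)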
    
def evaluate(g, features, labels, mask, model):
    model.eval()
    with torch.no_grad():
        logits = model(g, features)
        logits = logits[mask]
        labels = labels[mask]
        _, indices = torch.max(logits, dim=1)
        correct = torch.sum(indices == labels)
        return correct.item() * 1.0 / len(labels)

def train(g, features, labels, masks, model):
    # define train/val samples, loss function and optimizer
    train_mask = masks[0]
    val_mask = masks[1]
    loss_fcn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-3, weight_decay=5e-4)

    # training loop: full-graph forward pass each epoch, with the loss
    # computed only on the training nodes
    for epoch in range(200):
        model.train()
        logits = model(g, features)
        loss = loss_fcn(logits[train_mask], labels[train_mask])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        acc = evaluate(g, features, labels, val_mask, model)
        print("Epoch {:05d} | Loss {:.4f} | Accuracy {:.4f}"
              .format(epoch, loss.item(), acc))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default="cora",
                        help="Dataset name ('cora', 'citeseer', 'pubmed').")
    args = parser.parse_args()
    print('Training with DGL built-in GATConv module.')

    # load and preprocess dataset
    transform = AddSelfLoop()  # by default, it first removes existing self-loops to prevent duplication
    if args.dataset == 'cora':
        data = CoraGraphDataset(transform=transform)
    elif args.dataset == 'citeseer':
        data = CiteseerGraphDataset(transform=transform)
    elif args.dataset == 'pubmed':
        data = PubmedGraphDataset(transform=transform)
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    g = data[0]
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    g = g.int().to(device)
    features = g.ndata['feat']
    labels = g.ndata['label']
    masks = g.ndata['train_mask'], g.ndata['val_mask'], g.ndata['test_mask']
    
    # create GAT model
    in_size = features.shape[1]
    out_size = data.num_classes
    model = GAT(in_size, 8, out_size, heads=[8, 1]).to(device)
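    # The hyperparameters above (8 hidden units per head, heads=[8, 1],
    # dropout 0.6, lr=5e-3, weight_decay=5e-4) appear to match the
    # transductive setup of the original GAT paper (Velickovic et al., 2018).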
    
    # model training
    print('Training...')
    train(g, features, labels, masks, model)

    # test the model
    print('Testing...')
    acc = evaluate(g, features, labels, masks[2], model)
    print("Test accuracy {:.4f}".format(acc))