# train.py — TreeLSTM training on the Stanford Sentiment Treebank (DGL example).
import argparse
import time
import numpy as np
import torch as th
import torch.nn.functional as F
import torch.nn.init as INIT
import torch.optim as optim
from torch.utils.data import DataLoader

import dgl
import dgl.data as data

from tree_lstm import TreeLSTM

def _evaluate(model, data_loader, h_size, device):
    """Evaluate ``model`` on every batch of ``data_loader``.

    Returns a ``(accuracy, root_accuracy)`` pair. "Root" nodes are the
    graph nodes with out-degree 0, i.e. each tree's root, which carries
    the whole-sentence sentiment label.
    """
    accs = []
    root_accs = []
    model.eval()
    for batch in data_loader:
        g = batch.graph
        n = g.number_of_nodes()
        with th.no_grad():
            h = th.zeros((n, h_size)).to(device)
            c = th.zeros((n, h_size)).to(device)
            logits = model(batch, h, c)
        pred = th.argmax(logits, 1)
        acc = th.sum(th.eq(batch.label, pred)).item()
        accs.append([acc, len(batch.label)])
        root_ids = [i for i in range(g.number_of_nodes()) if g.out_degree(i) == 0]
        root_acc = np.sum(batch.label.cpu().data.numpy()[root_ids] ==
                          pred.cpu().data.numpy()[root_ids])
        root_accs.append([root_acc, len(root_ids)])
    acc = 1.0 * np.sum([x[0] for x in accs]) / np.sum([x[1] for x in accs])
    root_acc = (1.0 * np.sum([x[0] for x in root_accs]) /
                np.sum([x[1] for x in root_accs]))
    return acc, root_acc


def main(args):
    """Train a TreeLSTM on SST; report dev and test accuracy every epoch.

    ``args`` is the argparse namespace built in ``__main__`` (gpu, seed,
    batch_size, child_sum, x_size, h_size, epochs, log_every, lr,
    weight_decay, dropout).
    """
    np.random.seed(args.seed)
    th.manual_seed(args.seed)
    th.cuda.manual_seed(args.seed)

    cuda = args.gpu >= 0
    device = th.device('cuda:{}'.format(args.gpu)) if cuda else th.device('cpu')
    if cuda:
        th.cuda.set_device(args.gpu)

    # SST data; the batcher moves each collated batch onto `device`.
    trainset = data.SST()
    train_loader = DataLoader(dataset=trainset,
                              batch_size=args.batch_size,
                              collate_fn=data.SST.batcher(device),
                              shuffle=True,
                              num_workers=0)
    devset = data.SST(mode='dev')
    dev_loader = DataLoader(dataset=devset,
                            batch_size=100,
                            collate_fn=data.SST.batcher(device),
                            shuffle=False,
                            num_workers=0)
    testset = data.SST(mode='test')
    test_loader = DataLoader(dataset=testset,
                             batch_size=100,
                             collate_fn=data.SST.batcher(device),
                             shuffle=False,
                             num_workers=0)

    model = TreeLSTM(trainset.num_vocabs,
                     args.x_size,
                     args.h_size,
                     trainset.num_classes,
                     args.dropout,
                     cell_type='childsum' if args.child_sum else 'nary',
                     pretrained_emb=trainset.pretrained_emb).to(device)
    print(model)

    # Embedding weights train with a 10x smaller lr and no weight decay;
    # they are identified by their vocab-sized leading dimension.
    params_ex_emb = [x for x in list(model.parameters())
                     if x.requires_grad and x.size(0) != trainset.num_vocabs]
    params_emb = list(model.embedding.parameters())
    optimizer = optim.Adagrad([
        {'params': params_ex_emb, 'lr': args.lr, 'weight_decay': args.weight_decay},
        {'params': params_emb, 'lr': 0.1 * args.lr}])

    dur = []
    for epoch in range(args.epochs):
        t_epoch = time.time()
        model.train()
        for step, batch in enumerate(train_loader):
            g = batch.graph
            n = g.number_of_nodes()
            h = th.zeros((n, args.h_size)).to(device)
            c = th.zeros((n, args.h_size)).to(device)
            if step >= 3:  # skip the first steps so warm-up doesn't skew timing
                t0 = time.time()  # tik

            logits = model(batch, h, c)
            logp = F.log_softmax(logits, 1)
            # BUG FIX: reduction='elementwise_mean' was deprecated in PyTorch
            # 0.4.1 and removed in 1.0; 'mean' is the equivalent spelling.
            loss = F.nll_loss(logp, batch.label, reduction='mean')
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step >= 3:
                dur.append(time.time() - t0)  # tok

            if step > 0 and step % args.log_every == 0:
                pred = th.argmax(logits, 1)
                acc = th.sum(th.eq(batch.label, pred))
                root_ids = [i for i in range(g.number_of_nodes())
                            if g.out_degree(i) == 0]
                root_acc = np.sum(batch.label.cpu().data.numpy()[root_ids] ==
                                  pred.cpu().data.numpy()[root_ids])
                print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | Acc {:.4f} | Root Acc {:.4f} | Time(s) {:.4f}".format(
                    epoch, step, loss.item(), 1.0*acc.item()/len(batch.label), 1.0*root_acc/len(root_ids), np.mean(dur)))
        print('Epoch {:05d} training time {:.4f}s'.format(epoch, time.time() - t_epoch))

        # lr decay, floored at 1e-5.
        # BUG FIX: the original ran this loop twice per epoch (once after the
        # dev pass and again, copy-pasted, after the test pass), decaying by
        # 0.99^2 per epoch; decay exactly once per epoch instead.
        for param_group in optimizer.param_groups:
            param_group['lr'] = max(1e-5, param_group['lr'] * 0.99)

        # Dev and test evaluation share one helper (was duplicated inline).
        dev_acc, dev_root_acc = _evaluate(model, dev_loader, args.h_size, device)
        print("Epoch {:05d} | Dev Acc {:.4f} | Root Acc {:.4f}".format(
            epoch, dev_acc, dev_root_acc))

        test_acc, test_root_acc = _evaluate(model, test_loader, args.h_size, device)
        print("Epoch {:05d} | Test Acc {:.4f} | Root Acc {:.4f}".format(
            epoch, test_acc, test_root_acc))

if __name__ == '__main__':
    # Command-line interface for TreeLSTM training on SST.
    arg_parser = argparse.ArgumentParser()

    # Hardware & reproducibility.
    arg_parser.add_argument('--gpu', type=int, default=-1)
    arg_parser.add_argument('--seed', type=int, default=12110)

    # Data & model shape.
    arg_parser.add_argument('--batch-size', type=int, default=25)
    arg_parser.add_argument('--child-sum', action='store_true')
    arg_parser.add_argument('--x-size', type=int, default=300)
    arg_parser.add_argument('--h-size', type=int, default=150)

    # Optimization schedule.
    arg_parser.add_argument('--epochs', type=int, default=100)
    arg_parser.add_argument('--log-every', type=int, default=5)
    arg_parser.add_argument('--lr', type=float, default=0.05)
    arg_parser.add_argument('--weight-decay', type=float, default=1e-4)
    arg_parser.add_argument('--dropout', type=float, default=0.3)

    args = arg_parser.parse_args()
    print(args)
    main(args)