"vscode:/vscode.git/clone" did not exist on "3234189b60092a2408fdf4f412eae3346404ae27"
train_sampling.py 8.26 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import dgl.nn.pytorch as dglnn
import time
import argparse
import tqdm

from model import SAGE
from load_graph import load_reddit, inductive_split, load_ogb

def compute_acc(pred, labels):
    """
    Compute the accuracy of prediction given the labels.
    """
    labels = labels.long()
    return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)

def evaluate(model, g, nfeat, labels, val_nid, device):
    """
    Evaluate the model on the validation set specified by ``val_nid``.

    g : The entire graph.
    nfeat : The features of all the nodes.
    labels : The labels of all the nodes.
    val_nid : The node IDs for validation.
    device : The GPU device to evaluate on.
    """
    model.eval()
    with th.no_grad():
        # ``args`` is the module-level namespace parsed under ``__main__`` below.
        pred = model.inference(g, nfeat, device, args.batch_size, args.num_workers)
    model.train()
    return compute_acc(pred[val_nid], labels[val_nid].to(pred.device))
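# Note: ``model.inference`` above is assumed (per the companion ``model.py`` of this
# example) to run layer-wise, full-neighborhood inference over the whole graph in
# minibatches, which is why evaluation does not go through the sampling dataloader
# used for training.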

def load_subtensor(nfeat, labels, seeds, input_nodes, device):
    """
    Extracts features and labels for a subset of nodes.
    """
    batch_inputs = nfeat[input_nodes].to(device)
    batch_labels = labels[seeds].to(device)
    return batch_inputs, batch_labels

#### Entry point
def run(args, device, data):
    # Unpack data
    n_classes, train_g, val_g, test_g, train_nfeat, train_labels, \
    val_nfeat, val_labels, test_nfeat, test_labels = data
    in_feats = train_nfeat.shape[1]

    # Convert the boolean node masks into node ID tensors
    test_nid = test_g.ndata.pop('test_mask',
        ~(test_g.ndata['train_mask'] | test_g.ndata['val_mask'])).nonzero().squeeze()
    train_nid = train_g.ndata.pop('train_mask').nonzero().squeeze()
    val_nid = val_g.ndata.pop('val_mask').nonzero().squeeze()

    if args.graph_device == 'gpu':
        train_nid = train_nid.to(device)
        # copy only the csc to the GPU
        train_g = train_g.formats(['csc'])
        train_g = train_g.to(device)
        # GPU/UVA sampling runs in the main process, so no worker processes are used
        args.num_workers = 0
    elif args.graph_device == 'uva':
        train_nid = train_nid.to(device)
        train_g = train_g.formats(['csc'])
        # pin the graph in host memory so the GPU can sample from it via zero-copy (UVA)
        train_g.pin_memory_()
        args.num_workers = 0

    # Create PyTorch DataLoader for constructing blocks
    sampler = dgl.dataloading.MultiLayerNeighborSampler(
        [int(fanout) for fanout in args.fan_out.split(',')])
    dataloader = dgl.dataloading.NodeDataLoader(
        train_g,
        train_nid,
        sampler,
        device=device,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers)
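    # A rough sketch of what one ``dataloader`` iteration yields (assuming DGL's
    # NodeDataLoader + MultiLayerNeighborSampler semantics): ``seeds`` are the output
    # nodes of the minibatch, ``input_nodes`` are every node whose features are needed
    # to compute them, and ``blocks`` holds one message-flow graph per layer, ordered
    # from the input layer to the output layer. For example:
    #
    #   input_nodes, seeds, blocks = next(iter(dataloader))
    #   assert blocks[0].num_src_nodes() == len(input_nodes)
    #   assert blocks[-1].num_dst_nodes() == len(seeds)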

    # Define model and optimizer
    model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)
    model = model.to(device)
    loss_fcn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training loop
    avg = 0
    iter_tput = []
    for epoch in range(args.num_epochs):
        tic = time.time()

        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        tic_step = time.time()
        for step, (input_nodes, seeds, blocks) in enumerate(dataloader):
            # Load the input features as well as output labels
            batch_inputs, batch_labels = load_subtensor(train_nfeat, train_labels,
                                                        seeds, input_nodes, device)
            blocks = [block.int().to(device) for block in blocks]

            # Compute loss and prediction
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
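            # ``model(blocks, batch_inputs)`` is expected to return one row of logits per
            # seed node, so ``batch_pred`` lines up with ``batch_labels`` loaded above.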

            iter_tput.append(len(seeds) / (time.time() - tic_step))
            if step % args.log_every == 0:
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
                print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(
                    epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
            tic_step = time.time()

        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format(toc - tic))
        if epoch >= 5:
            # exclude the first 5 warm-up epochs from the average epoch time
            avg += toc - tic
        if epoch % args.eval_every == 0 and epoch != 0:
            eval_acc = evaluate(model, val_g, val_nfeat, val_labels, val_nid, device)
            print('Eval Acc {:.4f}'.format(eval_acc))
            test_acc = evaluate(model, test_g, test_nfeat, test_labels, test_nid, device)
            print('Test Acc: {:.4f}'.format(test_acc))

    print('Avg epoch time: {}'.format(avg / (epoch - 4)))

if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--gpu', type=int, default=0,
                           help="GPU device ID. Use -1 for CPU training")
    argparser.add_argument('--dataset', type=str, default='reddit')
    argparser.add_argument('--num-epochs', type=int, default=20)
    argparser.add_argument('--num-hidden', type=int, default=16)
    argparser.add_argument('--num-layers', type=int, default=2)
    argparser.add_argument('--fan-out', type=str, default='10,25')
    argparser.add_argument('--batch-size', type=int, default=1000)
    argparser.add_argument('--log-every', type=int, default=20)
    argparser.add_argument('--eval-every', type=int, default=5)
    argparser.add_argument('--lr', type=float, default=0.003)
    argparser.add_argument('--dropout', type=float, default=0.5)
    argparser.add_argument('--num-workers', type=int, default=4,
                           help="Number of sampling processes. Use 0 for no extra process.")
    argparser.add_argument('--inductive', action='store_true',
                           help="Inductive learning setting")
    argparser.add_argument('--graph-device', choices=('cpu', 'gpu', 'uva'), default='cpu',
                           help="Device to perform the sampling. "
                                "Must have 0 workers for 'gpu' and 'uva'")
    argparser.add_argument('--data-device', choices=('cpu', 'gpu', 'uva'), default='gpu',
                           help="By default the script puts all node features and labels "
                                "on GPU when using it to save time for data copy. This may "
                                "be undesired if they cannot fit in GPU memory at once. "
                                "Use 'cpu' to keep the features on host memory and "
                                "'uva' to enable UnifiedTensor (GPU zero-copy access on "
                                "pinned host memory).")
    args = argparser.parse_args()

    if args.gpu >= 0:
        device = th.device('cuda:%d' % args.gpu)
    else:
        device = th.device('cpu')
        assert args.graph_device == 'cpu', \
               f"Must have GPUs to enable {args.graph_device} sampling."
        assert args.data_device == 'cpu', \
               f"Must have GPUs to enable {args.data_device} feature storage."

    if args.dataset == 'reddit':
        g, n_classes = load_reddit()
    elif args.dataset == 'ogbn-products':
        g, n_classes = load_ogb('ogbn-products')
    else:
        raise Exception('unknown dataset')

    if args.inductive:
        train_g, val_g, test_g = inductive_split(g)
        train_nfeat = train_g.ndata.pop('features')
        val_nfeat = val_g.ndata.pop('features')
        test_nfeat = test_g.ndata.pop('features')
        train_labels = train_g.ndata.pop('labels')
        val_labels = val_g.ndata.pop('labels')
        test_labels = test_g.ndata.pop('labels')
    else:
        train_g = val_g = test_g = g
        train_nfeat = val_nfeat = test_nfeat = g.ndata.pop('features')
        train_labels = val_labels = test_labels = g.ndata.pop('labels')

    if args.data_device == 'gpu':
        train_nfeat = train_nfeat.to(device)
        train_labels = train_labels.to(device)
    elif args.data_device == 'uva':
        # keep features/labels in pinned host memory; the GPU reads them via zero-copy
        train_nfeat = dgl.contrib.UnifiedTensor(train_nfeat, device=device)
        train_labels = dgl.contrib.UnifiedTensor(train_labels, device=device)

    # Pack data
    data = n_classes, train_g, val_g, test_g, train_nfeat, train_labels, \
           val_nfeat, val_labels, test_nfeat, test_labels

    run(args, device, data)
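
# Example invocations (assuming the DGL example's companion ``model.py`` and
# ``load_graph.py`` sit next to this script):
#   python train_sampling.py --dataset reddit --gpu 0
#   python train_sampling.py --dataset ogbn-products --gpu 0 --graph-device uva --data-device uva
#   python train_sampling.py --dataset reddit --gpu -1 --graph-device cpu --data-device cpu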