entity_classify_mp.py 25.2 KB
Newer Older
1
2
3
4
5
6
7
8
"""
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/tkipf/relational-gcn
Difference compared to tkipf/relation-gcn
* l2norm applied to all weights
* remove nodes that won't be touched
"""
9
import argparse, gc
10
11
12
13
14
15
16
17
18
19
20
21
22
import numpy as np
import time
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
import dgl
from dgl import DGLGraph
from functools import partial

23
from dgl.data.rdf import AIFBDataset, MUTAGDataset, BGSDataset, AMDataset
24
25
26
from model import RelGraphEmbedLayer
from dgl.nn import RelGraphConv
from utils import thread_wrapped_func
27
import tqdm
28
29

from ogb.nodeproppred import DglNodePropPredDataset
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44

class EntityClassify(nn.Module):
    """ Entity classification class for RGCN
    Parameters
    ----------
    device : int
        Device to run the layer. A negative value selects CPU.
    num_nodes : int
        Number of nodes.
    h_dim : int
        Hidden dim size.
    out_dim : int
        Output dim size.
    num_rels : int
        Numer of relation types.
    num_bases : int, optional
        Number of bases. If is none, use number of relations.
        Default None
    num_hidden_layers : int, optional
        Number of hidden RelGraphConv Layer
        Default 1
    dropout : float, optional
        Dropout.
        Default 0
    use_self_loop : bool, optional
        Use self loop if True.
        Default True
    low_mem : bool, optional
        True to use low memory implementation of relation message passing function
        trade speed with memory consumption
        Default True
    layer_norm : bool, optional
        True to use layer norm.
        Default False
    """
    def __init__(self,
                 device,
                 num_nodes,
                 h_dim,
                 out_dim,
                 num_rels,
                 num_bases=None,
                 num_hidden_layers=1,
                 dropout=0,
                 use_self_loop=False,
                 low_mem=True,
                 layer_norm=False):
        super(EntityClassify, self).__init__()
        self.device = th.device(device if device >= 0 else 'cpu')
        self.num_nodes = num_nodes
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.num_rels = num_rels
        # BUG FIX: the documented default num_bases=None used to crash on
        # `num_bases < 0` (TypeError: '<' not supported between NoneType
        # and int). Treat None and any negative value the same way: pass
        # None through to RelGraphConv ("use number of relations" bases).
        self.num_bases = None if num_bases is None or num_bases < 0 else num_bases
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.use_self_loop = use_self_loop
        self.low_mem = low_mem
        self.layer_norm = layer_norm

        self.layers = nn.ModuleList()
        # i2h
        self.layers.append(RelGraphConv(
            self.h_dim, self.h_dim, self.num_rels, "basis",
            self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
            low_mem=self.low_mem, dropout=self.dropout, layer_norm=layer_norm))
        # h2h
        for idx in range(self.num_hidden_layers):
            self.layers.append(RelGraphConv(
                self.h_dim, self.h_dim, self.num_rels, "basis",
                self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
                low_mem=self.low_mem, dropout=self.dropout, layer_norm=layer_norm))
        # h2o: no activation so callers can feed logits to cross_entropy
        self.layers.append(RelGraphConv(
            self.h_dim, self.out_dim, self.num_rels, "basis",
            self.num_bases, activation=None,
            self_loop=self.use_self_loop,
            low_mem=self.low_mem, layer_norm=layer_norm))

    def forward(self, blocks, feats, norm=None):
        """Run all RGCN layers over the sampled blocks.

        Parameters
        ----------
        blocks : list of DGL blocks or None
            One block per layer; None selects the full-graph path.
        feats : torch.Tensor
            Input features for the source nodes of the first block.
        norm : unused
            Kept for API compatibility; per-edge norms are read from
            ``block.edata['norm']`` instead.

        Returns
        -------
        torch.Tensor
            Output logits for the seed nodes.
        """
        if blocks is None:
            # full graph training
            # NOTE(review): ``self.g`` is never assigned in this class, so
            # this path raises AttributeError unless a caller sets it first.
            blocks = [self.g] * len(self.layers)
        h = feats
        for layer, block in zip(self.layers, blocks):
            block = block.to(self.device)
            h = layer(block, h, block.edata['etype'], block.edata['norm'])
        return h

119
120
121
122
123
124
125
126
def gen_norm(g):
    """Attach edge normalization factors to ``g``.

    For every edge, the factor is 1 / in-degree(dst-node), stored as a
    column vector under ``g.edata['norm']``.
    """
    _, dst_nodes, edge_ids = g.all_edges(form='all')
    _, inv, counts = th.unique(dst_nodes, return_inverse=True, return_counts=True)
    # Broadcast each destination's in-degree back onto its incoming edges.
    in_degrees = counts[inv]
    edge_norm = th.ones(edge_ids.shape[0], device=edge_ids.device) / in_degrees
    g.edata['norm'] = edge_norm.unsqueeze(1)

127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
class NeighborSampler:
    """Neighbor sampler
    Parameters
    ----------
    g : DGLHeterograph
        Full graph
    target_idx : tensor
        The target training node IDs in g
    fanouts : list of int
        Fanout of each hop starting from the seed nodes. If a fanout is None,
        sample full neighbors.
    """
    def __init__(self, g, target_idx, fanouts):
        self.g = g
        self.target_idx = target_idx
        self.fanouts = fanouts

    def sample_blocks(self, seeds):
        """Do neighbor sample
        Parameters
        ----------
        seeds :
            Seed nodes
        Returns
        -------
        tensor
            Seed nodes, also known as target nodes
        blocks
            Sampled subgraphs
        """
        # FIX: removed dead locals (etypes/norms/ntypes) that were built but
        # never used, and turned the misplaced bare string that preceded
        # this method into a proper docstring.
        blocks = []
        seeds = th.tensor(seeds).long()
        cur = self.target_idx[seeds]
        for fanout in self.fanouts:
            if fanout is None or fanout == -1:
                # Full-neighbor sampling for this hop.
                frontier = dgl.in_subgraph(self.g, cur)
            else:
                frontier = dgl.sampling.sample_neighbors(self.g, cur, fanout)
            block = dgl.to_block(frontier, cur)
            gen_norm(block)
            cur = block.srcdata[dgl.NID]
            # Blocks are collected outermost-hop first.
            blocks.insert(0, block)
        return seeds, blocks

174
175
176
177
178
def evaluate(model, embed_layer, eval_loader, node_feats):
    """Run inference over ``eval_loader``.

    Puts both modules into eval mode, embeds the input nodes of each
    sampled batch, forwards them through the model, and gathers results
    on CPU.

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        Concatenated logits and the matching seed node ids.
    """
    model.eval()
    embed_layer.eval()
    logit_batches = []
    seed_batches = []

    with th.no_grad():
        th.cuda.empty_cache()
        for seeds, blocks in tqdm.tqdm(eval_loader):
            src_data = blocks[0].srcdata
            feats = embed_layer(src_data[dgl.NID],
                                src_data['ntype'],
                                src_data['type_id'],
                                node_feats)
            logits = model(blocks, feats)
            logit_batches.append(logits.cpu().detach())
            seed_batches.append(seeds.cpu().detach())

    return th.cat(logit_batches), th.cat(seed_batches)
197

198
@thread_wrapped_func
def run(proc_id, n_gpus, n_cpus, args, devices, dataset, split, queue=None):
    """Per-process training/evaluation loop.

    Parameters
    ----------
    proc_id : int
        Rank of this worker (0-based).
    n_gpus : int
        Total number of GPU workers (0 or 1 for single-process runs).
    n_cpus : int
        Number of CPU threads available to this worker.
    args : argparse.Namespace
        Command-line configuration.
    devices : list
        Per-worker device ids; 'cpu' or a GPU index.
    dataset : tuple
        (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
         train_idx, val_idx, test_idx, labels).
    split : tuple or None
        Per-worker (train_seed, val_seed, test_seed) index permutations for
        multi-GPU runs; None means use the full index sets.
    queue : torch.multiprocessing.Queue, optional
        Channel used to gather evaluation results across workers.
    """
    dev_id = devices[proc_id] if devices[proc_id] != 'cpu' else -1
    g, node_feats, num_of_ntype, num_classes, num_rels, target_idx, \
        train_idx, val_idx, test_idx, labels = dataset

    # Restrict this worker to its shard of the train/val/test indices.
    if split is not None:
        train_seed, val_seed, test_seed = split
        train_idx = train_idx[train_seed]
        val_idx = val_idx[val_seed]
        test_idx = test_idx[test_seed]

    fanouts = [int(fanout) for fanout in args.fanout.split(',')]
    node_tids = g.ndata[dgl.NTYPE]
    sampler = NeighborSampler(g, target_idx, fanouts)
    loader = DataLoader(dataset=train_idx.numpy(),
                        batch_size=args.batch_size,
                        collate_fn=sampler.sample_blocks,
                        shuffle=True,
                        num_workers=args.num_workers)

    # validation sampler
    val_sampler = NeighborSampler(g, target_idx, fanouts)
    val_loader = DataLoader(dataset=val_idx.numpy(),
                            batch_size=args.batch_size,
                            collate_fn=val_sampler.sample_blocks,
                            shuffle=False,
                            num_workers=args.num_workers)

    # test sampler: full-neighbor sampling (fanout None) for exact inference
    test_sampler = NeighborSampler(g, target_idx, [None] * args.n_layers)
    test_loader = DataLoader(dataset=test_idx.numpy(),
                             batch_size=args.eval_batch_size,
                             collate_fn=test_sampler.sample_blocks,
                             shuffle=False,
                             num_workers=args.num_workers)

    world_size = n_gpus
    if n_gpus > 1:
        dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
            master_ip='127.0.0.1', master_port='12345')
        backend = 'nccl'

        # using sparse embedding or using mix_cpu_gpu model
        # (embedding model can not be stored in GPU, so fall back to gloo)
        if not args.dgl_sparse:
            backend = 'gloo'
        print("backend using {}".format(backend))
        th.distributed.init_process_group(backend=backend,
                                          init_method=dist_init_method,
                                          world_size=world_size,
                                          rank=dev_id)

    # node features
    # None for one-hot feature, if not none, it should be the feature tensor.
    embed_layer = RelGraphEmbedLayer(dev_id,
                                     g.number_of_nodes(),
                                     node_tids,
                                     num_of_ntype,
                                     node_feats,
                                     args.n_hidden,
                                     dgl_sparse=args.dgl_sparse)

    # create model
    # all model params are in device.
    model = EntityClassify(dev_id,
                           g.number_of_nodes(),
                           args.n_hidden,
                           num_classes,
                           num_rels,
                           num_bases=args.n_bases,
                           num_hidden_layers=args.n_layers - 2,
                           dropout=args.dropout,
                           use_self_loop=args.use_self_loop,
                           low_mem=args.low_mem,
                           layer_norm=args.layer_norm)

    if dev_id >= 0 and n_gpus == 1:
        th.cuda.set_device(dev_id)
        labels = labels.to(dev_id)
        model.cuda(dev_id)
        # with dgl_sparse emb, only node embedding is not in GPU
        if args.dgl_sparse:
            embed_layer.cuda(dev_id)

    if n_gpus > 1:
        labels = labels.to(dev_id)
        model.cuda(dev_id)
        model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)
        if args.dgl_sparse:
            embed_layer.cuda(dev_id)
            if len(list(embed_layer.parameters())) > 0:
                embed_layer = DistributedDataParallel(embed_layer, device_ids=[dev_id], output_device=dev_id)
        else:
            # Embedding stays on CPU, so DDP runs without device pinning.
            if len(list(embed_layer.parameters())) > 0:
                embed_layer = DistributedDataParallel(embed_layer, device_ids=None, output_device=None)

    # optimizer for the dense parameters (model + optional input projections)
    dense_params = list(model.parameters())
    if args.node_feats:
        if n_gpus > 1:
            dense_params += list(embed_layer.module.embeds.parameters())
        else:
            dense_params += list(embed_layer.embeds.parameters())
    optimizer = th.optim.Adam(dense_params, lr=args.lr, weight_decay=args.l2norm)

    if args.dgl_sparse:
        all_params = list(model.parameters()) + list(embed_layer.parameters())
        optimizer = th.optim.Adam(all_params, lr=args.lr, weight_decay=args.l2norm)
        if n_gpus > 1 and isinstance(embed_layer, DistributedDataParallel):
            dgl_emb = embed_layer.module.dgl_emb
        else:
            dgl_emb = embed_layer.dgl_emb
        emb_optimizer = dgl.optim.SparseAdam(params=dgl_emb, lr=args.sparse_lr, eps=1e-8) if len(dgl_emb) > 0 else None
    else:
        if n_gpus > 1:
            embs = list(embed_layer.module.node_embeds.parameters())
        else:
            embs = list(embed_layer.node_embeds.parameters())
        emb_optimizer = th.optim.SparseAdam(embs, lr=args.sparse_lr) if len(embs) > 0 else None

    # training loop
    print("start training...")
    forward_time = []
    backward_time = []

    train_time = 0
    validation_time = 0
    test_time = 0
    last_val_acc = 0.0
    do_test = False
    # BUG FIX: test_acc/test_loss were only bound after a test pass ran, so
    # the final report raised NameError when no epoch triggered testing.
    test_acc = None
    test_loss = None
    if n_gpus > 1 and n_cpus - args.num_workers > 0:
        th.set_num_threads(n_cpus - args.num_workers)
    for epoch in range(args.n_epochs):
        tstart = time.time()
        model.train()
        embed_layer.train()

        for i, sample_data in enumerate(loader):
            seeds, blocks = sample_data
            t0 = time.time()
            feats = embed_layer(blocks[0].srcdata[dgl.NID],
                                blocks[0].srcdata['ntype'],
                                blocks[0].srcdata['type_id'],
                                node_feats)
            logits = model(blocks, feats)
            loss = F.cross_entropy(logits, labels[seeds])
            t1 = time.time()
            optimizer.zero_grad()
            if emb_optimizer is not None:
                emb_optimizer.zero_grad()

            loss.backward()
            if emb_optimizer is not None:
                emb_optimizer.step()
            optimizer.step()
            t2 = time.time()

            forward_time.append(t1 - t0)
            backward_time.append(t2 - t1)
            train_acc = th.sum(logits.argmax(dim=1) == labels[seeds]).item() / len(seeds)
            # BUG FIX: was `if i % 100 and proc_id == 0`, which logged on
            # every batch EXCEPT multiples of 100; log every 100th batch.
            if i % 100 == 0 and proc_id == 0:
                print("Train Accuracy: {:.4f} | Train Loss: {:.4f}".
                    format(train_acc, loss.item()))
        gc.collect()
        print("Epoch {:05d}:{:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
            format(epoch, args.n_epochs, forward_time[-1], backward_time[-1]))
        tend = time.time()
        train_time += (tend - tstart)

        def collect_eval():
            # Gather one (logits, seeds) pair from every worker via queue
            # and compute global loss/accuracy on rank 0.
            eval_logits = []
            eval_seeds = []
            for i in range(n_gpus):
                log = queue.get()
                eval_l, eval_s = log
                eval_logits.append(eval_l)
                eval_seeds.append(eval_s)
            eval_logits = th.cat(eval_logits)
            eval_seeds = th.cat(eval_seeds)
            eval_loss = F.cross_entropy(eval_logits, labels[eval_seeds].cpu()).item()
            eval_acc = th.sum(eval_logits.argmax(dim=1) == labels[eval_seeds].cpu()).item() / len(eval_seeds)

            return eval_loss, eval_acc

        vstart = time.time()
        if (queue is not None) or (proc_id == 0):
            val_logits, val_seeds = evaluate(model, embed_layer, val_loader, node_feats)
            if queue is not None:
                queue.put((val_logits, val_seeds))

            # gather evaluation result from multiple processes
            if proc_id == 0:
                val_loss, val_acc = collect_eval() if queue is not None else \
                    (F.cross_entropy(val_logits, labels[val_seeds].cpu()).item(), \
                    th.sum(val_logits.argmax(dim=1) == labels[val_seeds].cpu()).item() / len(val_seeds))

                # Only run the (expensive) test pass when validation improved.
                do_test = val_acc > last_val_acc
                last_val_acc = val_acc
                print("Validation Accuracy: {:.4f} | Validation loss: {:.4f}".
                        format(val_acc, val_loss))
        if n_gpus > 1:
            # Broadcast rank-0's do_test decision to all other workers.
            th.distributed.barrier()
            if proc_id == 0:
                for i in range(1, n_gpus):
                    queue.put(do_test)
            else:
                do_test = queue.get()

        vend = time.time()
        validation_time += (vend - vstart)

        if epoch > 0 and do_test:
            tstart = time.time()
            if (queue is not None) or (proc_id == 0):
                test_logits, test_seeds = evaluate(model, embed_layer, test_loader, node_feats)
                if queue is not None:
                    queue.put((test_logits, test_seeds))

                # gather evaluation result from multiple processes
                if proc_id == 0:
                    test_loss, test_acc = collect_eval() if queue is not None else \
                        (F.cross_entropy(test_logits, labels[test_seeds].cpu()).item(), \
                        th.sum(test_logits.argmax(dim=1) == labels[test_seeds].cpu()).item() / len(test_seeds))
                    print("Test Accuracy: {:.4f} | Test loss: {:.4f}".format(test_acc, test_loss))
                    print()
            tend = time.time()
            test_time += (tend-tstart)

            # sync for test
            if n_gpus > 1:
                th.distributed.barrier()

    # Skip the first quarter of batches when averaging to ignore warm-up.
    print("{}/{} Mean forward time: {:4f}".format(proc_id, n_gpus,
                                                  np.mean(forward_time[len(forward_time) // 4:])))
    print("{}/{} Mean backward time: {:4f}".format(proc_id, n_gpus,
                                                   np.mean(backward_time[len(backward_time) // 4:])))
    if proc_id == 0:
        if test_acc is not None:
            print("Test Accuracy: {:.4f} | Test loss: {:.4f}".format(test_acc, test_loss))
        print("Train {}s, valid {}s, test {}s".format(train_time, validation_time, test_time))
437
438
439
440
441

def main(args, devices):
    """Load the dataset, prepare shared-memory graph data and launch training.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line configuration.
    devices : list of int
        GPU ids; ``[-1]`` selects CPU-only training; more than one id
        launches one process per GPU.
    """
    # load graph data
    ogb_dataset = False
    if args.dataset == 'aifb':
        dataset = AIFBDataset()
    elif args.dataset == 'mutag':
        dataset = MUTAGDataset()
    elif args.dataset == 'bgs':
        dataset = BGSDataset()
    elif args.dataset == 'am':
        dataset = AMDataset()
    elif args.dataset == 'ogbn-mag':
        dataset = DglNodePropPredDataset(name=args.dataset)
        ogb_dataset = True
    else:
        # BUG FIX: the bare ValueError() carried no diagnostic.
        raise ValueError('Unsupported dataset: {}'.format(args.dataset))

    if ogb_dataset is True:
        split_idx = dataset.get_idx_split()
        train_idx = split_idx["train"]['paper']
        val_idx = split_idx["valid"]['paper']
        test_idx = split_idx["test"]['paper']
        hg_orig, labels = dataset[0]
        # Add a reverse edge type for every canonical edge type so message
        # passing can flow both ways.
        subgs = {}
        for etype in hg_orig.canonical_etypes:
            u, v = hg_orig.all_edges(etype=etype)
            subgs[etype] = (u, v)
            subgs[(etype[2], 'rev-'+etype[1], etype[0])] = (v, u)
        hg = dgl.heterograph(subgs)
        hg.nodes['paper'].data['feat'] = hg_orig.nodes['paper'].data['feat']
        labels = labels['paper'].squeeze()

        num_rels = len(hg.canonical_etypes)
        num_of_ntype = len(hg.ntypes)
        num_classes = dataset.num_classes
        if args.dataset == 'ogbn-mag':
            category = 'paper'
        print('Number of relations: {}'.format(num_rels))
        print('Number of class: {}'.format(num_classes))
        print('Number of train: {}'.format(len(train_idx)))
        print('Number of valid: {}'.format(len(val_idx)))
        print('Number of test: {}'.format(len(test_idx)))

    else:
        # Load from hetero-graph
        hg = dataset[0]

        num_rels = len(hg.canonical_etypes)
        num_of_ntype = len(hg.ntypes)
        category = dataset.predict_category
        num_classes = dataset.num_classes
        train_mask = hg.nodes[category].data.pop('train_mask')
        test_mask = hg.nodes[category].data.pop('test_mask')
        labels = hg.nodes[category].data.pop('labels')
        train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
        test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()

        # AIFB, MUTAG, BGS and AM datasets do not provide validation set split.
        # Split train set into train and validation if args.validation is set
        # otherwise use train set as the validation set.
        if args.validation:
            val_idx = train_idx[:len(train_idx) // 5]
            train_idx = train_idx[len(train_idx) // 5:]
        else:
            val_idx = train_idx

    # Per node type: either the feature tensor (shared across processes) or,
    # when features are absent/disabled, the node count for one-hot embedding.
    node_feats = []
    for ntype in hg.ntypes:
        if len(hg.nodes[ntype].data) == 0 or args.node_feats is False:
            node_feats.append(hg.number_of_nodes(ntype))
        else:
            assert len(hg.nodes[ntype].data) == 1
            feat = hg.nodes[ntype].data.pop('feat')
            node_feats.append(feat.share_memory_())

    # get target category id
    category_id = len(hg.ntypes)
    for i, ntype in enumerate(hg.ntypes):
        if ntype == category:
            category_id = i
        print('{}:{}'.format(i, ntype))

    # Flatten the heterograph; keep type info as shared-memory node/edge data
    # so worker processes can read it without copying.
    g = dgl.to_homogeneous(hg)
    g.ndata['ntype'] = g.ndata[dgl.NTYPE]
    g.ndata['ntype'].share_memory_()
    g.edata['etype'] = g.edata[dgl.ETYPE]
    g.edata['etype'].share_memory_()
    g.ndata['type_id'] = g.ndata[dgl.NID]
    g.ndata['type_id'].share_memory_()
    node_ids = th.arange(g.number_of_nodes())

    # find out the target node ids
    node_tids = g.ndata[dgl.NTYPE]
    loc = (node_tids == category_id)
    target_idx = node_ids[loc]
    target_idx.share_memory_()
    train_idx.share_memory_()
    val_idx.share_memory_()
    test_idx.share_memory_()
    # Create csr/coo/csc formats before launching training processes with multi-gpu.
    # This avoids creating certain formats in each sub-process, which saves memory and CPU.
    g.create_formats_()

    n_gpus = len(devices)
    n_cpus = mp.cpu_count()
    # cpu
    if devices[0] == -1:
        run(0, 0, n_cpus, args, ['cpu'],
            (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
             train_idx, val_idx, test_idx, labels), None, None)
    # gpu
    elif n_gpus == 1:
        run(0, n_gpus, n_cpus, args, devices,
            (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
            train_idx, val_idx, test_idx, labels), None, None)
    # multi gpu
    else:
        queue = mp.Queue(n_gpus)
        procs = []
        num_train_seeds = train_idx.shape[0]
        num_valid_seeds = val_idx.shape[0]
        num_test_seeds = test_idx.shape[0]
        train_seeds = th.randperm(num_train_seeds)
        valid_seeds = th.randperm(num_valid_seeds)
        test_seeds = th.randperm(num_test_seeds)
        tseeds_per_proc = num_train_seeds // n_gpus
        vseeds_per_proc = num_valid_seeds // n_gpus
        tstseeds_per_proc = num_test_seeds // n_gpus
        for proc_id in range(n_gpus):
            # we have multi-gpu for training, evaluation and testing
            # so split train set, valid set and test set into num-of-gpu parts.
            proc_train_seeds = train_seeds[proc_id * tseeds_per_proc :
                                           (proc_id + 1) * tseeds_per_proc \
                                           if (proc_id + 1) * tseeds_per_proc < num_train_seeds \
                                           else num_train_seeds]
            proc_valid_seeds = valid_seeds[proc_id * vseeds_per_proc :
                                           (proc_id + 1) * vseeds_per_proc \
                                           if (proc_id + 1) * vseeds_per_proc < num_valid_seeds \
                                           else num_valid_seeds]
            proc_test_seeds = test_seeds[proc_id * tstseeds_per_proc :
                                         (proc_id + 1) * tstseeds_per_proc \
                                         if (proc_id + 1) * tstseeds_per_proc < num_test_seeds \
                                         else num_test_seeds]
            p = mp.Process(target=run, args=(proc_id, n_gpus, n_cpus // n_gpus, args, devices,
                                             (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
                                             train_idx, val_idx, test_idx, labels),
                                             (proc_train_seeds, proc_valid_seeds, proc_test_seeds),
                                             queue))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()


def config(argv=None):
    """Parse command-line options for the RGCN entity classification script.

    Parameters
    ----------
    argv : list of str, optional
        Explicit argument list to parse. Defaults to None, in which case
        argparse reads ``sys.argv[1:]`` — so existing ``config()`` callers
        are unaffected; passing a list makes the parser unit-testable.

    Returns
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description='RGCN')
    parser.add_argument("--dropout", type=float, default=0,
            help="dropout probability")
    parser.add_argument("--n-hidden", type=int, default=16,
            help="number of hidden units")
    parser.add_argument("--gpu", type=str, default='0',
            help="gpu")
    parser.add_argument("--lr", type=float, default=1e-2,
            help="learning rate")
    parser.add_argument("--sparse-lr", type=float, default=2e-2,
            help="sparse embedding learning rate")
    parser.add_argument("--n-bases", type=int, default=-1,
            help="number of filter weight matrices, default: -1 [use all]")
    parser.add_argument("--n-layers", type=int, default=2,
            help="number of propagation rounds")
    parser.add_argument("-e", "--n-epochs", type=int, default=50,
            help="number of training epochs")
    parser.add_argument("-d", "--dataset", type=str, required=True,
            help="dataset to use")
    parser.add_argument("--l2norm", type=float, default=0,
            help="l2 norm coef")
    parser.add_argument("--fanout", type=str, default="4, 4",
            help="Fan-out of neighbor sampling.")
    parser.add_argument("--use-self-loop", default=False, action='store_true',
            help="include self feature as a special relation")
    # --validation and --testing are mutually exclusive toggles of the same flag.
    fp = parser.add_mutually_exclusive_group(required=False)
    fp.add_argument('--validation', dest='validation', action='store_true')
    fp.add_argument('--testing', dest='validation', action='store_false')
    parser.add_argument("--batch-size", type=int, default=100,
            help="Mini-batch size. ")
    parser.add_argument("--eval-batch-size", type=int, default=32,
            help="Mini-batch size. ")
    parser.add_argument("--num-workers", type=int, default=0,
            help="Number of workers for dataloader.")
    parser.add_argument("--low-mem", default=False, action='store_true',
            help="Whether use low mem RelGraphConv")
    parser.add_argument("--dgl-sparse", default=False, action='store_true',
            help='Use sparse embedding for node embeddings.')
    parser.add_argument('--node-feats', default=False, action='store_true',
            help='Whether use node features')
    parser.add_argument('--layer-norm', default=False, action='store_true',
            help='Use layer norm')
    parser.set_defaults(validation=True)
    args = parser.parse_args(argv)
    return args

if __name__ == '__main__':
    # Parse CLI options, resolve the device list, then launch training.
    args = config()
    devices = [int(dev) for dev in args.gpu.split(',')]
    print(args)
    main(args, devices)