Unverified commit 704bcaf6, authored by Hongzhi (Steve), Chen and committed by GitHub
parent 6bc82161
import argparse
import time
import dgl
import mxnet as mx
import numpy as np
from dgl.data import (
CiteseerGraphDataset,
CoraGraphDataset,
PubmedGraphDataset,
register_data_args,
)
from dgl.nn.mxnet.conv import APPNPConv
from mxnet import gluon, nd
from mxnet.gluon import nn
class APPNP(nn.Block):
def __init__(
......
......@@ -11,17 +11,21 @@ Pytorch implementation: https://github.com/Diego999/pyGAT
import argparse
import time
import dgl
import mxnet as mx
import networkx as nx
import numpy as np
from dgl.data import (
CiteseerGraphDataset,
CoraGraphDataset,
PubmedGraphDataset,
register_data_args,
)
from gat import GAT
from mxnet import gluon
from utils import EarlyStopping
def elu(data):
return mx.nd.LeakyReLU(data, act_type="elu")
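# Editor's note (hedged sketch, not part of the commit): mx.nd.LeakyReLU with
# act_type="elu" computes ELU(x) = x for x > 0 and slope * (exp(x) - 1)
# otherwise, so for example:
#   elu(mx.nd.array([-1.0, 0.0, 1.0]))  # ~ [-0.632, 0.0, 1.0]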
......@@ -132,7 +136,6 @@ def main(args):
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="GAT")
register_data_args(parser)
parser.add_argument(
......
......@@ -5,11 +5,10 @@ References:
- Paper: https://arxiv.org/abs/1609.02907
- Code: https://github.com/tkipf/gcn
"""
import dgl
import mxnet as mx
from dgl.nn.mxnet import GraphConv
from mxnet import gluon
class GCN(gluon.Block):
......
......@@ -5,48 +5,44 @@ Code: https://github.com/tkipf/gcn
GCN with batch processing
"""
import argparse
import time
import dgl
import dgl.function as fn
import mxnet as mx
import numpy as np
from dgl.data import (
CiteseerGraphDataset,
CoraGraphDataset,
PubmedGraphDataset,
register_data_args,
)
from mxnet import gluon
class GCNLayer(gluon.Block):
def __init__(self, g, out_feats, activation, dropout):
super(GCNLayer, self).__init__()
self.g = g
self.dense = gluon.nn.Dense(out_feats, activation)
self.dropout = dropout
def forward(self, h):
self.g.ndata["h"] = h * self.g.ndata["out_norm"]
self.g.update_all(
fn.copy_u(u="h", out="m"), fn.sum(msg="m", out="accum")
)
accum = self.g.ndata.pop("accum")
accum = self.dense(accum * self.g.ndata["in_norm"])
if self.dropout:
accum = mx.nd.Dropout(accum, p=self.dropout)
h = self.g.ndata.pop("h")
h = mx.nd.concat(h / self.g.ndata["out_norm"], accum, dim=1)
return h
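# Editor's sketch (the toy graph is an assumption, not commit code): the
# update_all above is a sparse matrix product -- fn.copy_u copies each source
# node's "h" onto its out-edges as message "m", and fn.sum adds the incoming
# messages into "accum", i.e. accum = A @ (h * out_norm):
#   toy = dgl.graph(([0, 1, 2], [1, 2, 0]))  # directed cycle 0 -> 1 -> 2 -> 0
#   toy.ndata["h"] = mx.nd.array([[1.0], [2.0], [3.0]])
#   toy.update_all(fn.copy_u(u="h", out="m"), fn.sum(msg="m", out="accum"))
#   toy.ndata["accum"]  # node i now holds the sum of its in-neighbors' "h"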
class GCN(gluon.Block):
def __init__(self, g, n_hidden, n_classes, n_layers, activation, dropout):
super(GCN, self).__init__()
self.inp_layer = gluon.nn.Dense(n_hidden, activation)
self.dropout = dropout
......@@ -55,7 +51,6 @@ class GCN(gluon.Block):
self.layers.add(GCNLayer(g, n_hidden, activation, dropout))
self.out_layer = gluon.nn.Dense(n_classes)
def forward(self, features):
emb_inp = [features, self.inp_layer(features)]
if self.dropout:
......@@ -75,14 +70,14 @@ def evaluate(model, features, labels, mask):
def main(args):
# load and preprocess dataset
if args.dataset == "cora":
data = CoraGraphDataset()
elif args.dataset == "citeseer":
data = CiteseerGraphDataset()
elif args.dataset == "pubmed":
data = PubmedGraphDataset()
else:
raise ValueError("Unknown dataset: {}".format(args.dataset))
g = data[0]
if args.gpu < 0:
......@@ -93,55 +88,64 @@ def main(args):
ctx = mx.gpu(args.gpu)
g = g.to(ctx)
features = g.ndata["feat"]
labels = mx.nd.array(g.ndata["label"], dtype="float32", ctx=ctx)
train_mask = g.ndata["train_mask"]
val_mask = g.ndata["val_mask"]
test_mask = g.ndata["test_mask"]
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(n_edges, n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar()))
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar(),
)
)
# add self loop
if args.self_loop:
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
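# Editor's note (sketch): removing then re-adding self loops makes the step
# idempotent -- every node ends up with exactly one self edge even if it
# already had one:
#   g2 = dgl.graph(([0, 0], [0, 1]))  # node 0 already has a self loop
#   g2 = dgl.add_self_loop(dgl.remove_self_loop(g2))
#   g2.number_of_edges()  # 3: edge 0 -> 1 plus one self loop per node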
# normalization
in_degs = g.in_degrees().astype("float32")
out_degs = g.out_degrees().astype("float32")
in_norm = mx.nd.power(in_degs, -0.5)
out_norm = mx.nd.power(out_degs, -0.5)
if cuda:
in_norm = in_norm.as_in_context(ctx)
out_norm = out_norm.as_in_context(ctx)
g.ndata["in_norm"] = mx.nd.expand_dims(in_norm, 1)
g.ndata["out_norm"] = mx.nd.expand_dims(out_norm, 1)
model = GCN(
g,
args.n_hidden,
n_classes,
args.n_layers,
"relu",
args.dropout,
)
model.initialize(ctx=ctx)
n_train_samples = train_mask.sum().asscalar()
loss_fcn = gluon.loss.SoftmaxCELoss()
# use optimizer
print(model.collect_params())
trainer = gluon.Trainer(
model.collect_params(),
"adam",
{"learning_rate": args.lr, "wd": args.weight_decay},
)
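# Editor's sketch of a typical gluon update with this trainer (the commit's
# actual training loop is elided below; names follow the surrounding code):
#   with mx.autograd.record():
#       pred = model(features)
#       loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
#       loss = loss.sum() / n_train_samples
#   loss.backward()
#   trainer.step(batch_size=1)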
# initialize graph
dur = []
......@@ -160,36 +164,53 @@ def main(args):
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}". format(
epoch, np.mean(dur), loss.asscalar(), acc, n_edges / np.mean(dur) / 1000))
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.asscalar(),
acc,
n_edges / np.mean(dur) / 1000,
)
)
# test set accuracy
acc = evaluate(model, features, labels, test_mask)
print("Test accuracy {:.2%}".format(acc))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="GCN")
register_data_args(parser)
parser.add_argument("--dropout", type=float, default=0.5,
help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-layers", type=int, default=1,
help="number of hidden gcn layers")
parser.add_argument("--normalization",
choices=['sym','left'], default=None,
help="graph normalization types (default=None)")
parser.add_argument("--self-loop", action='store_true',
help="graph self-loop (default=False)")
parser.add_argument("--weight-decay", type=float, default=5e-4,
help="Weight for L2 loss")
parser.add_argument(
"--dropout", type=float, default=0.5, help="dropout probability"
)
parser.add_argument("--gpu", type=int, default=-1, help="gpu")
parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
parser.add_argument(
"--n-epochs", type=int, default=200, help="number of training epochs"
)
parser.add_argument(
"--n-hidden", type=int, default=16, help="number of hidden gcn units"
)
parser.add_argument(
"--n-layers", type=int, default=1, help="number of hidden gcn layers"
)
parser.add_argument(
"--normalization",
choices=["sym", "left"],
default=None,
help="graph normalization types (default=None)",
)
parser.add_argument(
"--self-loop",
action="store_true",
help="graph self-loop (default=False)",
)
parser.add_argument(
"--weight-decay", type=float, default=5e-4, help="Weight for L2 loss"
)
args = parser.parse_args()
print(args)
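# Example invocation (editor's note; the script filename is an assumption):
#   python train.py --dataset cora --gpu 0 --self-loop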
......
......@@ -2,14 +2,14 @@
import argparse
import time
import dgl
import mxnet as mx
import numpy as np
from dgl.data import CiteseerGraphDataset, CoraGraphDataset, PubmedGraphDataset
from gcn import GCN
from mxnet import gluon
# from gcn_mp import GCN
# from gcn_spmv import GCN
......
......@@ -4,13 +4,13 @@ MxNet compatible dataloader
import math
import dgl
import numpy as np
from mxnet import nd
from mxnet.gluon.data import DataLoader, Sampler
from sklearn.model_selection import StratifiedKFold
class SubsetRandomSampler(Sampler):
def __init__(self, indices):
......@@ -52,7 +52,6 @@ class GraphDataLoader:
fold_idx=0,
split_ratio=0.7,
):
self.shuffle = shuffle
self.seed = seed
......
......@@ -6,11 +6,11 @@ Author's implementation: https://github.com/weihua916/powerful-gnns
"""
import mxnet as mx
from dgl.nn.mxnet.conv import GINConv
from dgl.nn.mxnet.glob import AvgPooling, MaxPooling, SumPooling
from mxnet import gluon, nd
from mxnet.gluon import nn
class ApplyNodeFunc(nn.Block):
......
......@@ -3,14 +3,14 @@ from parser import Parser
import mxnet as mx
import numpy as np
from dataloader import collate, GraphDataLoader
from dgl.data.gindt import GINDataset
from gin import GIN
from mxnet import gluon, nd
from mxnet.gluon import nn
from tqdm import tqdm
def train(args, net, trainloader, trainer, criterion, epoch):
running_loss = 0
......@@ -71,7 +71,6 @@ def eval_net(args, net, dataloader, criterion):
def main(args):
# set up seeds, args.seed supported
mx.random.seed(0)
np.random.seed(seed=0)
......
......@@ -7,17 +7,21 @@ Simple reference implementation of GraphSAGE.
import argparse
import time
import dgl
import mxnet as mx
import networkx as nx
import numpy as np
from dgl.data import (
CiteseerGraphDataset,
CoraGraphDataset,
PubmedGraphDataset,
register_data_args,
)
from dgl.nn.mxnet.conv import SAGEConv
from mxnet import gluon, nd
from mxnet.gluon import nn
class GraphSAGE(nn.Block):
def __init__(
......
import argparse
import time
import dgl
import mxnet as mx
import networkx as nx
import numpy as np
from dgl.data import (
CiteseerGraphDataset,
CoraGraphDataset,
PubmedGraphDataset,
register_data_args,
)
from dgl.nn.mxnet.conv import GMMConv
from mxnet import gluon, nd
from mxnet.gluon import nn
class MoNet(nn.Block):
def __init__(
......
......@@ -9,43 +9,66 @@ Difference compared to tkipf/relation-gcn
"""
import argparse
import time
from functools import partial
import dgl
import mxnet as mx
import mxnet.ndarray as F
import numpy as np
from dgl.data.rdf import AIFBDataset, AMDataset, BGSDataset, MUTAGDataset
from dgl.nn.mxnet import RelGraphConv
from model import BaseRGCN
from mxnet import gluon
class EntityClassify(BaseRGCN):
def build_input_layer(self):
return RelGraphConv(
self.num_nodes,
self.h_dim,
self.num_rels,
"basis",
self.num_bases,
activation=F.relu,
self_loop=self.use_self_loop,
dropout=self.dropout,
)
def build_hidden_layer(self, idx):
return RelGraphConv(
self.h_dim,
self.h_dim,
self.num_rels,
"basis",
self.num_bases,
activation=F.relu,
self_loop=self.use_self_loop,
dropout=self.dropout,
)
def build_output_layer(self):
return RelGraphConv(
self.h_dim,
self.out_dim,
self.num_rels,
"basis",
self.num_bases,
activation=None,
self_loop=self.use_self_loop,
)
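# Editor's note (sketch; toy shapes are assumptions): the "basis" regularizer
# decomposes each relation's weight as W_r = sum_b a_rb * V_b, so parameters
# scale with num_bases rather than num_rels. A standalone layer:
#   conv = RelGraphConv(16, 4, num_rels=2, regularizer="basis", num_bases=2)
#   conv.initialize()
#   g_toy = dgl.graph(([0, 1, 2], [1, 2, 0]))
#   etypes = mx.nd.array([0, 1, 0], dtype="int64")  # one relation id per edge
#   out = conv(g_toy, mx.nd.random.randn(3, 16), etypes)  # shape (3, 4)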
def main(args):
# load graph data
if args.dataset == "aifb":
dataset = AIFBDataset()
elif args.dataset == "mutag":
dataset = MUTAGDataset()
elif args.dataset == "bgs":
dataset = BGSDataset()
elif args.dataset == "am":
dataset = AMDataset()
else:
raise ValueError()
......@@ -56,27 +79,31 @@ def main(args):
num_rels = len(hg.canonical_etypes)
category = dataset.predict_category
num_classes = dataset.num_classes
train_mask = hg.nodes[category].data.pop("train_mask")
test_mask = hg.nodes[category].data.pop("test_mask")
train_idx = mx.nd.array(np.nonzero(train_mask.asnumpy())[0], dtype="int64")
test_idx = mx.nd.array(np.nonzero(test_mask.asnumpy())[0], dtype="int64")
labels = mx.nd.array(hg.nodes[category].data.pop("labels"), dtype="int64")
# split dataset into train, validate, test
if args.validation:
val_idx = train_idx[: len(train_idx) // 5]
train_idx = train_idx[len(train_idx) // 5 :]
else:
val_idx = train_idx
# calculate norm for each edge type and store in edge
for canonical_etype in hg.canonical_etypes:
u, v, eid = hg.all_edges(form="all", etype=canonical_etype)
v = v.asnumpy()
_, inverse_index, count = np.unique(
v, return_inverse=True, return_counts=True
)
degrees = count[inverse_index]
norm = np.ones(eid.shape[0]) / degrees
hg.edges[canonical_etype].data["norm"] = mx.nd.expand_dims(
mx.nd.array(norm), axis=1
)
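# Editor's note: the np.unique call yields each edge's destination in-degree.
# For v = [1, 2, 1]: unique -> [1, 2], count -> [2, 1], inverse_index ->
# [0, 1, 0], hence count[inverse_index] -> [2, 1, 2]; norm stores the
# reciprocal, i.e. the 1/in-degree weight attached to every edge.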
# get target category id
category_id = len(hg.ntypes)
......@@ -84,20 +111,20 @@ def main(args):
if ntype == category:
category_id = i
g = dgl.to_homogeneous(hg, edata=["norm"])
num_nodes = g.number_of_nodes()
node_ids = mx.nd.arange(num_nodes)
edge_norm = g.edata["norm"]
edge_type = g.edata[dgl.ETYPE]
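# Editor's note: dgl.to_homogeneous folds all node and edge types into one
# graph and records the original type ids in g.ndata[dgl.NTYPE] and
# g.edata[dgl.ETYPE]; passing edata=["norm"] carries each edge's norm
# through, which is why edge_norm and edge_type can be read back here.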
# find out the target node ids in g
node_tids = g.ndata[dgl.NTYPE]
loc = node_tids == category_id
loc = mx.nd.array(np.nonzero(loc.asnumpy())[0], dtype="int64")
target_idx = node_ids[loc]
# since the nodes are featureless, the input feature is then the node id.
feats = mx.nd.arange(num_nodes, dtype="int32")
# check cuda
use_cuda = args.gpu >= 0
......@@ -113,19 +140,25 @@ def main(args):
ctx = mx.cpu(0)
# create model
model = EntityClassify(
num_nodes,
args.n_hidden,
num_classes,
num_rels,
num_bases=args.n_bases,
num_hidden_layers=args.n_layers - 2,
dropout=args.dropout,
use_self_loop=args.use_self_loop,
gpu_id=args.gpu,
)
model.initialize(ctx=ctx)
# optimizer
trainer = gluon.Trainer(
model.collect_params(),
"adam",
{"learning_rate": args.lr, "wd": args.l2norm},
)
loss_fcn = gluon.loss.SoftmaxCELoss(from_logits=False)
# training loop
......@@ -145,52 +178,91 @@ def main(args):
forward_time.append(t1 - t0)
backward_time.append(t2 - t1)
print("Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
format(epoch, forward_time[-1], backward_time[-1]))
print(
"Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".format(
epoch, forward_time[-1], backward_time[-1]
)
)
train_acc = (
F.sum(
mx.nd.cast(pred[train_idx].argmax(axis=1), "int64")
== labels[train_idx]
).asscalar()
/ train_idx.shape[0]
)
val_acc = F.sum(
mx.nd.cast(pred[val_idx].argmax(axis=1), "int64") == labels[val_idx]
).asscalar() / len(val_idx)
print(
"Train Accuracy: {:.4f} | Validation Accuracy: {:.4f}".format(
train_acc, val_acc
)
)
print()
logits = model.forward(g, feats, edge_type, edge_norm)
logits = logits[target_idx]
test_acc = F.sum(
mx.nd.cast(logits[test_idx].argmax(axis=1), "int64") == labels[test_idx]
).asscalar() / len(test_idx)
print("Test Accuracy: {:.4f}".format(test_acc))
print()
print("Mean forward time: {:4f}".format(np.mean(forward_time[len(forward_time) // 4:])))
print("Mean backward time: {:4f}".format(np.mean(backward_time[len(backward_time) // 4:])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RGCN')
parser.add_argument("--dropout", type=float, default=0,
help="dropout probability")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden units")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-bases", type=int, default=-1,
help="number of filter weight matrices, default: -1 [use all]")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("-e", "--n-epochs", type=int, default=50,
help="number of training epochs")
parser.add_argument("-d", "--dataset", type=str, required=True,
help="dataset to use")
parser.add_argument("--l2norm", type=float, default=0,
help="l2 norm coef")
parser.add_argument("--use-self-loop", default=False, action='store_true',
help="include self feature as a special relation")
print(
"Mean forward time: {:4f}".format(
np.mean(forward_time[len(forward_time) // 4 :])
)
)
print(
"Mean backward time: {:4f}".format(
np.mean(backward_time[len(backward_time) // 4 :])
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="RGCN")
parser.add_argument(
"--dropout", type=float, default=0, help="dropout probability"
)
parser.add_argument(
"--n-hidden", type=int, default=16, help="number of hidden units"
)
parser.add_argument("--gpu", type=int, default=-1, help="gpu")
parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
parser.add_argument(
"--n-bases",
type=int,
default=-1,
help="number of filter weight matrices, default: -1 [use all]",
)
parser.add_argument(
"--n-layers", type=int, default=2, help="number of propagation rounds"
)
parser.add_argument(
"-e",
"--n-epochs",
type=int,
default=50,
help="number of training epochs",
)
parser.add_argument(
"-d", "--dataset", type=str, required=True, help="dataset to use"
)
parser.add_argument("--l2norm", type=float, default=0, help="l2 norm coef")
parser.add_argument(
"--use-self-loop",
default=False,
action="store_true",
help="include self feature as a special relation",
)
fp = parser.add_mutually_exclusive_group(required=False)
fp.add_argument("--validation", dest="validation", action="store_true")
fp.add_argument("--testing", dest="validation", action="store_false")
parser.set_defaults(validation=True)
args = parser.parse_args()
print(args)
args.bfs_level = args.n_layers + 1 # pruning used nodes for memory
main(args)
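# Example invocation (editor's note; the script filename is an assumption):
#   python entity_classify.py -d aifb --testing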
"""DataLoader utils."""
import dgl
from gluoncv.data.batchify import Pad
from mxnet import nd
def dgl_mp_batchify_fn(data):
if isinstance(data[0], tuple):
......
......@@ -8,13 +8,15 @@ import pickle
import warnings
from collections import Counter
import dgl
import mxnet as mx
import numpy as np
from gluoncv.data.base import VisionDataset
from gluoncv.data.transforms.presets.rcnn import (
FasterRCNNDefaultTrainTransform,
FasterRCNNDefaultValTransform,
)
class VGRelation(VisionDataset):
......
......@@ -5,7 +5,7 @@ import mxnet as mx
from data import *
from gluoncv.data.transforms import presets
from gluoncv.utils import download
from model import faster_rcnn_resnet101_v1d_custom, RelDN
from utils import *
import dgl
......
import pickle
import dgl
import gluoncv as gcv
import mxnet as mx
import numpy as np
from dgl.nn.mxnet import GraphConv
from dgl.utils import toindex
from mxnet import nd
from mxnet.gluon import nn
__all__ = ["RelDN"]
......
......@@ -11,19 +11,26 @@ import gluoncv as gcv
import mxnet as mx
import numpy as np
from data import *
from gluoncv import data as gdata, utils as gutils
from gluoncv.data.batchify import Append, FasterRCNNTrainBatchify, Tuple
from gluoncv.data.transforms.presets.rcnn import (
FasterRCNNDefaultTrainTransform,
FasterRCNNDefaultValTransform,
)
from gluoncv.model_zoo import get_model
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.utils.metrics.rcnn import (
RCNNAccMetric,
RCNNL1LossMetric,
RPNAccMetric,
RPNL1LossMetric,
)
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.parallel import Parallel, Parallelizable
from model import (
faster_rcnn_resnet101_v1d_custom,
faster_rcnn_resnet50_v1b_custom,
)
from mxnet import autograd, gluon
from mxnet.contrib import amp
......
......@@ -28,6 +28,7 @@ use_overlap = args.overlap
PATH_TO_DATASETS = os.path.expanduser(args.json_path)
path_to_json = os.path.join(PATH_TO_DATASETS, "rel_annotations_train.json")
# format in y1y2x1x2
def with_overlap(boxA, boxB):
xA = max(boxA[2], boxB[2])
......
......@@ -7,7 +7,7 @@ import numpy as np
from data import *
from gluoncv.data.batchify import Pad
from gluoncv.utils import makedirs
from model import faster_rcnn_resnet101_v1d_custom, RelDN
from mxnet import gluon, nd
from utils import *
......
import dgl
import numpy as np
from mxnet import nd
def bbox_improve(bbox):
"""bbox encoding"""
......