Unverified Commit d78a3a4b authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Black auto fix. (#4640)



* auto fix

* add more

* sort
Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 23d09057
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark('time')
@utils.parametrize('graph_name', ['cora', 'livejournal'])
@utils.parametrize('format', ['coo'])
@utils.benchmark("time")
@utils.parametrize("graph_name", ["cora", "livejournal"])
@utils.parametrize("format", ["coo"])
def track_time(graph_name, format):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
......
import time
import dgl
import torch
import dgl
from .. import utils
@utils.benchmark('time')
@utils.parametrize('batch_size', [4, 32, 256, 1024])
@utils.benchmark("time")
@utils.parametrize("batch_size", [4, 32, 256, 1024])
def track_time(batch_size):
device = utils.get_bench_device()
ds = dgl.data.QM7bDataset()
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark('time', timeout=600)
@utils.parametrize('graph_name', ['cora', 'ogbn-arxiv'])
@utils.parametrize('format', ['coo', 'csr'])
@utils.parametrize('feat_size', [8, 128, 512])
@utils.parametrize('reduce_type', ['u->e', 'u+v'])
@utils.benchmark("time", timeout=600)
@utils.parametrize("graph_name", ["cora", "ogbn-arxiv"])
@utils.parametrize("format", ["coo", "csr"])
@utils.parametrize("feat_size", [8, 128, 512])
@utils.parametrize("reduce_type", ["u->e", "u+v"])
def track_time(graph_name, format, feat_size, reduce_type):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
graph = graph.to(device)
graph.ndata['h'] = torch.randn(
(graph.num_nodes(), feat_size), device=device)
graph.ndata["h"] = torch.randn(
(graph.num_nodes(), feat_size), device=device
)
reduce_builtin_dict = {
'u->e': fn.copy_u('h', 'x'),
'u+v': fn.u_add_v('h', 'h', 'x'),
"u->e": fn.copy_u("h", "x"),
"u+v": fn.u_add_v("h", "h", "x"),
}
# dry run
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark('time', timeout=600)
@utils.parametrize('num_relations', [5, 50, 500])
@utils.parametrize('format', ['coo', 'csr'])
@utils.parametrize('feat_size', [8, 128, 512])
@utils.parametrize('reduce_type', ['u->e']) #, 'e->u'])
def track_time( num_relations, format, feat_size, reduce_type):
@utils.benchmark("time", timeout=600)
@utils.parametrize("num_relations", [5, 50, 500])
@utils.parametrize("format", ["coo", "csr"])
@utils.parametrize("feat_size", [8, 128, 512])
@utils.parametrize("reduce_type", ["u->e"]) # , 'e->u'])
def track_time(num_relations, format, feat_size, reduce_type):
device = utils.get_bench_device()
dd = {}
candidate_edges = [dgl.data.CoraGraphDataset(verbose=False)[0].edges(), dgl.data.PubmedGraphDataset(verbose=False)[
0].edges(), dgl.data.CiteseerGraphDataset(verbose=False)[0].edges()]
candidate_edges = [
dgl.data.CoraGraphDataset(verbose=False)[0].edges(),
dgl.data.PubmedGraphDataset(verbose=False)[0].edges(),
dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(),
]
for i in range(num_relations):
dd[('n1', 'e_{}'.format(i), 'n2')] = candidate_edges[i %
len(candidate_edges)]
dd[("n1", "e_{}".format(i), "n2")] = candidate_edges[
i % len(candidate_edges)
]
graph = dgl.heterograph(dd)
graph = graph.to(device)
graph.nodes['n1'].data['h'] = torch.randn(
(graph.num_nodes('n1'), feat_size), device=device)
graph.nodes['n2'].data['h'] = torch.randn(
(graph.num_nodes('n2'), feat_size), device=device)
graph.nodes["n1"].data["h"] = torch.randn(
(graph.num_nodes("n1"), feat_size), device=device
)
graph.nodes["n2"].data["h"] = torch.randn(
(graph.num_nodes("n2"), feat_size), device=device
)
reduce_builtin_dict = {
'u->e': fn.copy_u('h', 'x'),
"u->e": fn.copy_u("h", "x"),
# 'e->u': fn.copy_e('h', 'x'),
}
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark("time", timeout=600)
@utils.parametrize("graph_name", ["ogbn-arxiv"])
@utils.parametrize("format", ["coo"])
@utils.parametrize("feat_size", [4, 32, 256])
@utils.parametrize("msg_type", ["copy_u", "u_mul_e"])
@utils.parametrize("reduce_type", ["sum", "mean", "max"])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
    """Benchmark one ``graph.update_all`` message-passing call on a COO graph.

    Parameters (supplied by the ``@utils.parametrize`` decorators):
        graph_name: benchmark dataset name passed to ``utils.get_graph``.
        format: sparse format of the graph ("coo").
        feat_size: node feature dimension for the random features.
        msg_type: built-in message function key ("copy_u" or "u_mul_e").
        reduce_type: built-in reduce function key ("sum", "mean" or "max").

    Returns:
        Average wall-clock seconds per ``update_all`` call over 3 timed runs.
    """
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name, format)
    graph = graph.to(device)
    # Random node features and scalar edge weights drive the message funcs.
    graph.ndata["h"] = torch.randn(
        (graph.num_nodes(), feat_size), device=device
    )
    graph.edata["e"] = torch.randn((graph.num_edges(), 1), device=device)
    msg_builtin_dict = {
        "copy_u": fn.copy_u("h", "x"),
        "u_mul_e": fn.u_mul_e("h", "e", "x"),
    }
    reduce_builtin_dict = {
        "sum": fn.sum("x", "h_new"),
        "mean": fn.mean("x", "h_new"),
        "max": fn.max("x", "h_new"),
    }
    # Dry run: warm up caches / kernel compilation before timing.
    graph.update_all(
        msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
    )
    # timing
    with utils.Timer() as t:
        for i in range(3):
            graph.update_all(
                msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
            )
    return t.elapsed_secs / 3
import time
import dgl
import torch
import numpy as np
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark("time", timeout=600)
@utils.parametrize("graph_name", ["ogbn-arxiv", "reddit", "ogbn-proteins"])
@utils.parametrize("format", ["csc"])
@utils.parametrize("feat_size", [4, 32, 256])
@utils.parametrize("msg_type", ["copy_u", "u_mul_e"])
@utils.parametrize("reduce_type", ["sum", "mean", "max"])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
    """Benchmark one ``graph.update_all`` message-passing call on a CSC graph.

    Parameters (supplied by the ``@utils.parametrize`` decorators):
        graph_name: benchmark dataset name passed to ``utils.get_graph``.
        format: sparse format of the graph ("csc").
        feat_size: node feature dimension for the random features.
        msg_type: built-in message function key ("copy_u" or "u_mul_e").
        reduce_type: built-in reduce function key ("sum", "mean" or "max").

    Returns:
        Average wall-clock seconds per ``update_all`` call over 10 timed runs.
    """
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name, format)
    graph = graph.to(device)
    # Random node features and scalar edge weights drive the message funcs.
    graph.ndata["h"] = torch.randn(
        (graph.num_nodes(), feat_size), device=device
    )
    graph.edata["e"] = torch.randn((graph.num_edges(), 1), device=device)
    msg_builtin_dict = {
        "copy_u": fn.copy_u("h", "x"),
        "u_mul_e": fn.u_mul_e("h", "e", "x"),
    }
    reduce_builtin_dict = {
        "sum": fn.sum("x", "h_new"),
        "mean": fn.mean("x", "h_new"),
        "max": fn.max("x", "h_new"),
    }
    # Dry run: warm up caches / kernel compilation before timing.
    for i in range(3):
        graph.update_all(
            msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
        )
    # timing
    with utils.Timer() as t:
        for i in range(10):
            graph.update_all(
                msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
            )
    return t.elapsed_secs / 10
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
# edge_ids is not supported on cuda
# @utils.skip_if_gpu()
@utils.benchmark('time', timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.parametrize('format', ['coo', 'csr', 'csc'])
@utils.parametrize('fraction', [0.01, 0.1])
@utils.parametrize('return_uv', [True, False])
@utils.benchmark("time", timeout=1200)
@utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
@utils.parametrize("format", ["coo", "csr", "csc"])
@utils.parametrize("fraction", [0.01, 0.1])
@utils.parametrize("return_uv", [True, False])
def track_time(graph_name, format, fraction, return_uv):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
coo_graph = utils.get_graph(graph_name, 'coo')
coo_graph = utils.get_graph(graph_name, "coo")
graph = graph.to(device)
eids = np.random.choice(
np.arange(graph.num_edges(), dtype=np.int64), int(graph.num_edges()*fraction))
np.arange(graph.num_edges(), dtype=np.int64),
int(graph.num_edges() * fraction),
)
eids = torch.tensor(eids, device="cpu", dtype=torch.int64)
u, v = coo_graph.find_edges(eids)
del coo_graph, eids
......
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.skip_if_gpu()
@utils.benchmark('time')
@utils.parametrize('graph_name', ['livejournal', 'reddit'])
@utils.parametrize('format', ['coo'])
@utils.parametrize('seed_egdes_num', [500, 5000, 50000])
@utils.benchmark("time")
@utils.parametrize("graph_name", ["livejournal", "reddit"])
@utils.parametrize("format", ["coo"])
@utils.parametrize("seed_egdes_num", [500, 5000, 50000])
def track_time(graph_name, format, seed_egdes_num):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.benchmark('time', timeout=600)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.parametrize('format', ['coo']) # csc is not supported
@utils.parametrize('fraction', [0.01, 0.1])
@utils.benchmark("time", timeout=600)
@utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
@utils.parametrize("format", ["coo"]) # csc is not supported
@utils.parametrize("fraction", [0.01, 0.1])
def track_time(graph_name, format, fraction):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
graph = graph.to(device)
eids = np.random.choice(
np.arange(graph.num_edges(), dtype=np.int64), int(graph.num_edges()*fraction))
np.arange(graph.num_edges(), dtype=np.int64),
int(graph.num_edges() * fraction),
)
eids = torch.tensor(eids, device=device, dtype=torch.int64)
# dry run
for i in range(10):
out = graph.find_edges(i)
out = graph.find_edges(torch.arange(
i*10, dtype=torch.int64, device=device))
out = graph.find_edges(
torch.arange(i * 10, dtype=torch.int64, device=device)
)
# timing
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.benchmark('time', timeout=600)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.parametrize('format',
[('coo', 'csc'), ('csc', 'coo'),
('coo', 'csr'), ('csr', 'coo'),
('csr', 'csc'), ('csc', 'csr')])
@utils.benchmark("time", timeout=600)
@utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
@utils.parametrize(
"format",
[
("coo", "csc"),
("csc", "coo"),
("coo", "csr"),
("csr", "coo"),
("csr", "csc"),
("csc", "csr"),
],
)
def track_time(graph_name, format):
from_format, to_format = format
device = utils.get_bench_device()
......
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark('time')
@utils.parametrize('num_relations', [5, 50, 500])
@utils.benchmark("time")
@utils.parametrize("num_relations", [5, 50, 500])
def track_time(num_relations):
dd = {}
candidate_edges = [dgl.data.CoraGraphDataset(verbose=False)[0].edges(), dgl.data.PubmedGraphDataset(verbose=False)[
0].edges(), dgl.data.CiteseerGraphDataset(verbose=False)[0].edges()]
candidate_edges = [
dgl.data.CoraGraphDataset(verbose=False)[0].edges(),
dgl.data.PubmedGraphDataset(verbose=False)[0].edges(),
dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(),
]
for i in range(num_relations):
dd[('n1', 'e_{}'.format(i), 'n2')] = candidate_edges[i %
len(candidate_edges)]
dd[("n1", "e_{}".format(i), "n2")] = candidate_edges[
i % len(candidate_edges)
]
# dry run
graph = dgl.heterograph(dd)
......
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.skip_if_gpu()
@utils.benchmark('time')
@utils.parametrize('size', ["small", "large"])
@utils.benchmark("time")
@utils.parametrize("size", ["small", "large"])
def track_time(size):
edge_list = {
"small": dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(),
"large": utils.get_livejournal().edges()
"large": utils.get_livejournal().edges(),
}
# dry run
......
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.skip_if_gpu()
@utils.benchmark('time')
@utils.parametrize('size', ["small", "large"])
@utils.parametrize('scipy_format', ["coo", "csr"])
@utils.benchmark("time")
@utils.parametrize("size", ["small", "large"])
@utils.parametrize("scipy_format", ["coo", "csr"])
def track_time(size, scipy_format):
matrix_dict = {
"small": dgl.data.CiteseerGraphDataset(verbose=False)[0].adjacency_matrix(scipy_fmt=scipy_format),
"large": utils.get_livejournal().adjacency_matrix(scipy_fmt=scipy_format)
"small": dgl.data.CiteseerGraphDataset(verbose=False)[
0
].adjacency_matrix(scipy_fmt=scipy_format),
"large": utils.get_livejournal().adjacency_matrix(
scipy_fmt=scipy_format
),
}
# dry run
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.benchmark('time', timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.benchmark("time", timeout=1200)
@utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
# in_degrees on coo is not supported on cuda
@utils.parametrize_cpu('format', ['coo', 'csc'])
@utils.parametrize_gpu('format', ['csc'])
@utils.parametrize('fraction', [0.01, 0.1])
@utils.parametrize_cpu("format", ["coo", "csc"])
@utils.parametrize_gpu("format", ["csc"])
@utils.parametrize("fraction", [0.01, 0.1])
def track_time(graph_name, format, fraction):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
graph = graph.to(device)
nids = np.random.choice(
np.arange(graph.num_nodes(), dtype=np.int64), int(graph.num_nodes()*fraction))
np.arange(graph.num_nodes(), dtype=np.int64),
int(graph.num_nodes() * fraction),
)
nids = torch.tensor(nids, device=device, dtype=torch.int64)
# dry run
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.benchmark('time', timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.benchmark("time", timeout=1200)
@utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
# in_edges on coo is not supported on cuda
@utils.parametrize_cpu('format', ['coo', 'csc'])
@utils.parametrize_gpu('format', ['csc'])
@utils.parametrize('fraction', [0.01, 0.1])
@utils.parametrize_cpu("format", ["coo", "csc"])
@utils.parametrize_gpu("format", ["csc"])
@utils.parametrize("fraction", [0.01, 0.1])
def track_time(graph_name, format, fraction):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
graph = graph.to(device)
nids = np.random.choice(
np.arange(graph.num_nodes(), dtype=np.int64), int(graph.num_nodes()*fraction))
np.arange(graph.num_nodes(), dtype=np.int64),
int(graph.num_nodes() * fraction),
)
nids = torch.tensor(nids, device=device, dtype=torch.int64)
# dry run
......
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
import torch
import dgl
import dgl.function as fn
from .. import utils
@utils.benchmark('time')
@utils.parametrize('graph_name', ['livejournal', 'reddit'])
@utils.parametrize('format', ['csc']) # coo is not supported
@utils.parametrize('seed_nodes_num', [200, 5000, 20000])
@utils.benchmark("time")
@utils.parametrize("graph_name", ["livejournal", "reddit"])
@utils.parametrize("format", ["csc"]) # coo is not supported
@utils.parametrize("seed_nodes_num", [200, 5000, 20000])
def track_time(graph_name, format, seed_nodes_num):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.benchmark('time', timeout=60)
@utils.parametrize('graph_name', ['cora'])
@utils.parametrize('format', ['coo', 'csr'])
@utils.parametrize('k', [1, 3, 5])
@utils.benchmark("time", timeout=60)
@utils.parametrize("graph_name", ["cora"])
@utils.parametrize("format", ["coo", "csr"])
@utils.parametrize("k", [1, 3, 5])
def track_time(graph_name, format, k):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format)
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.benchmark('time', timeout=60)
@utils.parametrize('k', [8, 64])
@utils.parametrize('size', [1000, 10000])
@utils.parametrize('dim', [4, 32, 256])
@utils.parametrize_cpu('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree', 'nn-descent'])
@utils.parametrize_gpu('algorithm', ['bruteforce-blas', 'bruteforce', 'bruteforce-sharemem', 'nn-descent'])
@utils.benchmark("time", timeout=60)
@utils.parametrize("k", [8, 64])
@utils.parametrize("size", [1000, 10000])
@utils.parametrize("dim", [4, 32, 256])
@utils.parametrize_cpu(
"algorithm", ["bruteforce-blas", "bruteforce", "kd-tree", "nn-descent"]
)
@utils.parametrize_gpu(
"algorithm",
["bruteforce-blas", "bruteforce", "bruteforce-sharemem", "nn-descent"],
)
def track_time(size, dim, k, algorithm):
device = utils.get_bench_device()
features = np.random.RandomState(42).randn(size, dim)
......
import time
import dgl
import torch
import numpy as np
import torch
import dgl
from .. import utils
@utils.skip_if_gpu()
@utils.benchmark('time', timeout=1200)
@utils.parametrize('graph_name', ['reddit'])
@utils.parametrize('k', [2, 4, 8])
@utils.benchmark("time", timeout=1200)
@utils.parametrize("graph_name", ["reddit"])
@utils.parametrize("k", [2, 4, 8])
def track_time(graph_name, k):
device = utils.get_bench_device()
data = utils.process_data(graph_name)
......
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
from .. import utils
@utils.benchmark('time')
@utils.parametrize('graph_name', ['pubmed','ogbn-arxiv'])
@utils.parametrize('feat_dim', [4, 32, 256])
@utils.parametrize('aggr_type', ['mean', 'gcn', 'pool'])
@utils.benchmark("time")
@utils.parametrize("graph_name", ["pubmed", "ogbn-arxiv"])
@utils.parametrize("feat_dim", [4, 32, 256])
@utils.parametrize("aggr_type", ["mean", "gcn", "pool"])
def track_time(graph_name, feat_dim, aggr_type):
device = utils.get_bench_device()
graph = utils.get_graph(graph_name).to(device)
feat = torch.randn((graph.num_nodes(), feat_dim), device=device)
model = SAGEConv(feat_dim, feat_dim, aggr_type, activation=F.relu, bias=False).to(device)
model = SAGEConv(
feat_dim, feat_dim, aggr_type, activation=F.relu, bias=False
).to(device)
# dry run
for i in range(3):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment