Unverified Commit d78a3a4b authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Black auto fix. (#4640)



* auto fix

* add more

* sort
Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 23d09057
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
import dgl
import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('graph_name', ['cora', 'livejournal']) @utils.parametrize("graph_name", ["cora", "livejournal"])
@utils.parametrize('format', ['coo']) @utils.parametrize("format", ["coo"])
def track_time(graph_name, format): def track_time(graph_name, format):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
...@@ -21,7 +22,7 @@ def track_time(graph_name, format): ...@@ -21,7 +22,7 @@ def track_time(graph_name, format):
g = graph.add_self_loop() g = graph.add_self_loop()
# timing # timing
with utils.Timer() as t: with utils.Timer() as t:
for i in range(3): for i in range(3):
edges = graph.add_self_loop() edges = graph.add_self_loop()
......
import time import time
import dgl
import torch import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time')
@utils.parametrize('batch_size', [4, 32, 256, 1024]) @utils.benchmark("time")
@utils.parametrize("batch_size", [4, 32, 256, 1024])
def track_time(batch_size): def track_time(batch_size):
device = utils.get_bench_device() device = utils.get_bench_device()
ds = dgl.data.QM7bDataset() ds = dgl.data.QM7bDataset()
...@@ -20,7 +23,7 @@ def track_time(batch_size): ...@@ -20,7 +23,7 @@ def track_time(batch_size):
g = dgl.batch(graphs) g = dgl.batch(graphs)
# timing # timing
with utils.Timer() as t: with utils.Timer() as t:
for i in range(100): for i in range(100):
g = dgl.batch(graphs) g = dgl.batch(graphs)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
import dgl.function as fn import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark('time', timeout=600) @utils.benchmark("time", timeout=600)
@utils.parametrize('graph_name', ['cora', 'ogbn-arxiv']) @utils.parametrize("graph_name", ["cora", "ogbn-arxiv"])
@utils.parametrize('format', ['coo', 'csr']) @utils.parametrize("format", ["coo", "csr"])
@utils.parametrize('feat_size', [8, 128, 512]) @utils.parametrize("feat_size", [8, 128, 512])
@utils.parametrize('reduce_type', ['u->e', 'u+v']) @utils.parametrize("reduce_type", ["u->e", "u+v"])
def track_time(graph_name, format, feat_size, reduce_type): def track_time(graph_name, format, feat_size, reduce_type):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
graph = graph.to(device) graph = graph.to(device)
graph.ndata['h'] = torch.randn( graph.ndata["h"] = torch.randn(
(graph.num_nodes(), feat_size), device=device) (graph.num_nodes(), feat_size), device=device
)
reduce_builtin_dict = { reduce_builtin_dict = {
'u->e': fn.copy_u('h', 'x'), "u->e": fn.copy_u("h", "x"),
'u+v': fn.u_add_v('h', 'h', 'x'), "u+v": fn.u_add_v("h", "h", "x"),
} }
# dry run # dry run
...@@ -29,7 +32,7 @@ def track_time(graph_name, format, feat_size, reduce_type): ...@@ -29,7 +32,7 @@ def track_time(graph_name, format, feat_size, reduce_type):
graph.apply_edges(reduce_builtin_dict[reduce_type]) graph.apply_edges(reduce_builtin_dict[reduce_type])
# timing # timing
with utils.Timer() as t: with utils.Timer() as t:
for i in range(10): for i in range(10):
graph.apply_edges(reduce_builtin_dict[reduce_type]) graph.apply_edges(reduce_builtin_dict[reduce_type])
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
import dgl.function as fn import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark('time', timeout=600) @utils.benchmark("time", timeout=600)
@utils.parametrize('num_relations', [5, 50, 500]) @utils.parametrize("num_relations", [5, 50, 500])
@utils.parametrize('format', ['coo', 'csr']) @utils.parametrize("format", ["coo", "csr"])
@utils.parametrize('feat_size', [8, 128, 512]) @utils.parametrize("feat_size", [8, 128, 512])
@utils.parametrize('reduce_type', ['u->e']) #, 'e->u']) @utils.parametrize("reduce_type", ["u->e"]) # , 'e->u'])
def track_time(num_relations, format, feat_size, reduce_type):
def track_time( num_relations, format, feat_size, reduce_type):
device = utils.get_bench_device() device = utils.get_bench_device()
dd = {} dd = {}
candidate_edges = [dgl.data.CoraGraphDataset(verbose=False)[0].edges(), dgl.data.PubmedGraphDataset(verbose=False)[ candidate_edges = [
0].edges(), dgl.data.CiteseerGraphDataset(verbose=False)[0].edges()] dgl.data.CoraGraphDataset(verbose=False)[0].edges(),
dgl.data.PubmedGraphDataset(verbose=False)[0].edges(),
dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(),
]
for i in range(num_relations): for i in range(num_relations):
dd[('n1', 'e_{}'.format(i), 'n2')] = candidate_edges[i % dd[("n1", "e_{}".format(i), "n2")] = candidate_edges[
len(candidate_edges)] i % len(candidate_edges)
]
graph = dgl.heterograph(dd) graph = dgl.heterograph(dd)
graph = graph.to(device) graph = graph.to(device)
graph.nodes['n1'].data['h'] = torch.randn( graph.nodes["n1"].data["h"] = torch.randn(
(graph.num_nodes('n1'), feat_size), device=device) (graph.num_nodes("n1"), feat_size), device=device
graph.nodes['n2'].data['h'] = torch.randn( )
(graph.num_nodes('n2'), feat_size), device=device) graph.nodes["n2"].data["h"] = torch.randn(
(graph.num_nodes("n2"), feat_size), device=device
)
reduce_builtin_dict = { reduce_builtin_dict = {
'u->e': fn.copy_u('h', 'x'), "u->e": fn.copy_u("h", "x"),
# 'e->u': fn.copy_e('h', 'x'), # 'e->u': fn.copy_e('h', 'x'),
} }
...@@ -40,7 +46,7 @@ def track_time( num_relations, format, feat_size, reduce_type): ...@@ -40,7 +46,7 @@ def track_time( num_relations, format, feat_size, reduce_type):
graph.apply_edges(reduce_builtin_dict[reduce_type]) graph.apply_edges(reduce_builtin_dict[reduce_type])
# timing # timing
with utils.Timer() as t: with utils.Timer() as t:
for i in range(10): for i in range(10):
graph.apply_edges(reduce_builtin_dict[reduce_type]) graph.apply_edges(reduce_builtin_dict[reduce_type])
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
import dgl.function as fn import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark("time", timeout=600)
@utils.parametrize("graph_name", ["ogbn-arxiv"])
@utils.parametrize("format", ["coo"])
@utils.parametrize("feat_size", [4, 32, 256])
@utils.parametrize("msg_type", ["copy_u", "u_mul_e"])
@utils.parametrize("reduce_type", ["sum", "mean", "max"])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
    """Benchmark full-graph message passing (``update_all``) on a COO graph.

    Parameters (injected by the ``@utils.parametrize`` decorators):
        graph_name: dataset key passed to ``utils.get_graph``.
        format: sparse format of the graph ("coo").
        feat_size: width of the random node feature tensor.
        msg_type: builtin message function key ("copy_u" or "u_mul_e").
        reduce_type: builtin reduce function key ("sum", "mean" or "max").

    Returns:
        Average wall-clock seconds per ``update_all`` call over 3 timed runs.
    """
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name, format)
    graph = graph.to(device)
    # Random node features and scalar edge weights sized to the benchmark
    # parameters; values are irrelevant, only shapes matter for timing.
    graph.ndata["h"] = torch.randn(
        (graph.num_nodes(), feat_size), device=device
    )
    graph.edata["e"] = torch.randn((graph.num_edges(), 1), device=device)
    msg_builtin_dict = {
        "copy_u": fn.copy_u("h", "x"),
        "u_mul_e": fn.u_mul_e("h", "e", "x"),
    }
    reduce_builtin_dict = {
        "sum": fn.sum("x", "h_new"),
        "mean": fn.mean("x", "h_new"),
        "max": fn.max("x", "h_new"),
    }
    # Dry run: warm caches / trigger lazy kernel compilation before timing.
    graph.update_all(
        msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
    )
    # Timed region: 3 iterations, report the per-iteration average.
    with utils.Timer() as t:
        for _ in range(3):
            graph.update_all(
                msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
            )
    return t.elapsed_secs / 3
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
import dgl.function as fn import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark("time", timeout=600)
@utils.parametrize("graph_name", ["ogbn-arxiv", "reddit", "ogbn-proteins"])
@utils.parametrize("format", ["csc"])
@utils.parametrize("feat_size", [4, 32, 256])
@utils.parametrize("msg_type", ["copy_u", "u_mul_e"])
@utils.parametrize("reduce_type", ["sum", "mean", "max"])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
    """Benchmark full-graph message passing (``update_all``) on a CSC graph.

    Same measurement as the COO variant but on larger datasets and the CSC
    format, with a longer dry-run (3 iterations) and timing loop (10
    iterations) to smooth out variance on big graphs.

    Parameters (injected by the ``@utils.parametrize`` decorators):
        graph_name: dataset key passed to ``utils.get_graph``.
        format: sparse format of the graph ("csc").
        feat_size: width of the random node feature tensor.
        msg_type: builtin message function key ("copy_u" or "u_mul_e").
        reduce_type: builtin reduce function key ("sum", "mean" or "max").

    Returns:
        Average wall-clock seconds per ``update_all`` call over 10 timed runs.
    """
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name, format)
    graph = graph.to(device)
    # Random node features and scalar edge weights; only shapes matter.
    graph.ndata["h"] = torch.randn(
        (graph.num_nodes(), feat_size), device=device
    )
    graph.edata["e"] = torch.randn((graph.num_edges(), 1), device=device)
    msg_builtin_dict = {
        "copy_u": fn.copy_u("h", "x"),
        "u_mul_e": fn.u_mul_e("h", "e", "x"),
    }
    reduce_builtin_dict = {
        "sum": fn.sum("x", "h_new"),
        "mean": fn.mean("x", "h_new"),
        "max": fn.max("x", "h_new"),
    }
    # Dry run: warm caches / trigger lazy kernel compilation before timing.
    for _ in range(3):
        graph.update_all(
            msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
        )
    # Timed region: 10 iterations, report the per-iteration average.
    with utils.Timer() as t:
        for _ in range(10):
            graph.update_all(
                msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type]
            )
    return t.elapsed_secs / 10
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
# edge_ids is not supported on cuda # edge_ids is not supported on cuda
# @utils.skip_if_gpu() # @utils.skip_if_gpu()
@utils.benchmark('time', timeout=1200) @utils.benchmark("time", timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster']) @utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal']) @utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
@utils.parametrize('format', ['coo', 'csr', 'csc']) @utils.parametrize("format", ["coo", "csr", "csc"])
@utils.parametrize('fraction', [0.01, 0.1]) @utils.parametrize("fraction", [0.01, 0.1])
@utils.parametrize('return_uv', [True, False]) @utils.parametrize("return_uv", [True, False])
def track_time(graph_name, format, fraction, return_uv): def track_time(graph_name, format, fraction, return_uv):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
coo_graph = utils.get_graph(graph_name, 'coo') coo_graph = utils.get_graph(graph_name, "coo")
graph = graph.to(device) graph = graph.to(device)
eids = np.random.choice( eids = np.random.choice(
np.arange(graph.num_edges(), dtype=np.int64), int(graph.num_edges()*fraction)) np.arange(graph.num_edges(), dtype=np.int64),
int(graph.num_edges() * fraction),
)
eids = torch.tensor(eids, device="cpu", dtype=torch.int64) eids = torch.tensor(eids, device="cpu", dtype=torch.int64)
u, v = coo_graph.find_edges(eids) u, v = coo_graph.find_edges(eids)
del coo_graph, eids del coo_graph, eids
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
import dgl
import dgl.function as fn
from .. import utils from .. import utils
@utils.skip_if_gpu() @utils.skip_if_gpu()
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('graph_name', ['livejournal', 'reddit']) @utils.parametrize("graph_name", ["livejournal", "reddit"])
@utils.parametrize('format', ['coo']) @utils.parametrize("format", ["coo"])
@utils.parametrize('seed_egdes_num', [500, 5000, 50000]) @utils.parametrize("seed_egdes_num", [500, 5000, 50000])
def track_time(graph_name, format, seed_egdes_num): def track_time(graph_name, format, seed_egdes_num):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
...@@ -25,7 +26,7 @@ def track_time(graph_name, format, seed_egdes_num): ...@@ -25,7 +26,7 @@ def track_time(graph_name, format, seed_egdes_num):
dgl.edge_subgraph(graph, seed_edges) dgl.edge_subgraph(graph, seed_edges)
# timing # timing
with utils.Timer() as t: with utils.Timer() as t:
for i in range(3): for i in range(3):
dgl.edge_subgraph(graph, seed_edges) dgl.edge_subgraph(graph, seed_edges)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time', timeout=600) @utils.benchmark("time", timeout=600)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster']) @utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal']) @utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
@utils.parametrize('format', ['coo']) # csc is not supported @utils.parametrize("format", ["coo"]) # csc is not supported
@utils.parametrize('fraction', [0.01, 0.1]) @utils.parametrize("fraction", [0.01, 0.1])
def track_time(graph_name, format, fraction): def track_time(graph_name, format, fraction):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
graph = graph.to(device) graph = graph.to(device)
eids = np.random.choice( eids = np.random.choice(
np.arange(graph.num_edges(), dtype=np.int64), int(graph.num_edges()*fraction)) np.arange(graph.num_edges(), dtype=np.int64),
int(graph.num_edges() * fraction),
)
eids = torch.tensor(eids, device=device, dtype=torch.int64) eids = torch.tensor(eids, device=device, dtype=torch.int64)
# dry run # dry run
for i in range(10): for i in range(10):
out = graph.find_edges(i) out = graph.find_edges(i)
out = graph.find_edges(torch.arange( out = graph.find_edges(
i*10, dtype=torch.int64, device=device)) torch.arange(i * 10, dtype=torch.int64, device=device)
)
# timing # timing
with utils.Timer() as t: with utils.Timer() as t:
for i in range(10): for i in range(10):
edges = graph.find_edges(eids) edges = graph.find_edges(eids)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time', timeout=600) @utils.benchmark("time", timeout=600)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster']) @utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal']) @utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
@utils.parametrize('format', @utils.parametrize(
[('coo', 'csc'), ('csc', 'coo'), "format",
('coo', 'csr'), ('csr', 'coo'), [
('csr', 'csc'), ('csc', 'csr')]) ("coo", "csc"),
("csc", "coo"),
("coo", "csr"),
("csr", "coo"),
("csr", "csc"),
("csc", "csr"),
],
)
def track_time(graph_name, format): def track_time(graph_name, format):
from_format, to_format = format from_format, to_format = format
device = utils.get_bench_device() device = utils.get_bench_device()
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
import dgl
import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('num_relations', [5, 50, 500]) @utils.parametrize("num_relations", [5, 50, 500])
def track_time(num_relations): def track_time(num_relations):
dd = {} dd = {}
candidate_edges = [dgl.data.CoraGraphDataset(verbose=False)[0].edges(), dgl.data.PubmedGraphDataset(verbose=False)[ candidate_edges = [
0].edges(), dgl.data.CiteseerGraphDataset(verbose=False)[0].edges()] dgl.data.CoraGraphDataset(verbose=False)[0].edges(),
dgl.data.PubmedGraphDataset(verbose=False)[0].edges(),
dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(),
]
for i in range(num_relations): for i in range(num_relations):
dd[('n1', 'e_{}'.format(i), 'n2')] = candidate_edges[i % dd[("n1", "e_{}".format(i), "n2")] = candidate_edges[
len(candidate_edges)] i % len(candidate_edges)
]
# dry run # dry run
graph = dgl.heterograph(dd) graph = dgl.heterograph(dd)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
import dgl
import dgl.function as fn
from .. import utils from .. import utils
@utils.skip_if_gpu() @utils.skip_if_gpu()
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('size', ["small", "large"]) @utils.parametrize("size", ["small", "large"])
def track_time(size): def track_time(size):
edge_list = { edge_list = {
"small": dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(), "small": dgl.data.CiteseerGraphDataset(verbose=False)[0].edges(),
"large": utils.get_livejournal().edges() "large": utils.get_livejournal().edges(),
} }
# dry run # dry run
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
import dgl
import dgl.function as fn
from .. import utils from .. import utils
@utils.skip_if_gpu() @utils.skip_if_gpu()
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('size', ["small", "large"]) @utils.parametrize("size", ["small", "large"])
@utils.parametrize('scipy_format', ["coo", "csr"]) @utils.parametrize("scipy_format", ["coo", "csr"])
def track_time(size, scipy_format): def track_time(size, scipy_format):
matrix_dict = { matrix_dict = {
"small": dgl.data.CiteseerGraphDataset(verbose=False)[0].adjacency_matrix(scipy_fmt=scipy_format), "small": dgl.data.CiteseerGraphDataset(verbose=False)[
"large": utils.get_livejournal().adjacency_matrix(scipy_fmt=scipy_format) 0
].adjacency_matrix(scipy_fmt=scipy_format),
"large": utils.get_livejournal().adjacency_matrix(
scipy_fmt=scipy_format
),
} }
# dry run # dry run
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time', timeout=1200) @utils.benchmark("time", timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster']) @utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal']) @utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
# in_degrees on coo is not supported on cuda # in_degrees on coo is not supported on cuda
@utils.parametrize_cpu('format', ['coo', 'csc']) @utils.parametrize_cpu("format", ["coo", "csc"])
@utils.parametrize_gpu('format', ['csc']) @utils.parametrize_gpu("format", ["csc"])
@utils.parametrize('fraction', [0.01, 0.1]) @utils.parametrize("fraction", [0.01, 0.1])
def track_time(graph_name, format, fraction): def track_time(graph_name, format, fraction):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
graph = graph.to(device) graph = graph.to(device)
nids = np.random.choice( nids = np.random.choice(
np.arange(graph.num_nodes(), dtype=np.int64), int(graph.num_nodes()*fraction)) np.arange(graph.num_nodes(), dtype=np.int64),
int(graph.num_nodes() * fraction),
)
nids = torch.tensor(nids, device=device, dtype=torch.int64) nids = torch.tensor(nids, device=device, dtype=torch.int64)
# dry run # dry run
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time', timeout=1200) @utils.benchmark("time", timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster']) @utils.parametrize_cpu("graph_name", ["cora", "livejournal", "friendster"])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal']) @utils.parametrize_gpu("graph_name", ["cora", "livejournal"])
# in_edges on coo is not supported on cuda # in_edges on coo is not supported on cuda
@utils.parametrize_cpu('format', ['coo', 'csc']) @utils.parametrize_cpu("format", ["coo", "csc"])
@utils.parametrize_gpu('format', ['csc']) @utils.parametrize_gpu("format", ["csc"])
@utils.parametrize('fraction', [0.01, 0.1]) @utils.parametrize("fraction", [0.01, 0.1])
def track_time(graph_name, format, fraction): def track_time(graph_name, format, fraction):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
graph = graph.to(device) graph = graph.to(device)
nids = np.random.choice( nids = np.random.choice(
np.arange(graph.num_nodes(), dtype=np.int64), int(graph.num_nodes()*fraction)) np.arange(graph.num_nodes(), dtype=np.int64),
int(graph.num_nodes() * fraction),
)
nids = torch.tensor(nids, device=device, dtype=torch.int64) nids = torch.tensor(nids, device=device, dtype=torch.int64)
# dry run # dry run
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
import dgl
import dgl.function as fn
from .. import utils from .. import utils
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('graph_name', ['livejournal', 'reddit']) @utils.parametrize("graph_name", ["livejournal", "reddit"])
@utils.parametrize('format', ['csc']) # coo is not supported @utils.parametrize("format", ["csc"]) # coo is not supported
@utils.parametrize('seed_nodes_num', [200, 5000, 20000]) @utils.parametrize("seed_nodes_num", [200, 5000, 20000])
def track_time(graph_name, format, seed_nodes_num): def track_time(graph_name, format, seed_nodes_num):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time', timeout=60) @utils.benchmark("time", timeout=60)
@utils.parametrize('graph_name', ['cora']) @utils.parametrize("graph_name", ["cora"])
@utils.parametrize('format', ['coo', 'csr']) @utils.parametrize("format", ["coo", "csr"])
@utils.parametrize('k', [1, 3, 5]) @utils.parametrize("k", [1, 3, 5])
def track_time(graph_name, format, k): def track_time(graph_name, format, k):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name, format) graph = utils.get_graph(graph_name, format)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.benchmark('time', timeout=60)
@utils.parametrize('k', [8, 64]) @utils.benchmark("time", timeout=60)
@utils.parametrize('size', [1000, 10000]) @utils.parametrize("k", [8, 64])
@utils.parametrize('dim', [4, 32, 256]) @utils.parametrize("size", [1000, 10000])
@utils.parametrize_cpu('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree', 'nn-descent']) @utils.parametrize("dim", [4, 32, 256])
@utils.parametrize_gpu('algorithm', ['bruteforce-blas', 'bruteforce', 'bruteforce-sharemem', 'nn-descent']) @utils.parametrize_cpu(
"algorithm", ["bruteforce-blas", "bruteforce", "kd-tree", "nn-descent"]
)
@utils.parametrize_gpu(
"algorithm",
["bruteforce-blas", "bruteforce", "bruteforce-sharemem", "nn-descent"],
)
def track_time(size, dim, k, algorithm): def track_time(size, dim, k, algorithm):
device = utils.get_bench_device() device = utils.get_bench_device()
features = np.random.RandomState(42).randn(size, dim) features = np.random.RandomState(42).randn(size, dim)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import torch
import dgl
from .. import utils from .. import utils
@utils.skip_if_gpu() @utils.skip_if_gpu()
@utils.benchmark('time', timeout=1200) @utils.benchmark("time", timeout=1200)
@utils.parametrize('graph_name', ['reddit']) @utils.parametrize("graph_name", ["reddit"])
@utils.parametrize('k', [2, 4, 8]) @utils.parametrize("k", [2, 4, 8])
def track_time(graph_name, k): def track_time(graph_name, k):
device = utils.get_bench_device() device = utils.get_bench_device()
data = utils.process_data(graph_name) data = utils.process_data(graph_name)
......
import time import time
import dgl
import torch
import numpy as np import numpy as np
import dgl.function as fn import torch
from dgl.nn.pytorch import SAGEConv
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
from .. import utils from .. import utils
@utils.benchmark('time') @utils.benchmark("time")
@utils.parametrize('graph_name', ['pubmed','ogbn-arxiv']) @utils.parametrize("graph_name", ["pubmed", "ogbn-arxiv"])
@utils.parametrize('feat_dim', [4, 32, 256]) @utils.parametrize("feat_dim", [4, 32, 256])
@utils.parametrize('aggr_type', ['mean', 'gcn', 'pool']) @utils.parametrize("aggr_type", ["mean", "gcn", "pool"])
def track_time(graph_name, feat_dim, aggr_type): def track_time(graph_name, feat_dim, aggr_type):
device = utils.get_bench_device() device = utils.get_bench_device()
graph = utils.get_graph(graph_name).to(device) graph = utils.get_graph(graph_name).to(device)
feat = torch.randn((graph.num_nodes(), feat_dim), device=device) feat = torch.randn((graph.num_nodes(), feat_dim), device=device)
model = SAGEConv(feat_dim, feat_dim, aggr_type, activation=F.relu, bias=False).to(device) model = SAGEConv(
feat_dim, feat_dim, aggr_type, activation=F.relu, bias=False
).to(device)
# dry run # dry run
for i in range(3): for i in range(3):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment