Unverified Commit 0aca5660 authored by Jinjing Zhou's avatar Jinjing Zhou Committed by GitHub
Browse files

[Regression] Fix regression tests (#2639)



* add bench jenkins

* instance type

* fix

* fix

* fix

* 111

* test

* 111

* 111

* fix

* test

* run

* fix

* fix

* fix

* fix

* fix

* publish results

* 111

* regression

* launch ec2 script

* fix

* add

* run on master

* change

* rrr

* run gpu

* fix

* fix

* try fix

* fix

* ff

* fix

* fix

* fix

* refactor

* fix

* fix

* update

* fix

* fix

* fix

* fix

* remove import torchtext

* add shm size

* update

* fix

* fix

* fix

* fix

* fix this!!!!

* 111

* fix

* remove verbose

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* update readme

* fix

* fix

* fix

* change asv default to head

* commit sage and rgcn

* fix

* update

* add benchmarks

* add

* fix

* update

* remove RandomState

* tmp remove

* new batch

* fix

* fix

* fix

* address comment

* fix warning

* fix

* fix

* fix

* fix

* add multiupdate all

* address comment

* fix

* add benchmarks

* add

* fix timing

* fix

* push

* add -v

* [Example] NGCF (#2564)

* ngcf

* ngcf

* update
Co-authored-by: default avatarzhjwy9343 <6593865@qq.com>

* Revert "[Example] NGCF (#2564)" (#2611)

This reverts commit a75e04f408c719289f478ca129784e05655d8def.

* fix

* change task

* fix

* fix

* fix2

* enable tensoradapter when benchmark

* minor fix

* trigger ci

* fix

* fix

* fix

* fix

* fix

* fix

* lint

* fix
Co-authored-by: default avatarMinjie Wang <wmjlyjemaine@gmail.com>
Co-authored-by: default avatarKounianhuaDu <51199171+KounianhuaDu@users.noreply.github.com>
Co-authored-by: default avatarzhjwy9343 <6593865@qq.com>
parent 117dd252
......@@ -8,7 +8,7 @@ from .. import utils
@utils.benchmark('time', timeout=600)
@utils.parametrize('graph_name', ['cora', 'reddit'])
@utils.parametrize('graph_name', ['cora', 'ogbn-arxiv'])
@utils.parametrize('format', ['coo', 'csr'])
@utils.parametrize('feat_size', [8, 128, 512])
@utils.parametrize('reduce_type', ['u->e', 'u+v'])
......
......@@ -7,11 +7,10 @@ import dgl.function as fn
from .. import utils
@utils.benchmark('time', timeout=7200)
@utils.parametrize('graph_name', ['cora', 'reddit'])
@utils.parametrize('format', ['coo', 'csr'])
@utils.parametrize_cpu('feat_size', [8, 128, 512])
@utils.parametrize_gpu('feat_size', [8, 32, 256])
@utils.benchmark('time', timeout=600)
@utils.parametrize('graph_name', ['ogbn-arxiv'])
@utils.parametrize('format', ['coo'])
@utils.parametrize('feat_size', [4, 32, 256])
@utils.parametrize('msg_type', ['copy_u', 'u_mul_e'])
@utils.parametrize('reduce_type', ['sum', 'mean', 'max'])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
......@@ -25,7 +24,7 @@ def track_time(graph_name, format, feat_size, msg_type, reduce_type):
msg_builtin_dict = {
'copy_u': fn.copy_u('h', 'x'),
'u_mul_e': fn.u_mul_e('h', 'e','x'),
'u_mul_e': fn.u_mul_e('h', 'e', 'x'),
}
reduce_builtin_dict = {
......@@ -35,13 +34,14 @@ def track_time(graph_name, format, feat_size, msg_type, reduce_type):
}
# dry run
graph.update_all(msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type])
graph.update_all(msg_builtin_dict[msg_type],
reduce_builtin_dict[reduce_type])
# timing
with utils.Timer() as t:
for i in range(3):
graph.update_all(msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type])
graph.update_all(
msg_builtin_dict[msg_type], reduce_builtin_dict[reduce_type])
return t.elapsed_secs / 3
import time
import dgl
import torch
import numpy as np
import dgl.function as fn
from .. import utils
@utils.benchmark('time', timeout=600)
@utils.parametrize('graph_name', ['ogbn-arxiv', 'reddit', 'ogbn-proteins'])
@utils.parametrize('format', ['csc'])
@utils.parametrize('feat_size', [4, 32, 256])
@utils.parametrize('msg_type', ['copy_u', 'u_mul_e'])
@utils.parametrize('reduce_type', ['sum', 'mean', 'max'])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
    """Benchmark ``graph.update_all`` with DGL built-in message/reduce pairs.

    Returns the mean wall-clock time in seconds over three timed runs,
    after one untimed warm-up call.
    """
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name, format).to(device)

    # Random node features and per-edge scalar weights on the bench device.
    graph.ndata['h'] = torch.randn(
        (graph.num_nodes(), feat_size), device=device)
    graph.edata['e'] = torch.randn(
        (graph.num_edges(), 1), device=device)

    # Resolve the parametrized names to concrete built-in functions once,
    # outside the timed region.
    msg_fn = {
        'copy_u': fn.copy_u('h', 'x'),
        'u_mul_e': fn.u_mul_e('h', 'e', 'x'),
    }[msg_type]
    reduce_fn = {
        'sum': fn.sum('x', 'h_new'),
        'mean': fn.mean('x', 'h_new'),
        'max': fn.max('x', 'h_new'),
    }[reduce_type]

    # dry run — warms up any lazy kernel/dispatch work so it is not timed
    graph.update_all(msg_fn, reduce_fn)

    # timing
    with utils.Timer() as t:
        for _ in range(3):
            graph.update_all(msg_fn, reduce_fn)

    return t.elapsed_secs / 3
......@@ -6,7 +6,7 @@ import numpy as np
from .. import utils
@utils.benchmark('time', timeout=1200)
@utils.benchmark('time', timeout=600)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.parametrize('format', ['coo']) # csc is not supported
......
......@@ -6,7 +6,7 @@ import numpy as np
from .. import utils
@utils.benchmark('time', timeout=1200)
@utils.benchmark('time', timeout=600)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
@utils.parametrize('format',
......
......@@ -8,8 +8,8 @@ from .. import utils
@utils.benchmark('time', timeout=60)
@utils.parametrize('k', [3, 5, 10])
@utils.parametrize('size', [50, 200, ])
@utils.parametrize('dim', [16, 64, 128])
@utils.parametrize('size', [50, 200, 1000])
@utils.parametrize('dim', [16, 128, 512])
def track_time(size, dim, k):
device = utils.get_bench_device()
features = np.random.randn(size, dim)
......
......@@ -15,8 +15,7 @@ def track_time(graph_name, k):
data = utils.process_data(graph_name)
graph = data[0]
# dry run
dry_run_data = utils.process_data('pubmed')
gg = dgl.transform.metis_partition(dry_run_data[0], k)
gg = dgl.transform.metis_partition(graph, k)
# timing
with utils.Timer() as t:
......
......@@ -8,7 +8,7 @@ from .. import utils
@utils.benchmark('time', timeout=7200)
@utils.parametrize('graph_name', ['cora', 'pubmed'])
@utils.parametrize('graph_name', ['ogbn-arxiv', 'pubmed'])
@utils.parametrize('format', ['coo']) # only coo supports udf
@utils.parametrize('feat_size', [8, 32, 128, 512])
@utils.parametrize('reduce_type', ['u->e', 'u+v'])
......
......@@ -7,10 +7,10 @@ import dgl.function as fn
from .. import utils
@utils.benchmark('time', timeout=7200)
@utils.parametrize('graph_name', ['cora', 'pubmed'])
@utils.benchmark('time', timeout=600)
@utils.parametrize('graph_name', ['pubmed', 'ogbn-arxiv'])
@utils.parametrize('format', ['coo']) # only coo supports udf
@utils.parametrize('feat_size', [8, 32, 128, 512])
@utils.parametrize('feat_size', [8, 64, 512])
@utils.parametrize('msg_type', ['copy_u', 'u_mul_e'])
@utils.parametrize('reduce_type', ['sum', 'mean', 'max'])
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
......@@ -39,7 +39,7 @@ def track_time(graph_name, format, feat_size, msg_type, reduce_type):
# timing
with utils.Timer() as t:
for i in range(3):
graph.update_all(msg_udf_dict[msg_type], reduct_udf_dict[reduce_type])
graph.update_all(msg_udf_dict[msg_type],
reduct_udf_dict[reduce_type])
return t.elapsed_secs / 3
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment