Unverified Commit 14251504 authored by nv-dlasalle, committed by GitHub
Browse files

Fix test naming (#4023)


Co-authored-by: Mufei Li <mufeili1996@gmail.com>
parent 7c598aac
......@@ -10,7 +10,7 @@ import networkx as nx
import unittest, pytest
from dgl import DGLError
import test_utils
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
from scipy.sparse import rand
rfuncs = {'sum': fn.sum, 'max': fn.max, 'min': fn.min, 'mean': fn.mean}
......@@ -39,7 +39,7 @@ def create_test_heterograph(idtype):
return g
@parametrize_dtype
@parametrize_idtype
def test_unary_copy_u(idtype):
def _test(mfunc):
......@@ -92,7 +92,7 @@ def test_unary_copy_u(idtype):
_test(fn.copy_u)
@parametrize_dtype
@parametrize_idtype
def test_unary_copy_e(idtype):
def _test(mfunc):
......@@ -148,7 +148,7 @@ def test_unary_copy_e(idtype):
_test(fn.copy_e)
@parametrize_dtype
@parametrize_idtype
def test_binary_op(idtype):
def _test(lhs, rhs, binary_op):
......
......@@ -6,7 +6,7 @@ import networkx as nx
from dgl import DGLGraph
from collections import defaultdict as ddict
import unittest
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
D = 5
reduce_msg_shapes = set()
......@@ -89,7 +89,7 @@ def generate_graph(idtype, grad=False):
def test_compatible():
g = generate_graph_old()
@parametrize_dtype
@parametrize_idtype
def test_batch_setter_getter(idtype):
def _pfc(x):
return list(F.zerocopy_to_numpy(x)[:,0])
......@@ -159,7 +159,7 @@ def test_batch_setter_getter(idtype):
v = F.tensor([6, 9, 7], g.idtype)
assert _pfc(g.edges[u, v].data['l']) == [1.0, 1.0, 0.0]
@parametrize_dtype
@parametrize_idtype
def test_batch_setter_autograd(idtype):
g = generate_graph(idtype, grad=True)
h1 = g.ndata['h']
......@@ -290,7 +290,7 @@ def _test_nx_conversion():
assert F.allclose(g.edata['h'], F.tensor([[1., 2.], [1., 2.],
[2., 3.], [2., 3.]]))
@parametrize_dtype
@parametrize_idtype
def test_apply_nodes(idtype):
def _upd(nodes):
return {'h' : nodes.data['h'] * 2}
......@@ -302,7 +302,7 @@ def test_apply_nodes(idtype):
g.apply_nodes(lambda nodes : {'h' : nodes.data['h'] * 0.}, u)
assert F.allclose(F.gather_row(g.ndata['h'], u), F.zeros((4, D)))
@parametrize_dtype
@parametrize_idtype
def test_apply_edges(idtype):
def _upd(edges):
return {'w' : edges.data['w'] * 2}
......@@ -316,7 +316,7 @@ def test_apply_edges(idtype):
eid = F.tensor(g.edge_ids(u, v))
assert F.allclose(F.gather_row(g.edata['w'], eid), F.zeros((6, D)))
@parametrize_dtype
@parametrize_idtype
def test_update_routines(idtype):
g = generate_graph(idtype)
......@@ -353,7 +353,7 @@ def test_update_routines(idtype):
assert(reduce_msg_shapes == {(1, 8, D), (9, 1, D)})
reduce_msg_shapes.clear()
@parametrize_dtype
@parametrize_idtype
def test_update_all_0deg(idtype):
# test#1
g = dgl.graph(([1, 2, 3, 4], [0, 0, 0, 0]), idtype=idtype, device=F.ctx())
......@@ -384,7 +384,7 @@ def test_update_all_0deg(idtype):
# should fallback to apply
assert F.allclose(new_repr, 2*old_repr)
@parametrize_dtype
@parametrize_idtype
def test_pull_0deg(idtype):
g = dgl.graph(([0], [1]), idtype=idtype, device=F.ctx())
def _message(edges):
......@@ -450,7 +450,7 @@ def test_dynamic_addition():
assert len(g.edata['h1']) == len(g.edata['h2'])
@parametrize_dtype
@parametrize_idtype
def test_repr(idtype):
g = dgl.graph(([0, 0, 1], [1, 2, 2]), num_nodes=10, idtype=idtype, device=F.ctx())
repr_string = g.__repr__()
......@@ -460,7 +460,7 @@ def test_repr(idtype):
repr_string = g.__repr__()
print(repr_string)
@parametrize_dtype
@parametrize_idtype
def test_local_var(idtype):
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 4]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.zeros((g.number_of_nodes(), 3))
......@@ -518,7 +518,7 @@ def test_local_var(idtype):
assert F.allclose(g.edata['w'], F.tensor([[1.], [0.]]))
foo(g)
@parametrize_dtype
@parametrize_idtype
def test_local_scope(idtype):
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 4]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.zeros((g.number_of_nodes(), 3))
......@@ -590,7 +590,7 @@ def test_local_scope(idtype):
assert F.allclose(g.edata['w'], F.tensor([[1.], [0.]]))
foo(g)
@parametrize_dtype
@parametrize_idtype
def test_isolated_nodes(idtype):
g = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=F.ctx())
assert g.number_of_nodes() == 5
......@@ -610,7 +610,7 @@ def test_isolated_nodes(idtype):
assert g.number_of_nodes('user') == 5
assert g.number_of_nodes('game') == 7
@parametrize_dtype
@parametrize_idtype
def test_send_multigraph(idtype):
g = dgl.graph(([0, 0, 0, 2], [1, 1, 1, 1]), idtype=idtype, device=F.ctx())
......@@ -636,7 +636,7 @@ def test_send_multigraph(idtype):
assert F.allclose(new_repr[1], answer(old_repr[0], old_repr[2], old_repr[3]))
assert F.allclose(new_repr[[0, 2]], F.zeros((2, 5)))
@parametrize_dtype
@parametrize_idtype
def test_issue_1088(idtype):
# This test ensures that message passing on a heterograph with one edge type
# would not crash (GitHub issue #1088).
......@@ -645,7 +645,7 @@ def test_issue_1088(idtype):
g.nodes['U'].data['x'] = F.randn((3, 3))
g.update_all(fn.copy_u('x', 'm'), fn.sum('m', 'y'))
@parametrize_dtype
@parametrize_idtype
def test_degree_bucket_edge_ordering(idtype):
import dgl.function as fn
g = dgl.graph(
......@@ -658,7 +658,7 @@ def test_degree_bucket_edge_ordering(idtype):
return {'n': F.sum(nodes.mailbox['eid'], 1)}
g.update_all(fn.copy_e('eid', 'eid'), reducer)
@parametrize_dtype
@parametrize_idtype
def test_issue_2484(idtype):
import dgl.function as fn
g = dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
......
......@@ -2,7 +2,7 @@ import dgl
import numpy as np
import backend as F
import unittest
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
def tree1(idtype):
"""Generate a tree
......@@ -42,7 +42,7 @@ def tree2(idtype):
g.edata['h'] = F.randn((4, 10))
return g
@parametrize_dtype
@parametrize_idtype
def test_batch_unbatch(idtype):
t1 = tree1(idtype)
t2 = tree2(idtype)
......@@ -60,7 +60,7 @@ def test_batch_unbatch(idtype):
assert F.allclose(t2.ndata['h'], tt2.ndata['h'])
assert F.allclose(t2.edata['h'], tt2.edata['h'])
@parametrize_dtype
@parametrize_idtype
def test_batch_unbatch1(idtype):
t1 = tree1(idtype)
t2 = tree2(idtype)
......@@ -81,7 +81,7 @@ def test_batch_unbatch1(idtype):
assert F.allclose(t2.edata['h'], s3.edata['h'])
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
@parametrize_dtype
@parametrize_idtype
def test_batch_unbatch_frame(idtype):
"""Test module of node/edge frames of batched/unbatched DGLGraphs.
Also address the bug mentioned in https://github.com/dmlc/dgl/issues/1475.
......@@ -118,7 +118,7 @@ def test_batch_unbatch_frame(idtype):
assert F.allclose(_g2.ndata['h'], F.zeros((N2, D)))
assert F.allclose(_g2.edata['h'], F.zeros((E2, D)))
@parametrize_dtype
@parametrize_idtype
def test_batch_unbatch2(idtype):
# test setting/getting features after batch
a = dgl.graph(([], [])).astype(idtype).to(F.ctx())
......@@ -133,7 +133,7 @@ def test_batch_unbatch2(idtype):
assert F.allclose(c.ndata['h'], F.ones((7, 1)))
assert F.allclose(c.edata['w'], F.ones((5, 1)))
@parametrize_dtype
@parametrize_idtype
def test_batch_send_and_recv(idtype):
t1 = tree1(idtype)
t2 = tree2(idtype)
......@@ -150,7 +150,7 @@ def test_batch_send_and_recv(idtype):
assert F.asnumpy(t1.ndata['h'][1]) == 7
assert F.asnumpy(t2.ndata['h'][4]) == 2
@parametrize_dtype
@parametrize_idtype
def test_batch_propagate(idtype):
t1 = tree1(idtype)
t2 = tree2(idtype)
......@@ -178,7 +178,7 @@ def test_batch_propagate(idtype):
assert F.asnumpy(t1.ndata['h'][0]) == 9
assert F.asnumpy(t2.ndata['h'][1]) == 5
@parametrize_dtype
@parametrize_idtype
def test_batched_edge_ordering(idtype):
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g1.add_nodes(6)
......@@ -195,7 +195,7 @@ def test_batched_edge_ordering(idtype):
r2 = g1.edata['h'][g1.edge_id(4, 5)]
assert F.array_equal(r1, r2)
@parametrize_dtype
@parametrize_idtype
def test_batch_no_edge(idtype):
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g1.add_nodes(6)
......@@ -207,7 +207,7 @@ def test_batch_no_edge(idtype):
g3.add_nodes(1) # no edges
g = dgl.batch([g1, g3, g2]) # should not throw an error
@parametrize_dtype
@parametrize_idtype
def test_batch_keeps_empty_data(idtype):
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g1.ndata["nh"] = F.tensor([])
......@@ -248,7 +248,7 @@ def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
ret[key] = F.tensor(new_batch_num_objs, dtype=F.dtype(batch_num_objs))
return ret
@parametrize_dtype
@parametrize_idtype
def test_set_batch_info(idtype):
ctx = F.ctx()
......
......@@ -4,7 +4,7 @@ import unittest
import pytest
from dgl.base import ALL
from utils import parametrize_dtype
from test_utils import parametrize_idtype
from test_utils import check_graph_equal, get_cases
......@@ -46,7 +46,7 @@ def check_equivalence_between_heterographs(g1, g2, node_attrs=None, edge_attrs=N
@pytest.mark.parametrize('gs', get_cases(['two_hetero_batch']))
@parametrize_dtype
@parametrize_idtype
def test_topology(gs, idtype):
"""Test batching two DGLHeteroGraphs where some nodes are isolated in some relations"""
g1, g2 = gs
......@@ -110,7 +110,7 @@ def test_topology(gs, idtype):
assert bg.batch_size == bg_local.batch_size
@parametrize_dtype
@parametrize_idtype
def test_batching_batched(idtype):
"""Test batching a DGLHeteroGraph and a BatchedDGLHeteroGraph."""
g1 = dgl.heterograph({
......@@ -168,7 +168,7 @@ def test_batching_batched(idtype):
check_equivalence_between_heterographs(g3, g6)
@parametrize_dtype
@parametrize_idtype
def test_features(idtype):
"""Test the features of batched DGLHeteroGraphs"""
g1 = dgl.heterograph({
......@@ -243,7 +243,7 @@ def test_features(idtype):
@unittest.skipIf(F.backend_name == 'mxnet', reason="MXNet does not support split array with zero-length segment.")
@parametrize_dtype
@parametrize_idtype
def test_empty_relation(idtype):
"""Test the features of batched DGLHeteroGraphs"""
g1 = dgl.heterograph({
......@@ -308,7 +308,7 @@ def test_empty_relation(idtype):
dgl.batch([g1, g2])
@parametrize_dtype
@parametrize_idtype
def test_unbatch2(idtype):
# batch 3 graphs but unbatch to 2
g1 = dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
......@@ -333,7 +333,7 @@ def test_unbatch2(idtype):
check_graph_equal(g3, gg3)
@parametrize_dtype
@parametrize_idtype
def test_slice_batch(idtype):
g1 = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
......@@ -376,7 +376,7 @@ def test_slice_batch(idtype):
assert F.allclose(g_i.edges[ety].data[feat], g_slice.edges[ety].data[feat])
@parametrize_dtype
@parametrize_idtype
def test_batch_keeps_empty_data(idtype):
g1 = dgl.heterograph({("a", "to", "a"): ([], [])}
).astype(idtype).to(F.ctx())
......
......@@ -2,7 +2,7 @@ import numpy as np
import scipy.sparse as ssp
import pytest
import dgl
from utils import parametrize_dtype
from test_utils import parametrize_idtype
import backend as F
def _random_simple_graph(idtype, dtype, ctx, M, N, max_nnz, srctype, dsttype, etype):
......@@ -27,7 +27,7 @@ def _random_simple_graph(idtype, dtype, ctx, M, N, max_nnz, srctype, dsttype, et
A.edata['w'] = F.copy_to(F.tensor(val, dtype=dtype), ctx)
return a, A
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('dtype', [F.float32, F.float64])
def test_csrmm(idtype, dtype):
a, A = _random_simple_graph(idtype, dtype, F.ctx(), 500, 600, 9000, 'A', 'B', 'AB')
......@@ -39,7 +39,7 @@ def test_csrmm(idtype, dtype):
c = F.tensor((a * b).todense(), dtype=dtype)
assert F.allclose(C_adj, c)
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('dtype', [F.float32, F.float64])
@pytest.mark.parametrize('num_vtypes', [1, 2])
def test_csrmm_backward(idtype, dtype, num_vtypes):
......@@ -77,7 +77,7 @@ def test_csrmm_backward(idtype, dtype, num_vtypes):
assert np.allclose(a_dense_grad, A_spspmm_grad, rtol=1e-4, atol=1e-4)
assert np.allclose(b_dense_grad, B_spspmm_grad, rtol=1e-4, atol=1e-4)
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('dtype', [F.float32, F.float64])
def test_csrsum(idtype, dtype):
a, A = _random_simple_graph(idtype, dtype, F.ctx(), 500, 600, 9000, 'A', 'B', 'AB')
......@@ -89,7 +89,7 @@ def test_csrsum(idtype, dtype):
c = F.tensor((a + b).todense(), dtype=dtype)
assert F.allclose(C_adj, c)
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('dtype', [F.float32, F.float64])
@pytest.mark.parametrize('nelems', [1, 2])
def test_csrsum_backward(idtype, dtype, nelems):
......@@ -144,7 +144,7 @@ def test_csrsum_backward(idtype, dtype, nelems):
A_spspmm_grad = F.asnumpy(F.grad(A.edata['w']))
assert np.allclose(a_dense_grad, A_spspmm_grad, rtol=1e-4, atol=1e-4)
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('dtype', [F.float32, F.float64])
@pytest.mark.parametrize('A_nnz', [9000, 0])
@pytest.mark.parametrize('B_nnz', [9000, 0])
......@@ -158,7 +158,7 @@ def test_csrmask(idtype, dtype, A_nnz, B_nnz):
c = F.tensor(a.todense()[B_row, B_col], dtype)
assert F.allclose(C, c)
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('dtype', [F.float32, F.float64])
def test_csrmask_backward(idtype, dtype):
a, A = _random_simple_graph(idtype, dtype, F.ctx(), 3, 4, 6, 'A', 'B', 'AB')
......
......@@ -3,7 +3,7 @@ import backend as F
import dgl
from dgl.dataloading import NeighborSampler, negative_sampler, \
as_edge_prediction_sampler
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
def create_test_graph(idtype):
# test heterograph from the docstring, plus a user -- wishes -- game relation
......@@ -25,7 +25,7 @@ def create_test_graph(idtype):
return g
@parametrize_dtype
@parametrize_idtype
def test_edge_prediction_sampler(idtype):
g = create_test_graph(idtype)
sampler = NeighborSampler([10,10])
......
......@@ -10,7 +10,7 @@ import networkx as nx
import unittest, pytest
from dgl import DGLError
import test_utils
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
from scipy.sparse import rand
rfuncs = {'sum': fn.sum, 'max': fn.max, 'min': fn.min, 'mean': fn.mean}
......@@ -42,7 +42,7 @@ def create_test_heterograph(idtype):
@pytest.mark.parametrize('g', get_cases(['clique']))
@pytest.mark.parametrize('norm_by', ['src', 'dst'])
# @pytest.mark.parametrize('shp', edge_softmax_shapes)
@parametrize_dtype
@parametrize_idtype
def test_edge_softmax(g, norm_by, idtype):
print("params", norm_by, idtype)
......
......@@ -2,7 +2,7 @@ import dgl
import backend as F
import numpy as np
import unittest
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
from dgl.utils import Filter
def test_graph_filter():
......@@ -41,7 +41,7 @@ def test_graph_filter():
@unittest.skipIf(F._default_context_str == 'cpu',
reason="CPU not yet supported")
@parametrize_dtype
@parametrize_idtype
def test_array_filter(idtype):
f = Filter(F.copy_to(F.tensor([0,1,9,4,6,5,7], dtype=idtype), F.ctx()))
x = F.copy_to(F.tensor([0,3,9,11], dtype=idtype), F.ctx())
......
......@@ -4,7 +4,7 @@ from dgl.frame import Column
import numpy as np
import backend as F
import unittest
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
def test_column_subcolumn():
data = F.copy_to(F.tensor([[1., 1., 1., 1.],
......
......@@ -5,7 +5,7 @@ import dgl.function as fn
import time
import numpy as np
import unittest, pytest
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
iters = 5
n_edge_scale = 1
......
......@@ -9,7 +9,7 @@ import networkx as nx
import unittest, pytest
from dgl import DGLError
import test_utils
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
from utils import assert_is_identical_hetero
from scipy.sparse import rand
import multiprocessing as mp
......@@ -102,7 +102,7 @@ def create_test_heterograph5(idtype):
def get_redfn(name):
return getattr(F, name)
@parametrize_dtype
@parametrize_idtype
def test_create(idtype):
device = F.ctx()
g0 = create_test_heterograph(idtype)
......@@ -233,7 +233,7 @@ def test_create2():
g = dgl.heterograph(
{('A', 'AB', 'B'): ('csc', (indptr, indices, data))}, num_nodes_dict={'A': 20, 'B': 30})
@parametrize_dtype
@parametrize_idtype
def test_query(idtype):
g = create_test_heterograph(idtype)
......@@ -393,7 +393,7 @@ def test_query(idtype):
# test repr
print(g)
@parametrize_dtype
@parametrize_idtype
def test_empty_query(idtype):
g = dgl.graph(([1, 2, 3], [0, 4, 5]), idtype=idtype, device=F.ctx())
g.add_nodes(0)
......@@ -512,7 +512,7 @@ def _test_edge_ids():
eid = g2.edge_ids(0, 1, etype='follows')
assert eid == 0
@parametrize_dtype
@parametrize_idtype
def test_adj(idtype):
g = create_test_heterograph(idtype)
adj = F.sparse_to_numpy(g.adj(transpose=True, etype='follows'))
......@@ -568,7 +568,7 @@ def test_adj(idtype):
[1., 0., 0.],
[0., 1., 0.]]))
@parametrize_dtype
@parametrize_idtype
def test_inc(idtype):
g = create_test_heterograph(idtype)
adj = F.sparse_to_numpy(g['follows'].inc('in'))
......@@ -607,7 +607,7 @@ def test_inc(idtype):
[1., -1.],
[0., 1.]]))
@parametrize_dtype
@parametrize_idtype
def test_view(idtype):
# test single node type
g = dgl.heterograph({
......@@ -689,7 +689,7 @@ def test_view(idtype):
assert F.array_equal(g.dstnodes('user'), F.arange(0, 3, idtype))
g.dstnodes['user'].data.pop('h')
@parametrize_dtype
@parametrize_idtype
def test_view1(idtype):
# test relation view
HG = create_test_heterograph(idtype)
......@@ -818,7 +818,7 @@ def test_view1(idtype):
assert F.array_equal(f3, f4)
assert F.array_equal(g.edges(form='eid'), F.arange(0, 2, g.idtype))
@parametrize_dtype
@parametrize_idtype
def test_flatten(idtype):
def check_mapping(g, fg):
if len(fg.ntypes) == 1:
......@@ -924,7 +924,7 @@ def test_flatten(idtype):
check_mapping(g, fg)
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
@parametrize_dtype
@parametrize_idtype
def test_to_device(idtype):
# TODO: rewrite this test case to accept different graphs so we
# can test reverse graph and batched graph
......@@ -966,7 +966,7 @@ def test_to_device(idtype):
g1.edges['plays'].data['e'] = F.copy_to(F.ones((4, 4)), F.cpu())
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['block']))
def test_to_device2(g, idtype):
g = g.astype(idtype)
......@@ -981,7 +981,7 @@ def test_to_device2(g, idtype):
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
@unittest.skipIf(dgl.backend.backend_name != "pytorch", reason="Pinning graph inplace only supported for PyTorch")
@parametrize_dtype
@parametrize_idtype
def test_pin_memory_(idtype):
# TODO: rewrite this test case to accept different graphs so we
# can test reverse graph and batched graph
......@@ -1039,7 +1039,7 @@ def test_pin_memory_(idtype):
with pytest.raises(DGLError):
g1.pin_memory_()
@parametrize_dtype
@parametrize_idtype
def test_convert_bound(idtype):
def _test_bipartite_bound(data, card):
with pytest.raises(DGLError):
......@@ -1057,7 +1057,7 @@ def test_convert_bound(idtype):
_test_graph_bound(([0, 1], [1, 3]), 3)
@parametrize_dtype
@parametrize_idtype
def test_convert(idtype):
hg = create_test_heterograph(idtype)
hs = []
......@@ -1186,7 +1186,7 @@ def test_convert(idtype):
assert g.num_nodes() == 2
@unittest.skipIf(F._default_context_str == 'gpu', reason="Test on cpu is enough")
@parametrize_dtype
@parametrize_idtype
def test_to_homo_zero_nodes(idtype):
# Fix gihub issue #2870
g = dgl.heterograph({
......@@ -1198,7 +1198,7 @@ def test_to_homo_zero_nodes(idtype):
gg = dgl.to_homogeneous(g, ['x'])
assert 'x' in gg.ndata
@parametrize_dtype
@parametrize_idtype
def test_to_homo2(idtype):
# test the result homogeneous graph has nodes and edges sorted by their types
hg = create_test_heterograph(idtype)
......@@ -1228,7 +1228,7 @@ def test_to_homo2(idtype):
for i, count in enumerate(etype_count):
assert count == hg.num_edges(hg.canonical_etypes[i])
@parametrize_dtype
@parametrize_idtype
def test_invertible_conversion(idtype):
# Test whether to_homogeneous and to_heterogeneous are invertible
hg = create_test_heterograph(idtype)
......@@ -1236,7 +1236,7 @@ def test_invertible_conversion(idtype):
hg2 = dgl.to_heterogeneous(g, hg.ntypes, hg.etypes)
assert_is_identical_hetero(hg, hg2, True)
@parametrize_dtype
@parametrize_idtype
def test_metagraph_reachable(idtype):
g = create_test_heterograph(idtype)
x = F.randn((3, 5))
......@@ -1255,7 +1255,7 @@ def test_metagraph_reachable(idtype):
assert F.asnumpy(new_g.has_edges_between([0, 1], [1, 2])).all()
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="MXNet doesn't support bool tensor")
@parametrize_dtype
@parametrize_idtype
def test_subgraph_mask(idtype):
g = create_test_heterograph(idtype)
g_graph = g['follows']
......@@ -1297,7 +1297,7 @@ def test_subgraph_mask(idtype):
'wishes': F.tensor([False, True], dtype=F.bool)})
_check_subgraph(g, sg2)
@parametrize_dtype
@parametrize_idtype
def test_subgraph(idtype):
g = create_test_heterograph(idtype)
g_graph = g['follows']
......@@ -1446,7 +1446,7 @@ def test_subgraph(idtype):
sg5 = g.edge_type_subgraph(['follows', 'plays', 'wishes'])
_check_typed_subgraph1(g, sg5)
@parametrize_dtype
@parametrize_idtype
def test_apply(idtype):
def node_udf(nodes):
return {'h': nodes.data['h'] * 2}
......@@ -1484,7 +1484,7 @@ def test_apply(idtype):
with pytest.raises(DGLError):
g.apply_edges(edge_udf)
@parametrize_dtype
@parametrize_idtype
def test_level2(idtype):
#edges = {
# 'follows': ([0, 1], [1, 2]),
......@@ -1608,7 +1608,7 @@ def test_level2(idtype):
g.nodes['game'].data.clear()
@parametrize_dtype
@parametrize_idtype
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
def test_more_nnz(idtype):
g = dgl.graph(([0, 0, 0, 0, 0], [1, 1, 1, 1, 1]), idtype=idtype, device=F.ctx())
......@@ -1620,7 +1620,7 @@ def test_more_nnz(idtype):
ans = F.copy_to(F.tensor(ans, dtype=F.dtype(y)), ctx=F.ctx())
assert F.array_equal(y, ans)
@parametrize_dtype
@parametrize_idtype
def test_updates(idtype):
def msg_func(edges):
return {'m': edges.src['h']}
......@@ -1662,7 +1662,7 @@ def test_updates(idtype):
del g.nodes['game'].data['y']
@parametrize_dtype
@parametrize_idtype
def test_backward(idtype):
g = create_test_heterograph(idtype)
x = F.randn((3, 5))
......@@ -1681,7 +1681,7 @@ def test_backward(idtype):
[2., 2., 2., 2., 2.]]))
@parametrize_dtype
@parametrize_idtype
def test_empty_heterograph(idtype):
def assert_empty(g):
assert g.number_of_nodes('user') == 0
......@@ -1708,7 +1708,7 @@ def test_empty_heterograph(idtype):
assert g.number_of_edges('develops') == 2
assert g.number_of_nodes('developer') == 2
@parametrize_dtype
@parametrize_idtype
def test_types_in_function(idtype):
def mfunc1(edges):
assert edges.canonical_etype == ('user', 'follow', 'user')
......@@ -1763,7 +1763,7 @@ def test_types_in_function(idtype):
g.filter_nodes(filter_nodes2, ntype='game')
g.filter_edges(filter_edges2)
@parametrize_dtype
@parametrize_idtype
def test_stack_reduce(idtype):
#edges = {
# 'follows': ([0, 1], [1, 2]),
......@@ -1790,7 +1790,7 @@ def test_stack_reduce(idtype):
'stack')
assert g.nodes['game'].data['y'].shape == (g.number_of_nodes('game'), 1, 200)
@parametrize_dtype
@parametrize_idtype
def test_isolated_ntype(idtype):
g = dgl.heterograph({
('A', 'AB', 'B'): ([0, 1, 2], [1, 2, 3])},
......@@ -1817,7 +1817,7 @@ def test_isolated_ntype(idtype):
assert g.number_of_nodes('C') == 4
@parametrize_dtype
@parametrize_idtype
def test_ismultigraph(idtype):
g1 = dgl.heterograph({('A', 'AB', 'B'): ([0, 0, 1, 2], [1, 2, 5, 5])},
{'A': 6, 'B': 6}, idtype=idtype, device=F.ctx())
......@@ -1850,7 +1850,7 @@ def test_ismultigraph(idtype):
{'A': 6, 'C': 6}, idtype=idtype, device=F.ctx())
assert g.is_multigraph == True
@parametrize_dtype
@parametrize_idtype
def test_bipartite(idtype):
g1 = dgl.heterograph({('A', 'AB', 'B'): ([0, 0, 1], [1, 2, 5])},
idtype=idtype, device=F.ctx())
......@@ -1910,7 +1910,7 @@ def test_bipartite(idtype):
assert not g4.is_unibipartite
@parametrize_dtype
@parametrize_idtype
def test_dtype_cast(idtype):
g = dgl.graph(([0, 1, 0, 2], [0, 1, 1, 0]), idtype=idtype, device=F.ctx())
assert g.idtype == idtype
......@@ -1983,7 +1983,7 @@ def test_float_cast():
assert len(values) == len(pvalues)
assert F.allclose(values, F.tensor(pvalues), 0, 0)
@parametrize_dtype
@parametrize_idtype
def test_format(idtype):
# single relation
g = dgl.graph(([0, 1, 0, 2], [0, 1, 1, 0]), idtype=idtype, device=F.ctx())
......@@ -2020,7 +2020,7 @@ def test_format(idtype):
assert g.in_degrees(vid) == ind_arr[vid]
assert F.array_equal(in_degrees, g.in_degrees())
@parametrize_dtype
@parametrize_idtype
def test_edges_order(idtype):
# (0, 2), (1, 2), (0, 1), (0, 1), (2, 1)
g = dgl.graph((
......@@ -2033,7 +2033,7 @@ def test_edges_order(idtype):
assert F.array_equal(src, F.tensor([0, 0, 0, 1, 2], dtype=idtype))
assert F.array_equal(dst, F.tensor([1, 1, 2, 2, 1], dtype=idtype))
@parametrize_dtype
@parametrize_idtype
def test_reverse(idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 2, 4, 3 ,1, 3], [1, 2, 3, 2, 0, 0, 1]),
......@@ -2160,7 +2160,7 @@ def test_reverse(idtype):
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
@parametrize_dtype
@parametrize_idtype
def test_clone(idtype):
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
......@@ -2220,7 +2220,7 @@ def test_clone(idtype):
assert g.edges['plays'].data['h'].shape[0] != new_g.edges['plays'].data['h'].shape[0]
@parametrize_dtype
@parametrize_idtype
def test_add_edges(idtype):
# homogeneous graph
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
......@@ -2390,7 +2390,7 @@ def test_add_edges(idtype):
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 1, 1], dtype=idtype))
assert F.array_equal(g.edges['develops'].data['h'], F.tensor([0, 0, 2, 2], dtype=idtype))
@parametrize_dtype
@parametrize_idtype
def test_add_nodes(idtype):
# homogeneous Graphs
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
......@@ -2427,7 +2427,7 @@ def test_add_nodes(idtype):
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2, 2], dtype=idtype))
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="MXNet has error with (0,) shape tensor.")
@parametrize_dtype
@parametrize_idtype
def test_remove_edges(idtype):
# homogeneous Graphs
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
......@@ -2520,7 +2520,7 @@ def test_remove_edges(idtype):
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype))
@parametrize_dtype
@parametrize_idtype
def test_remove_nodes(idtype):
# homogeneous Graphs
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
......@@ -2618,7 +2618,7 @@ def test_remove_nodes(idtype):
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([0], dtype=idtype))
@parametrize_dtype
@parametrize_idtype
def test_frame(idtype):
g = dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([0, 1, 2, 3], dtype=idtype), ctx=F.ctx())
......@@ -2659,7 +2659,7 @@ def test_frame(idtype):
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TensorFlow always create a new tensor")
@unittest.skipIf(F._default_context_str == 'cpu', reason="cpu do not have context change problem")
@parametrize_dtype
@parametrize_idtype
def test_frame_device(idtype):
g = dgl.graph(([0,1,2], [2,3,1]))
g.ndata['h'] = F.copy_to(F.tensor([1,1,1,2], dtype=idtype), ctx=F.cpu())
......@@ -2710,7 +2710,7 @@ def test_frame_device(idtype):
assert F.context(ng._node_frames[0]._columns['hh'].storage) == F.ctx()
assert F.context(ng._edge_frames[0]._columns['h'].storage) == F.cpu()
@parametrize_dtype
@parametrize_idtype
def test_create_block(idtype):
block = dgl.create_block(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
assert block.num_src_nodes() == 3
......@@ -2822,7 +2822,7 @@ def test_create_block(idtype):
assert hg.edges['AB'].data['x'] is eabx
assert hg.edges['BA'].data['x'] is ebax
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('fmt', ['coo', 'csr', 'csc'])
def test_adj_sparse(idtype, fmt):
if fmt == 'coo':
......
......@@ -4,7 +4,7 @@ import networkx as nx
import numpy as np
import backend as F
from itertools import product
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
import pytest
def udf_copy_src(edges):
......@@ -362,7 +362,7 @@ def test_all_binary_builtins():
_test(g, lhs, rhs, binary_op, reducer, partial, nid,
broadcast=broadcast)
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo-zero-degree']))
def test_mean_zero_degree(g, idtype):
g = g.astype(idtype).to(F.ctx())
......
import backend as F
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
import dgl
@parametrize_dtype
@parametrize_idtype
def test_heterograph_merge(idtype):
g1 = dgl.heterograph({("a", "to", "b"): ([0,1], [1,0])}).astype(idtype).to(F.ctx())
g1_n_edges = g1.num_edges(etype="to")
......
......@@ -10,7 +10,7 @@ import networkx as nx
import unittest, pytest
from dgl import DGLError
import test_utils
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
from scipy.sparse import rand
rfuncs = {'sum': fn.sum, 'max': fn.max, 'min': fn.min, 'mean': fn.mean}
feat_size = 2
......@@ -69,7 +69,7 @@ def create_test_heterograph_large(idtype):
assert g.device == F.ctx()
return g
@parametrize_dtype
@parametrize_idtype
def test_unary_copy_u(idtype):
def _test(mfunc, rfunc):
g = create_test_heterograph_2(idtype)
......@@ -134,7 +134,7 @@ def test_unary_copy_u(idtype):
_test(fn.copy_u, fn.min)
# _test('copy_u', 'mean')
@parametrize_dtype
@parametrize_idtype
def test_unary_copy_e(idtype):
def _test(mfunc, rfunc):
......@@ -219,7 +219,7 @@ def test_unary_copy_e(idtype):
_test(fn.copy_e, fn.min)
# _test('copy_e', 'mean')
@parametrize_dtype
@parametrize_idtype
def test_binary_op(idtype):
def _test(lhs, rhs, binary_op, reducer):
......
......@@ -2,12 +2,12 @@ from dgl.partition import NDArrayPartition
from dgl.distributed import graph_partition_book as gpb
import unittest
import backend as F
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
@unittest.skipIf(F._default_context_str == 'cpu', reason="NDArrayPartition only works on GPU.")
@parametrize_dtype
@parametrize_idtype
def test_get_node_partition_from_book(idtype):
node_map = {
"type_n": F.tensor([
......
......@@ -10,7 +10,7 @@ import pickle
import io
import unittest, pytest
import test_utils
from test_utils import parametrize_dtype, get_cases
from test_utils import parametrize_idtype, get_cases
from utils import assert_is_identical, assert_is_identical_hetero
def _assert_is_identical_nodeflow(nf1, nf2):
......@@ -92,7 +92,7 @@ def _global_message_func(nodes):
return {'x': nodes.data['x']}
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(exclude=['dglgraph', 'two_hetero_batch']))
def test_pickling_graph(g, idtype):
g = g.astype(idtype)
......@@ -165,7 +165,7 @@ def test_pickling_subgraph():
@unittest.skipIf(F._default_context_str != 'gpu', reason="Need GPU for pin")
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TensorFlow create graph on gpu when unpickle")
@parametrize_dtype
@parametrize_idtype
def test_pickling_is_pinned(idtype):
from copy import deepcopy
g = dgl.rand_graph(10, 20, idtype=idtype, device=F.cpu())
......
......@@ -3,7 +3,7 @@ import networkx as nx
import backend as F
import unittest
import utils as U
from utils import parametrize_dtype
from test_utils import parametrize_idtype
def create_graph(idtype):
g = dgl.from_networkx(nx.path_graph(5), idtype=idtype, device=F.ctx())
......@@ -17,7 +17,7 @@ def rfunc(nodes):
return {'x' : nodes.data['x'] + msg}
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
@parametrize_idtype
def test_prop_nodes_bfs(idtype):
g = create_graph(idtype)
g.ndata['x'] = F.ones((5, 2))
......@@ -27,7 +27,7 @@ def test_prop_nodes_bfs(idtype):
F.tensor([[2., 2.], [4., 4.], [6., 6.], [8., 8.], [9., 9.]]))
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
@parametrize_idtype
def test_prop_edges_dfs(idtype):
g = create_graph(idtype)
g.ndata['x'] = F.ones((5, 2))
......@@ -49,7 +49,7 @@ def test_prop_edges_dfs(idtype):
F.tensor([[3., 3.], [5., 5.], [7., 7.], [9., 9.], [5., 5.]]))
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
@parametrize_idtype
def test_prop_nodes_topo(idtype):
# bi-directional chain
g = create_graph(idtype)
......
......@@ -5,9 +5,9 @@ import networkx as nx
import unittest
import pytest
from test_utils.graph_cases import get_cases
from utils import parametrize_dtype
from test_utils import parametrize_idtype
@parametrize_dtype
@parametrize_idtype
def test_sum_case1(idtype):
# NOTE: If you want to update this test case, remember to update the docstring
# example too!!!
......@@ -21,7 +21,7 @@ def test_sum_case1(idtype):
assert F.allclose(F.tensor([3., 6.]), dgl.sum_nodes(bg, 'h'))
assert F.allclose(F.tensor([.5, 1.7]), dgl.sum_nodes(bg, 'h', 'w'))
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
@pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
def test_reduce_readout(g, idtype, reducer):
......@@ -67,7 +67,7 @@ def test_reduce_readout(g, idtype, reducer):
subx.append(sx)
assert F.allclose(x, F.cat(subx, dim=0))
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
@pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
def test_weighted_reduce_readout(g, idtype, reducer):
......@@ -115,7 +115,7 @@ def test_weighted_reduce_readout(g, idtype, reducer):
subx.append(sx)
assert F.allclose(x, F.cat(subx, dim=0))
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
@pytest.mark.parametrize('descending', [True, False])
def test_topk(g, idtype, descending):
......@@ -181,7 +181,7 @@ def test_topk(g, idtype, descending):
assert F.allclose(val, F.cat(subval, dim=0))
assert F.allclose(indices, F.cat(subidx, dim=0))
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
def test_softmax(g, idtype):
g = g.astype(idtype).to(F.ctx())
......@@ -204,7 +204,7 @@ def test_softmax(g, idtype):
subx.append(F.softmax(sg.edata['h'], dim=0))
assert F.allclose(x, F.cat(subx, dim=0))
@parametrize_dtype
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
def test_broadcast(idtype, g):
g = g.astype(idtype).to(F.ctx())
......
import backend as F
import numpy as np
import dgl
from test_utils import parametrize_dtype
from test_utils import parametrize_idtype
@parametrize_dtype
@parametrize_idtype
def test_node_removal(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -31,7 +31,7 @@ def test_node_removal(idtype):
assert dgl.NID in g.ndata
assert dgl.EID in g.edata
@parametrize_dtype
@parametrize_idtype
def test_multigraph_node_removal(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -59,7 +59,7 @@ def test_multigraph_node_removal(idtype):
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 6
@parametrize_dtype
@parametrize_idtype
def test_multigraph_edge_removal(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -86,7 +86,7 @@ def test_multigraph_edge_removal(idtype):
assert g.number_of_nodes() == 5
assert g.number_of_edges() == 8
@parametrize_dtype
@parametrize_idtype
def test_edge_removal(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -117,7 +117,7 @@ def test_edge_removal(idtype):
assert F.array_equal(g.edata['id'], F.tensor([0, 1, 10, 11, 12, 20, 21, 22, 23, 24, 0]))
assert dgl.EID in g.edata
@parametrize_dtype
@parametrize_idtype
def test_node_and_edge_removal(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -156,7 +156,7 @@ def test_node_and_edge_removal(idtype):
assert g.number_of_nodes() == 10
assert g.number_of_edges() == 48
@parametrize_dtype
@parametrize_idtype
def test_node_frame(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -169,7 +169,7 @@ def test_node_frame(idtype):
g.remove_nodes(range(3, 7))
assert F.allclose(g.ndata['h'], F.tensor(new_data))
@parametrize_dtype
@parametrize_idtype
def test_edge_frame(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
......@@ -183,7 +183,7 @@ def test_edge_frame(idtype):
g.remove_edges(range(3, 7))
assert F.allclose(g.edata['h'], F.tensor(new_data))
@parametrize_dtype
@parametrize_idtype
def test_issue1287(idtype):
# reproduce https://github.com/dmlc/dgl/issues/1287.
# setting features after remove nodes
......
......@@ -9,7 +9,7 @@ import dgl.function as fn
import pickle
import io
import unittest
from utils import parametrize_dtype
from test_utils import parametrize_idtype
import multiprocessing as mp
import os
......@@ -43,7 +43,7 @@ def _assert_is_identical_hetero(g, g2):
assert F.array_equal(dst, dst2)
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@parametrize_dtype
@parametrize_idtype
def test_single_process(idtype):
hg = create_test_graph(idtype=idtype)
hg_share = hg.shared_memory("hg")
......@@ -60,7 +60,7 @@ def sub_proc(hg_origin, name):
_assert_is_identical_hetero(hg_origin, hg_save_again)
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@parametrize_dtype
@parametrize_idtype
def test_multi_process(idtype):
hg = create_test_graph(idtype=idtype)
hg_share = hg.shared_memory("hg1")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment